feat: add ruleset api (#1844)

* fix(controller): decode old object for delete requests

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: modernize golang

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: modernize golang

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: modernize golang

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* fix(config): remove usergroups default

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* fix(config): remove usergroups default

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* sec(ghsa-2ww6-hf35-mfjm): intercept namespace subresource

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* feat(api): add rulestatus api

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* chore: conflicts

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* feat(api): add rulestatus api

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* feat(api): add rulestatus api

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* feat(api): add rulestatus api

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* feat(api): add rulestatus api

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* feat(api): add rulestatus api

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

* feat(api): add rulestatus api

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>

---------

Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>
This commit is contained in:
Oliver Bähler
2026-01-27 14:28:48 +01:00
committed by GitHub
parent b9a14a954d
commit a6b830b1af
284 changed files with 12699 additions and 2162 deletions

View File

@@ -45,7 +45,6 @@ jobs:
fail-fast: false
matrix:
k8s-version:
- 'v1.30.0'
- 'v1.31.0'
- 'v1.32.0'
- 'v1.33.0'

View File

@@ -32,7 +32,7 @@ jobs:
- uses: creekorful/goreportcard-action@1f35ced8cdac2cba28c9a2f2288a16aacfd507f9 # v1.0
- uses: anchore/sbom-action/download-syft@0b82b0b1a22399a1c542d4d656f70cd903571b5c
- name: Install Cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
uses: sigstore/cosign-installer@7e8b541eb2e61bf99390e1afd4be13a184e9ebc5 # v3.10.1
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
with:

View File

@@ -1,12 +1,13 @@
nwa:
cmd: "update"
holder: "Project Capsule Authors"
year: "2020-2025"
year: "2020-2026"
spdxids: "Apache-2.0"
path:
- "pkg/**/*.go"
- "cmd/**/*.go"
- "api/**/*.go"
- "internal/**/*.go"
- "controllers/**/*.go"
- "main.go"
mute: false

View File

@@ -92,7 +92,7 @@ helm-schema: helm-plugin-schema
helm-test: HELM_KIND_CONFIG ?= ""
helm-test: kind
@mkdir -p /tmp/results || true
@$(KIND) create cluster --wait=60s --name capsule-charts --image kindest/node:$(KUBERNETES_SUPPORTED_VERSION) --config $(HELM_KIND_CONFIG)
@$(KIND) create cluster --wait=60s --name capsule-charts --image kindest/node:$(KUBERNETES_SUPPORTED_VERSION) --config ./hack/kind-cluster.yaml
@make helm-test-exec
@$(KIND) delete cluster --name capsule-charts
@@ -104,7 +104,7 @@ helm-test-exec: ct helm-controller-version ko-build-all
# Setup development env
dev-build: kind
$(KIND) create cluster --wait=60s --name $(CLUSTER_NAME) --image kindest/node:$(KUBERNETES_SUPPORTED_VERSION)
$(KIND) create cluster --wait=60s --name $(CLUSTER_NAME) --image kindest/node:$(KUBERNETES_SUPPORTED_VERSION) --config ./hack/kind-cluster.yaml
$(MAKE) dev-install-deps
.PHONY: dev-destroy
@@ -220,12 +220,12 @@ dev-setup-capsule: dev-setup-fluxcd
dev-setup-capsule-example: dev-setup-fluxcd
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/capsule/example-setup | envsubst | kubectl apply -f -
@$(KUBECTL) create ns wind-test --as joe --as-group projectcapsule.dev
@$(KUBECTL) create ns wind-prod --as joe --as-group projectcapsule.dev
@$(KUBECTL) create ns green-test --as bob --as-group projectcapsule.dev
@$(KUBECTL) create ns green-prod --as bob --as-group projectcapsule.dev
@$(KUBECTL) create ns solar-test --as alice --as-group projectcapsule.dev
@$(KUBECTL) create ns solar-prod --as alice --as-group projectcapsule.dev
@$(KUBECTL) create ns wind-test --as joe --as-group projectcapsule.dev || true
@$(KUBECTL) create ns wind-prod --as joe --as-group projectcapsule.dev || true
@$(KUBECTL) create ns green-test --as bob --as-group projectcapsule.dev || true
@$(KUBECTL) create ns green-prod --as bob --as-group projectcapsule.dev || true
@$(KUBECTL) create ns solar-test --as alice --as-group projectcapsule.dev || true
@$(KUBECTL) create ns solar-prod --as alice --as-group projectcapsule.dev || true
wait-for-helmreleases:
@ echo "Waiting for all HelmReleases to have observedGeneration >= 0..."
@@ -316,7 +316,7 @@ e2e-build: kind
$(MAKE) e2e-install
.PHONY: e2e-install
e2e-install: ko-build-all
e2e-install: helm-controller-version ko-build-all
$(MAKE) e2e-load-image CLUSTER_NAME=$(CLUSTER_NAME) IMAGE=$(CAPSULE_IMG) VERSION=$(VERSION)
$(HELM) upgrade \
--dependency-update \
@@ -331,6 +331,7 @@ e2e-install: ko-build-all
--set 'manager.livenessProbe.failureThreshold=10' \
--set 'webhooks.hooks.nodes.enabled=true' \
--set "webhooks.exclusive=true"\
--set "manager.options.logLevel=debug"\
capsule \
./charts/capsule

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2025 Project Capsule Authors
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -15,7 +15,6 @@ func (in *Tenant) SetupWebhookWithManager(mgr ctrl.Manager) error {
return nil
}
return ctrl.NewWebhookManagedBy(mgr).
For(in).
return ctrl.NewWebhookManagedBy(mgr, in).
Complete()
}

View File

@@ -4,11 +4,16 @@
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
)
// CapsuleConfigurationStatus defines the Capsule configuration status.
type CapsuleConfigurationStatus struct {
// Last time all caches were invalidated
LastCacheInvalidation metav1.Time `json:"lastCacheInvalidation,omitempty"`
// Users which are considered Capsule Users and are bound to the Capsule Tenant construct.
Users api.UserListSpec `json:"users,omitempty"`
}

View File

@@ -4,6 +4,7 @@
package v1beta2
import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
@@ -53,6 +54,50 @@ type CapsuleConfigurationSpec struct {
// for interacting with namespaces. Because if that label is not defined, it's assumed that namespace interaction was not targeted towards a tenant and will therefore
// be ignored by capsule.
Administrators api.UserListSpec `json:"administrators,omitempty"`
// Configuration for dynamic Validating and Mutating Admission webhooks managed by Capsule.
Admission DynamicAdmission `json:"admission,omitempty"`
// Define Properties for managed ClusterRoles by Capsule
// +kubebuilder:default={}
RBAC *RBACConfiguration `json:"rbac"`
// Define the period of time after which a cache invalidation is executed for all caches.
// +kubebuilder:default="24h"
CacheInvalidation metav1.Duration `json:"cacheInvalidation"`
}
type RBACConfiguration struct {
// The ClusterRoles applied for Administrators
// +kubebuilder:default={capsule-namespace-deleter}
AdministrationClusterRoles []string `json:"administrationClusterRoles,omitempty"`
// The ClusterRoles applied for ServiceAccounts which had owner Promotion
// +kubebuilder:default={capsule-namespace-provisioner,capsule-namespace-deleter}
PromotionClusterRoles []string `json:"promotionClusterRoles,omitempty"`
// Name for the ClusterRole required to grant Namespace Deletion permissions.
// +kubebuilder:default=capsule-namespace-deleter
DeleterClusterRole string `json:"deleter,omitempty"`
// Name for the ClusterRole required to grant Namespace Provision permissions.
// +kubebuilder:default=capsule-namespace-provisioner
ProvisionerClusterRole string `json:"provisioner,omitempty"`
}
type DynamicAdmission struct {
// Configure dynamic Mutating Admission for Capsule
Mutating DynamicAdmissionConfig `json:"mutating,omitempty"`
// Configure dynamic Validating Admission for Capsule
Validating DynamicAdmissionConfig `json:"validating,omitempty"`
}
type DynamicAdmissionConfig struct {
// Name the Admission Webhook
Name api.Name `json:"name,omitempty"`
// Labels added to the Admission Webhook
// +optional
Labels map[string]string `json:"labels,omitempty"`
// Annotations added to the Admission Webhook
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// From the upstream struct
Client admissionregistrationv1.WebhookClientConfig `json:"client"`
}
type NodeMetadata struct {
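
For orientation, a minimal CapsuleConfiguration sketch exercising the new admission, rbac and cacheInvalidation fields; field names and defaults come from the structs and kubebuilder markers above, while the webhook name, service name and namespace are placeholder assumptions:

apiVersion: capsule.clastix.io/v1beta2
kind: CapsuleConfiguration
metadata:
  name: default
spec:
  enableTLSReconciler: true
  # Period after which all caches are invalidated (CRD default: "24h")
  cacheInvalidation: 24h
  # Managed ClusterRoles; values below mirror the kubebuilder defaults
  rbac:
    administrationClusterRoles: ["capsule-namespace-deleter"]
    promotionClusterRoles: ["capsule-namespace-provisioner", "capsule-namespace-deleter"]
    deleter: capsule-namespace-deleter
    provisioner: capsule-namespace-provisioner
  # Dynamic admission webhooks managed by Capsule
  admission:
    validating:
      name: capsule-dynamic              # placeholder name
      client:
        service:
          name: capsule-webhook-service  # placeholder service reference
          namespace: capsule-system
          port: 443
    mutating:
      name: capsule-dynamic
      client:
        service:
          name: capsule-webhook-service
          namespace: capsule-system
          port: 443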

View File

@@ -0,0 +1,33 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
)
// +kubebuilder:object:generate=true
type NamespaceRule struct {
// Enforce these properties via Rules
NamespaceRuleBody `json:",inline"`
// Select namespaces to which this rule applies
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
}
// +kubebuilder:object:generate=true
type NamespaceRuleBody struct {
// Enforcement Rules applied
//+optional
Enforce NamespaceRuleEnforceBody `json:"enforce,omitzero"`
}
// +kubebuilder:object:generate=true
type NamespaceRuleEnforceBody struct {
// Define registries which are allowed to be used within this tenant
// The rules are aggregated, since you can use Regular Expressions to match registry endpoints
Registries []api.OCIRegistry `json:"registries,omitempty"`
}
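
As a sketch of how these types surface in a Tenant manifest (the spec.rules field is added further below in this change; the selector label and registry URL are illustrative):

apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: solar
spec:
  owners:
    - kind: User
      name: alice
  rules:
    - namespaceSelector:                      # picks the namespaces the rule applies to
        matchLabels:
          environment: production
      enforce:
        registries:
          - url: "registry.example.com/.*"    # treated as a regular expression
            policy: ["IfNotPresent"]          # allowed pull policies; empty allows all
            validation: ["pod/images"]        # defaults to pod/images and pod/volumes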

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2025 Project Capsule Authors
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2025 Project Capsule Authors
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2

View File

@@ -0,0 +1,44 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
type RuleStatus struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitzero"`
// +optional
Status RuleStatusSpec `json:"status,omitzero"`
}
// +kubebuilder:object:root=true
// RuleStatusList contains a list of RuleStatus.
type RuleStatusList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitzero"`
Items []RuleStatus `json:"items"`
}
func init() {
SchemeBuilder.Register(&RuleStatus{}, &RuleStatusList{})
}
// RuleStatus contains the accumulated rules applying to the namespace it's deployed in.
// +kubebuilder:object:generate=true
type RuleStatusSpec struct {
// Managed Enforcement properties per Namespace (aggregated from rules)
//+optional
Rule NamespaceRuleBody `json:"rule,omitzero"`
}
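
The controller is expected to materialise one such namespaced object per tenant namespace, carrying the aggregated rules; a hedged sketch (object name, namespace and registry are illustrative, and status is written by the controller via the status subresource):

apiVersion: capsule.clastix.io/v1beta2
kind: RuleStatus
metadata:
  name: solar            # naming is up to the controller
  namespace: solar-prod
status:
  rule:
    enforce:
      registries:
        - url: "registry.example.com/.*"
          validation:
            - pod/images
            - pod/volumes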

View File

@@ -4,78 +4,17 @@
package v1beta2
import (
"context"
"slices"
"sort"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
)
func (in *Tenant) CollectOwners(ctx context.Context, c client.Client, allowPromotion bool, admins api.UserListSpec) (api.OwnerStatusListSpec, error) {
owners := in.Spec.Owners.ToStatusOwners()
// Promoted ServiceAccounts
if allowPromotion && len(in.Status.Namespaces) > 0 {
saList := &corev1.ServiceAccountList{}
if err := c.List(ctx, saList,
client.MatchingLabels{
meta.OwnerPromotionLabel: meta.OwnerPromotionLabelTrigger,
},
); err != nil {
return nil, err
}
for _, sa := range saList.Items {
for _, ns := range in.Status.Namespaces {
if sa.GetNamespace() != ns {
continue
}
owners.Upsert(api.CoreOwnerSpec{
UserSpec: api.UserSpec{
Kind: api.ServiceAccountOwner,
Name: serviceaccount.ServiceAccountUsernamePrefix + sa.Namespace + ":" + sa.Name,
},
ClusterRoles: []string{
api.ProvisionerRoleName,
api.DeleterRoleName,
},
})
}
}
}
// Administrators
for _, a := range admins {
owners.Upsert(api.CoreOwnerSpec{
UserSpec: a,
ClusterRoles: []string{
api.DeleterRoleName,
},
})
}
// Dedicated Owner Objects
listed, err := in.Spec.Permissions.ListMatchingOwners(ctx, c, in.GetName())
if err != nil {
return nil, err
}
for _, o := range listed {
owners.Upsert(o.Spec.CoreOwnerSpec)
}
return owners, nil
}
func (in *Tenant) GetRoleBindings() []api.AdditionalRoleBindingsSpec {
roleBindings := make([]api.AdditionalRoleBindingsSpec, 0) //nolint:prealloc
roleBindings := make([]api.AdditionalRoleBindingsSpec, 0, len(in.Spec.AdditionalRoleBindings))
for _, owner := range in.Status.Owners {
roleBindings = append(roleBindings, owner.ToAdditionalRolebindings()...)

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2025 Project Capsule Authors
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2

View File

@@ -47,6 +47,14 @@ type TenantStatusNamespaceItem struct {
UID k8stypes.UID `json:"uid,omitempty"`
// Managed Metadata
Metadata *TenantStatusNamespaceMetadata `json:"metadata,omitempty"`
// Managed Enforcement
//+optional
Enforce TenantStatusNamespaceEnforcement `json:"enforce,omitzero"`
}
type TenantStatusNamespaceEnforcement struct {
// Registries which are allowed within this namespace
Registries []api.OCIRegistry `json:"registry,omitempty"`
}
type TenantStatusNamespaceMetadata struct {
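
The aggregated enforcement is also reported per namespace in the Tenant status; a rough sketch (the item's name field sits outside this hunk and is assumed, and note that the registries list is serialised as "registry" per the json tag above):

status:
  namespaces:
    - name: solar-prod
      enforce:
        registry:
          - url: "registry.example.com/.*"
            validation:
              - pod/images
              - pod/volumes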

View File

@@ -19,6 +19,14 @@ type TenantSpec struct {
// Specify Permissions for the Tenant.
// +optional
Permissions Permissions `json:"permissions,omitzero"`
// Specify enforcement specifications for the scope of the Tenant.
// We are moving all configuration enforcement per namespace into a rule construct.
// It's currently not final.
//
// Read More: https://projectcapsule.dev/docs/tenants/rules/
//+optional
Rules []*NamespaceRule `json:"rules,omitzero"`
// Specifies the owners of the Tenant.
// Optional
Owners api.OwnerListSpec `json:"owners,omitempty"`
@@ -36,27 +44,13 @@ type TenantSpec struct {
// Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
// +optional
IngressOptions IngressOptions `json:"ingressOptions,omitzero"`
// Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
ContainerRegistries *api.AllowedListSpec `json:"containerRegistries,omitempty"`
// Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Deprecated: Use Tenant Replications instead (https://projectcapsule.dev/docs/replications/)
//
// Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
// +optional
NetworkPolicies api.NetworkPolicySpec `json:"networkPolicies,omitzero"`
// Deprecated: Use Tenant Replications instead (https://projectcapsule.dev/docs/replications/)
//
// Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
// +optional
LimitRanges api.LimitRangesSpec `json:"limitRanges,omitzero"`
// Specifies a list of ResourceQuota resources assigned to the Tenant. The assigned values are inherited by any namespace created in the Tenant. The Capsule operator aggregates ResourceQuota at Tenant level, so that the hard quota is never crossed for the given Tenant. This permits the Tenant owner to consume resources in the Tenant regardless of the namespace. Optional.
// +optional
ResourceQuota api.ResourceQuotaSpec `json:"resourceQuotas,omitzero"`
// Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
AdditionalRoleBindings []api.AdditionalRoleBindingsSpec `json:"additionalRoleBindings,omitempty"`
// Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
ImagePullPolicies []api.ImagePullPolicySpec `json:"imagePullPolicies,omitempty"`
// Specifies the allowed RuntimeClasses assigned to the Tenant.
// Capsule assures that all Pods resources created in the Tenant can use only one of the allowed RuntimeClasses.
// Optional.
@@ -87,6 +81,26 @@ type TenantSpec struct {
// If unset, Tenant uses CapsuleConfiguration's forceTenantPrefix
// Optional
ForceTenantPrefix *bool `json:"forceTenantPrefix,omitempty"`
// Deprecated: Use Enforcement.Registries instead
//
// Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
ContainerRegistries *api.AllowedListSpec `json:"containerRegistries,omitempty"`
// Deprecated: Use Enforcement.Registries instead
//
// Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
ImagePullPolicies []api.ImagePullPolicySpec `json:"imagePullPolicies,omitempty"`
// Deprecated: Use Tenant Replications instead (https://projectcapsule.dev/docs/replications/)
//
// Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
// +optional
NetworkPolicies api.NetworkPolicySpec `json:"networkPolicies,omitzero"`
// Deprecated: Use Tenant Replications instead (https://projectcapsule.dev/docs/replications/)
//
// Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
// +optional
LimitRanges api.LimitRangesSpec `json:"limitRanges,omitzero"`
}
type Permissions struct {
@@ -129,7 +143,8 @@ type Tenant struct {
// +optional
metav1.ObjectMeta `json:"metadata,omitzero"`
Spec TenantSpec `json:"spec"`
// +optional
Spec TenantSpec `json:"spec,omitzero"`
// +optional
Status TenantStatus `json:"status,omitzero"`
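
To make the deprecations concrete, a before/after sketch of registry enforcement (values are illustrative; the deprecated fields keep working for now):

# Deprecated form
spec:
  containerRegistries:
    allowed: ["registry.example.com"]
  imagePullPolicies: ["IfNotPresent"]

# New form via the rules construct introduced above
spec:
  rules:
    - enforce:
        registries:
          - url: "registry.example.com/.*"
            policy: ["IfNotPresent"]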

View File

@@ -130,6 +130,13 @@ func (in *CapsuleConfigurationSpec) DeepCopyInto(out *CapsuleConfigurationSpec)
*out = make(api.UserListSpec, len(*in))
copy(*out, *in)
}
in.Admission.DeepCopyInto(&out.Admission)
if in.RBAC != nil {
in, out := &in.RBAC, &out.RBAC
*out = new(RBACConfiguration)
(*in).DeepCopyInto(*out)
}
out.CacheInvalidation = in.CacheInvalidation
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapsuleConfigurationSpec.
@@ -145,6 +152,7 @@ func (in *CapsuleConfigurationSpec) DeepCopy() *CapsuleConfigurationSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CapsuleConfigurationStatus) DeepCopyInto(out *CapsuleConfigurationStatus) {
*out = *in
in.LastCacheInvalidation.DeepCopyInto(&out.LastCacheInvalidation)
if in.Users != nil {
in, out := &in.Users, &out.Users
*out = make(api.UserListSpec, len(*in))
@@ -177,6 +185,53 @@ func (in *CapsuleResources) DeepCopy() *CapsuleResources {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DynamicAdmission) DeepCopyInto(out *DynamicAdmission) {
*out = *in
in.Mutating.DeepCopyInto(&out.Mutating)
in.Validating.DeepCopyInto(&out.Validating)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicAdmission.
func (in *DynamicAdmission) DeepCopy() *DynamicAdmission {
if in == nil {
return nil
}
out := new(DynamicAdmission)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DynamicAdmissionConfig) DeepCopyInto(out *DynamicAdmissionConfig) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
in.Client.DeepCopyInto(&out.Client)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicAdmissionConfig.
func (in *DynamicAdmissionConfig) DeepCopy() *DynamicAdmissionConfig {
if in == nil {
return nil
}
out := new(DynamicAdmissionConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayOptions) DeepCopyInto(out *GatewayOptions) {
*out = *in
@@ -357,6 +412,65 @@ func (in *NamespaceOptions) DeepCopy() *NamespaceOptions {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceRule) DeepCopyInto(out *NamespaceRule) {
*out = *in
in.NamespaceRuleBody.DeepCopyInto(&out.NamespaceRuleBody)
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceRule.
func (in *NamespaceRule) DeepCopy() *NamespaceRule {
if in == nil {
return nil
}
out := new(NamespaceRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceRuleBody) DeepCopyInto(out *NamespaceRuleBody) {
*out = *in
in.Enforce.DeepCopyInto(&out.Enforce)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceRuleBody.
func (in *NamespaceRuleBody) DeepCopy() *NamespaceRuleBody {
if in == nil {
return nil
}
out := new(NamespaceRuleBody)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceRuleEnforceBody) DeepCopyInto(out *NamespaceRuleEnforceBody) {
*out = *in
if in.Registries != nil {
in, out := &in.Registries, &out.Registries
*out = make([]api.OCIRegistry, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceRuleEnforceBody.
func (in *NamespaceRuleEnforceBody) DeepCopy() *NamespaceRuleEnforceBody {
if in == nil {
return nil
}
out := new(NamespaceRuleEnforceBody)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeMetadata) DeepCopyInto(out *NodeMetadata) {
*out = *in
@@ -482,6 +596,31 @@ func (in ProcessedItems) DeepCopy() ProcessedItems {
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RBACConfiguration) DeepCopyInto(out *RBACConfiguration) {
*out = *in
if in.AdministrationClusterRoles != nil {
in, out := &in.AdministrationClusterRoles, &out.AdministrationClusterRoles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PromotionClusterRoles != nil {
in, out := &in.PromotionClusterRoles, &out.PromotionClusterRoles
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBACConfiguration.
func (in *RBACConfiguration) DeepCopy() *RBACConfiguration {
if in == nil {
return nil
}
out := new(RBACConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RawExtension) DeepCopyInto(out *RawExtension) {
*out = *in
@@ -925,6 +1064,80 @@ func (in *ResourceSpec) DeepCopy() *ResourceSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuleStatus) DeepCopyInto(out *RuleStatus) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleStatus.
func (in *RuleStatus) DeepCopy() *RuleStatus {
if in == nil {
return nil
}
out := new(RuleStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RuleStatus) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuleStatusList) DeepCopyInto(out *RuleStatusList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]RuleStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleStatusList.
func (in *RuleStatusList) DeepCopy() *RuleStatusList {
if in == nil {
return nil
}
out := new(RuleStatusList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RuleStatusList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuleStatusSpec) DeepCopyInto(out *RuleStatusSpec) {
*out = *in
in.Rule.DeepCopyInto(&out.Rule)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleStatusSpec.
func (in *RuleStatusSpec) DeepCopy() *RuleStatusSpec {
if in == nil {
return nil
}
out := new(RuleStatusSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Tenant) DeepCopyInto(out *Tenant) {
*out = *in
@@ -1241,6 +1454,17 @@ func (in *TenantResourceStatus) DeepCopy() *TenantResourceStatus {
func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
*out = *in
in.Permissions.DeepCopyInto(&out.Permissions)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]*NamespaceRule, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(NamespaceRule)
(*in).DeepCopyInto(*out)
}
}
}
if in.Owners != nil {
in, out := &in.Owners, &out.Owners
*out = make(api.OwnerListSpec, len(*in))
@@ -1269,11 +1493,6 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
(*in).DeepCopyInto(*out)
}
in.IngressOptions.DeepCopyInto(&out.IngressOptions)
if in.ContainerRegistries != nil {
in, out := &in.ContainerRegistries, &out.ContainerRegistries
*out = new(api.AllowedListSpec)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
@@ -1281,8 +1500,6 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
(*out)[key] = val
}
}
in.NetworkPolicies.DeepCopyInto(&out.NetworkPolicies)
in.LimitRanges.DeepCopyInto(&out.LimitRanges)
in.ResourceQuota.DeepCopyInto(&out.ResourceQuota)
if in.AdditionalRoleBindings != nil {
in, out := &in.AdditionalRoleBindings, &out.AdditionalRoleBindings
@@ -1291,11 +1508,6 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ImagePullPolicies != nil {
in, out := &in.ImagePullPolicies, &out.ImagePullPolicies
*out = make([]api.ImagePullPolicySpec, len(*in))
copy(*out, *in)
}
if in.RuntimeClasses != nil {
in, out := &in.RuntimeClasses, &out.RuntimeClasses
*out = new(api.DefaultAllowedListSpec)
@@ -1317,6 +1529,18 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
*out = new(bool)
**out = **in
}
if in.ContainerRegistries != nil {
in, out := &in.ContainerRegistries, &out.ContainerRegistries
*out = new(api.AllowedListSpec)
(*in).DeepCopyInto(*out)
}
if in.ImagePullPolicies != nil {
in, out := &in.ImagePullPolicies, &out.ImagePullPolicies
*out = make([]api.ImagePullPolicySpec, len(*in))
copy(*out, *in)
}
in.NetworkPolicies.DeepCopyInto(&out.NetworkPolicies)
in.LimitRanges.DeepCopyInto(&out.LimitRanges)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantSpec.
@@ -1375,6 +1599,28 @@ func (in *TenantStatus) DeepCopy() *TenantStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TenantStatusNamespaceEnforcement) DeepCopyInto(out *TenantStatusNamespaceEnforcement) {
*out = *in
if in.Registries != nil {
in, out := &in.Registries, &out.Registries
*out = make([]api.OCIRegistry, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantStatusNamespaceEnforcement.
func (in *TenantStatusNamespaceEnforcement) DeepCopy() *TenantStatusNamespaceEnforcement {
if in == nil {
return nil
}
out := new(TenantStatusNamespaceEnforcement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TenantStatusNamespaceItem) DeepCopyInto(out *TenantStatusNamespaceItem) {
*out = *in
@@ -1390,6 +1636,7 @@ func (in *TenantStatusNamespaceItem) DeepCopyInto(out *TenantStatusNamespaceItem
*out = new(TenantStatusNamespaceMetadata)
(*in).DeepCopyInto(*out)
}
in.Enforce.DeepCopyInto(&out.Enforce)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantStatusNamespaceItem.

View File

@@ -115,6 +115,7 @@ The following Values have changed key or Value:
| manager.options.administrators | list | `[]` | Define entities which can act as Administrators in the capsule construct. These entities are automatically owners for all existing tenants. Meaning they can add namespaces to any tenant. However they must be specific by using the capsule label for interacting with namespaces. Because if that label is not defined, it's assumed that namespace interaction was not targeted towards a tenant and will therefore be ignored by capsule. May also be handy in GitOps scenarios where certain service accounts need to be able to manage namespaces for all tenants. |
| manager.options.allowServiceAccountPromotion | bool | `false` | ServiceAccounts within tenant namespaces can be promoted to owners of the given tenant this can be achieved by labeling the serviceaccount and then they are considered owners. This can only be done by other owners of the tenant. However ServiceAccounts which have been promoted to owner can not promote further serviceAccounts. |
| manager.options.annotations | object | `{}` | Additional annotations to add to the CapsuleConfiguration resource |
| manager.options.cacheInvalidation | string | `"24h0m0s"` | Duration after which the in-memory cache is invalidated (based on usage) and re-fetched from the API server |
| manager.options.capsuleConfiguration | string | `"default"` | Change the default name of the capsule configuration name |
| manager.options.capsuleUserGroups | list | `[]` | DEPRECATED: use users properties. Names of the users considered as Capsule users. |
| manager.options.createConfiguration | bool | `true` | Create Configuration |
@@ -125,6 +126,11 @@ The following Values have changed key or Value:
| manager.options.logLevel | string | `"info"` | Set the log verbosity of the capsule with a value from 1 to 5 |
| manager.options.nodeMetadata | object | `{"forbiddenAnnotations":{"denied":[],"deniedRegex":""},"forbiddenLabels":{"denied":[],"deniedRegex":""}}` | Allows to set the forbidden metadata for the worker nodes that could be patched by a Tenant |
| manager.options.protectedNamespaceRegex | string | `""` | If specified, disallows creation of namespaces matching the passed regexp |
| manager.options.rbac | object | `{"administrationClusterRoles":["capsule-namespace-deleter"],"deleter":"capsule-namespace-deleter","promotionClusterRoles":["capsule-namespace-provisioner","capsule-namespace-deleter"],"provisioner":"capsule-namespace-provisioner"}` | Managed RBAC configuration for the controller |
| manager.options.rbac.administrationClusterRoles | list | `["capsule-namespace-deleter"]` | The ClusterRoles applied for Administrators |
| manager.options.rbac.deleter | string | `"capsule-namespace-deleter"` | Name for the ClusterRole required to grant Namespace Deletion permissions. |
| manager.options.rbac.promotionClusterRoles | list | `["capsule-namespace-provisioner","capsule-namespace-deleter"]` | The ClusterRoles applied for ServiceAccounts which had owner Promotion |
| manager.options.rbac.provisioner | string | `"capsule-namespace-provisioner"` | Name for the ClusterRole required to grant Namespace Provision permissions. |
| manager.options.userNames | list | `[]` | DEPRECATED: use users properties. Names of the users considered as Capsule users. |
| manager.options.users | list | `[{"kind":"Group","name":"projectcapsule.dev"}]` | Define entities which are considered part of the Capsule construct. Users not mentioned here will be ignored by Capsule |
| manager.options.workers | int | `1` | Workers (MaxConcurrentReconciles) is the maximum number of concurrent Reconciles which can be run (ALPHA). |
@@ -166,6 +172,7 @@ The following Values have changed key or Value:
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| webhooks.annotations | object | `{}` | Additional Annotations for all webhooks |
| webhooks.exclusive | bool | `false` | When `crds.exclusive` is `true` the webhooks will be installed |
| webhooks.hooks.config.enabled | bool | `true` | Enable the Hook |
| webhooks.hooks.config.failurePolicy | string | `"Ignore"` | [FailurePolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy) |
@@ -210,6 +217,13 @@ The following Values have changed key or Value:
| webhooks.hooks.ingresses.namespaceSelector | object | `{"matchExpressions":[{"key":"capsule.clastix.io/tenant","operator":"Exists"}]}` | [NamespaceSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector) |
| webhooks.hooks.ingresses.objectSelector | object | `{}` | [ObjectSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector) |
| webhooks.hooks.ingresses.reinvocationPolicy | string | `"Never"` | [ReinvocationPolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#reinvocation-policy) |
| webhooks.hooks.managed.enabled | bool | `true` | Enable the Hook |
| webhooks.hooks.managed.failurePolicy | string | `"Fail"` | [FailurePolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy) |
| webhooks.hooks.managed.matchConditions | list | `[]` | [MatchConditions](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) |
| webhooks.hooks.managed.matchPolicy | string | `"Exact"` | [MatchPolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) |
| webhooks.hooks.managed.namespaceSelector | object | `{"matchExpressions":[{"key":"capsule.clastix.io/tenant","operator":"Exists"}]}` | [NamespaceSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector) |
| webhooks.hooks.managed.objectSelector | object | `{"matchExpressions":[{"key":"projectcapsule.dev/managed-by","operator":"Exists"}]}` | [ObjectSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector) |
| webhooks.hooks.managed.rules | list | `[{"apiGroups":["*"],"apiVersions":["*"],"operations":["UPDATE","DELETE"],"resources":["*"],"scope":"*"}]` | [Rules](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-rules) |
| webhooks.hooks.namespaceOwnerReference | object | `{}` | Deprecated, use webhooks.hooks.namespaces instead |
| webhooks.hooks.namespaces.enabled | bool | `true` | Enable the Hook |
| webhooks.hooks.namespaces.failurePolicy | string | `"Fail"` | [FailurePolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy) |
@@ -276,12 +290,7 @@ The following Values have changed key or Value:
| webhooks.hooks.tenantLabel.objectSelector | object | `{}` | [ObjectSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector) |
| webhooks.hooks.tenantLabel.reinvocationPolicy | string | `"Never"` | [ReinvocationPolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#reinvocation-policy) |
| webhooks.hooks.tenantLabel.rules | list | `[{"apiGroups":["*"],"apiVersions":["*"],"operations":["CREATE","UPDATE"],"resources":["*"],"scope":"Namespaced"}]` | [Rules](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-rules) |
| webhooks.hooks.tenantResourceObjects.enabled | bool | `true` | Enable the Hook |
| webhooks.hooks.tenantResourceObjects.failurePolicy | string | `"Fail"` | [FailurePolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy) |
| webhooks.hooks.tenantResourceObjects.matchConditions | list | `[]` | [MatchConditions](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) |
| webhooks.hooks.tenantResourceObjects.matchPolicy | string | `"Exact"` | [MatchPolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) |
| webhooks.hooks.tenantResourceObjects.namespaceSelector | object | `{"matchExpressions":[{"key":"capsule.clastix.io/tenant","operator":"Exists"}]}` | [NamespaceSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector) |
| webhooks.hooks.tenantResourceObjects.objectSelector | object | `{"matchExpressions":[{"key":"capsule.clastix.io/tenant","operator":"Exists"}]}` | [ObjectSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector) |
| webhooks.hooks.tenantResourceObjects | object | `{}` | Deprecated, use webhooks.hooks.managed instead |
| webhooks.hooks.tenants.enabled | bool | `true` | Enable the Hook |
| webhooks.hooks.tenants.failurePolicy | string | `"Fail"` | [FailurePolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy) |
| webhooks.hooks.tenants.matchConditions | list | `[]` | [MatchConditions](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) |
@@ -289,6 +298,7 @@ The following Values have changed key or Value:
| webhooks.hooks.tenants.namespaceSelector | object | `{}` | [NamespaceSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector) |
| webhooks.hooks.tenants.objectSelector | object | `{}` | [ObjectSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector) |
| webhooks.hooks.tenants.reinvocationPolicy | string | `"Never"` | [ReinvocationPolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#reinvocation-policy) |
| webhooks.labels | object | `{}` | Additional Labels for all webhooks |
| webhooks.mutatingWebhooksTimeoutSeconds | int | `30` | Timeout in seconds for mutating webhooks |
| webhooks.service.caBundle | string | `""` | CABundle for the webhook service |
| webhooks.service.name | string | `""` | Custom service name for the webhook service |
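
A hedged values.yaml fragment covering the new chart options listed above (defaults taken from this table; adjust to your setup):

manager:
  options:
    cacheInvalidation: 24h0m0s
    rbac:
      administrationClusterRoles:
        - capsule-namespace-deleter
      promotionClusterRoles:
        - capsule-namespace-provisioner
        - capsule-namespace-deleter
      deleter: capsule-namespace-deleter
      provisioner: capsule-namespace-provisioner
webhooks:
  labels: {}          # additional labels for all webhooks
  annotations: {}     # additional annotations for all webhooks
  exclusive: false
  hooks:
    managed:
      enabled: true
      failurePolicy: Fail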

View File

@@ -64,6 +64,195 @@ spec:
- name
type: object
type: array
admission:
description: Configuration for dynamic Validating and Mutating Admission
webhooks managed by Capsule.
properties:
mutating:
description: Configure dynamic Mutating Admission for Capsule
properties:
annotations:
additionalProperties:
type: string
description: Annotations added to the Admission Webhook
type: object
client:
description: From the upstream struct
properties:
caBundle:
description: |-
`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
If unspecified, system trust roots on the apiserver are used.
format: byte
type: string
service:
description: |-
`service` is a reference to the service for this webhook. Either
`service` or `url` must be specified.
If the webhook is running within the cluster, then you should use `service`.
properties:
name:
description: |-
`name` is the name of the service.
Required
type: string
namespace:
description: |-
`namespace` is the namespace of the service.
Required
type: string
path:
description: |-
`path` is an optional URL path which will be sent in any request to
this service.
type: string
port:
description: |-
If specified, the port on the service that hosting webhook.
Default to 443 for backward compatibility.
`port` should be a valid port number (1-65535, inclusive).
format: int32
type: integer
required:
- name
- namespace
type: object
url:
description: |-
`url` gives the location of the webhook, in standard URL form
(`scheme://host:port/path`). Exactly one of `url` or `service`
must be specified.
The `host` should not refer to a service running in the cluster; use
the `service` field instead. The host might be resolved via external
DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
in-cluster DNS as that would be a layering violation). `host` may
also be an IP address.
Please note that using `localhost` or `127.0.0.1` as a `host` is
risky unless you take great care to run this webhook on all hosts
which run an apiserver which might need to make calls to this
webhook. Such installs are likely to be non-portable, i.e., not easy
to turn up in a new cluster.
The scheme must be "https"; the URL must begin with "https://".
A path is optional, and if present may be any string permissible in
a URL. You may use the path to pass an arbitrary string to the
webhook, for example, a cluster identifier.
Attempting to use a user or basic auth e.g. "user:password@" is not
allowed. Fragments ("#...") and query parameters ("?...") are not
allowed, either.
type: string
type: object
labels:
additionalProperties:
type: string
description: Labels added to the Admission Webhook
type: object
name:
description: Name the Admission Webhook
maxLength: 253
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- client
type: object
validating:
description: Configure dynamic Validating Admission for Capsule
properties:
annotations:
additionalProperties:
type: string
description: Annotations added to the Admission Webhook
type: object
client:
description: From the upstream struct
properties:
caBundle:
description: |-
`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
If unspecified, system trust roots on the apiserver are used.
format: byte
type: string
service:
description: |-
`service` is a reference to the service for this webhook. Either
`service` or `url` must be specified.
If the webhook is running within the cluster, then you should use `service`.
properties:
name:
description: |-
`name` is the name of the service.
Required
type: string
namespace:
description: |-
`namespace` is the namespace of the service.
Required
type: string
path:
description: |-
`path` is an optional URL path which will be sent in any request to
this service.
type: string
port:
description: |-
If specified, the port on the service that hosting webhook.
Default to 443 for backward compatibility.
`port` should be a valid port number (1-65535, inclusive).
format: int32
type: integer
required:
- name
- namespace
type: object
url:
description: |-
`url` gives the location of the webhook, in standard URL form
(`scheme://host:port/path`). Exactly one of `url` or `service`
must be specified.
The `host` should not refer to a service running in the cluster; use
the `service` field instead. The host might be resolved via external
DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
in-cluster DNS as that would be a layering violation). `host` may
also be an IP address.
Please note that using `localhost` or `127.0.0.1` as a `host` is
risky unless you take great care to run this webhook on all hosts
which run an apiserver which might need to make calls to this
webhook. Such installs are likely to be non-portable, i.e., not easy
to turn up in a new cluster.
The scheme must be "https"; the URL must begin with "https://".
A path is optional, and if present may be any string permissible in
a URL. You may use the path to pass an arbitrary string to the
webhook, for example, a cluster identifier.
Attempting to use a user or basic auth e.g. "user:password@" is not
allowed. Fragments ("#...") and query parameters ("?...") are not
allowed, either.
type: string
type: object
labels:
additionalProperties:
type: string
description: Labels added to the Admission Webhook
type: object
name:
description: Name the Admission Webhook
maxLength: 253
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- client
type: object
type: object
allowServiceAccountPromotion:
default: false
description: |-
@@ -71,6 +260,11 @@ spec:
this can be achieved by labeling the serviceaccount and then they are considered owners. This can only be done by other owners of the tenant.
However ServiceAccounts which have been promoted to owner can not promote further serviceAccounts.
type: boolean
cacheInvalidation:
default: 24h
description: Define the period of time after which a cache invalidation is
executed for all caches.
type: string
enableTLSReconciler:
default: false
description: |-
@@ -152,6 +346,37 @@ spec:
description: Disallow creation of namespaces, whose name matches this
regexp
type: string
rbac:
default: {}
description: Define Properties for managed ClusterRoles by Capsule
properties:
administrationClusterRoles:
default:
- capsule-namespace-deleter
description: The ClusterRoles applied for Administrators
items:
type: string
type: array
deleter:
default: capsule-namespace-deleter
description: Name for the ClusterRole required to grant Namespace
Deletion permissions.
type: string
promotionClusterRoles:
default:
- capsule-namespace-provisioner
- capsule-namespace-deleter
description: The ClusterRoles applied for ServiceAccounts which
had owner Promotion
items:
type: string
type: array
provisioner:
default: capsule-namespace-provisioner
description: Name for the ClusterRole required to grant Namespace
Provision permissions.
type: string
type: object
userGroups:
description: |-
Deprecated: use users property instead (https://projectcapsule.dev/docs/operating/setup/configuration/#users)
@@ -191,12 +416,18 @@ spec:
type: object
type: array
required:
- cacheInvalidation
- enableTLSReconciler
- rbac
type: object
status:
description: CapsuleConfigurationStatus defines the Capsule configuration
status.
properties:
lastCacheInvalidation:
description: Last time all caches were invalidated
format: date-time
type: string
users:
description: Users which are considered Capsule Users and are bound
to the Capsule Tenant construct.

View File

@@ -0,0 +1,94 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.0
name: rulestatuses.capsule.clastix.io
spec:
group: capsule.clastix.io
names:
kind: RuleStatus
listKind: RuleStatusList
plural: rulestatuses
singular: rulestatus
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Age
jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta2
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: RuleStatus contains the accumulated rules applying to the namespace
it's deployed in.
properties:
rule:
description: Managed Enforcement properties per Namespace (aggregated
from rules)
properties:
enforce:
description: Enforcement Rules applied
properties:
registries:
description: |-
Define registries which are allowed to be used within this tenant
The rules are aggregated, since you can use Regular Expressions to match registry endpoints
items:
properties:
policy:
description: Allowed PullPolicy for the given registry.
Supplying no value allows all policies.
items:
description: PullPolicy describes a policy for if/when
to pull a container image
type: string
type: array
url:
description: OCI Registry endpoint, is treated as regular
expression.
type: string
validation:
default:
- pod/images
- pod/volumes
description: Requesting Resources
items:
enum:
- pod/images
- pod/volumes
type: string
type: array
required:
- url
type: object
type: array
type: object
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -1191,9 +1191,10 @@ spec:
type: object
type: array
containerRegistries:
description: Specifies the trusted Image Registries assigned to the
Tenant. Capsule assures that all Pods resources created in the Tenant
can use only one of the allowed trusted registries. Optional.
description: |-
Deprecated: Use Enforcement.Registries instead
Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
properties:
allowed:
description: Match exact elements which are allowed as class names
@@ -1346,9 +1347,10 @@ spec:
x-kubernetes-map-type: atomic
type: object
imagePullPolicies:
description: Specify the allowed values for the imagePullPolicies
option in Pod resources. Capsule assures that all Pod resources
created in the Tenant can use only one of the allowed policy. Optional.
description: |-
Deprecated: Use Enforcement.Registries instead
Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
items:
enum:
- Always
@@ -2464,6 +2466,100 @@ spec:
- Namespace
type: string
type: object
rules:
description: |-
Specify enforcement specifications for the scope of the Tenant.
We are moving all configuration enforcement per namespace into a rule construct.
It's currently not final.
Read More: https://projectcapsule.dev/docs/tenants/rules/
items:
properties:
enforce:
description: Enforcement Rules applied
properties:
registries:
description: |-
Define registries which are allowed to be used within this tenant
The rules are aggregated, since you can use Regular Expressions to match registry endpoints
items:
properties:
policy:
description: Allowed PullPolicy for the given registry.
Supplying no value allows all policies.
items:
description: PullPolicy describes a policy for if/when
to pull a container image
type: string
type: array
url:
description: OCI Registry endpoint, is treated as
regular expression.
type: string
validation:
default:
- pod/images
- pod/volumes
description: Requesting Resources
items:
enum:
- pod/images
- pod/volumes
type: string
type: array
required:
- url
type: object
type: array
type: object
namespaceSelector:
description: Select namespaces to which this rule applies
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: object
type: array
runtimeClasses:
description: |-
Specifies the allowed RuntimeClasses assigned to the Tenant.
@@ -2854,6 +2950,41 @@ spec:
- type
type: object
type: array
enforce:
description: Managed Enforcement
properties:
registry:
description: Registries which are allowed within this namespace
items:
properties:
policy:
description: Allowed PullPolicy for the given registry.
Supplying no value allows all policies.
items:
description: PullPolicy describes a policy for if/when
to pull a container image
type: string
type: array
url:
description: OCI Registry endpoint, is treated as
regular expression.
type: string
validation:
default:
- pod/images
- pod/volumes
description: Requesting Resources
items:
enum:
- pod/images
- pod/volumes
type: string
type: array
required:
- url
type: object
type: array
type: object
metadata:
description: Managed Metadata
properties:
@@ -2892,8 +3023,6 @@ spec:
- size
- state
type: object
required:
- spec
type: object
served: true
storage: true

View File

@@ -155,6 +155,24 @@ service:
{{- end }}
{{- end }}
{{/*
Capsule Webhook service (Without Path)
*/}}
{{- define "capsule.webhooks.serviceConfig" -}}
{{- include "capsule.webhooks.cabundle" $ | nindent 0 }}
{{- if $.Values.webhooks.service.url }}
url: {{ trimSuffix "/" $.Values.webhooks.service.url }}
{{- else }}
service:
name: {{ default (printf "%s-webhook-service" (include "capsule.fullname" $)) $.Values.webhooks.service.name }}
namespace: {{ default $.Release.Namespace $.Values.webhooks.service.namespace }}
port: {{ default 443 $.Values.webhooks.service.port }}
{{- end }}
{{- end }}
{{/*
Capsule Webhook endpoint CA Bundle
*/}}
@@ -180,3 +198,22 @@ caBundle: {{ $.Values.webhooks.service.caBundle -}}
{{- $joined := join "," $sizes -}}
{{- sha256sum $joined -}}
{{- end -}}
{{- define "admission.labels" -}}
{{- with $.Values.webhooks.labels }}
{{- toYaml . | nindent 0 }}
{{- end }}
{{- end }}
{{- define "admission.annotations" -}}
{{- if and ($.Values.certManager.generateCertificates) (not $.Values.webhooks.service.caBundle) }}
cert-manager.io/inject-ca-from: {{ $.Release.Namespace }}/{{ include "capsule.fullname" $ }}-webhook-cert
{{- end }}
{{- with $.Values.customAnnotations }}
{{- toYaml . | nindent 0 }}
{{- end }}
{{- with $.Values.webhooks.annotations }}
{{- toYaml . | nindent 0 }}
{{- end }}
{{- end }}

View File

@@ -14,6 +14,34 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
cacheInvalidation: {{ .Values.manager.options.cacheInvalidation }}
rbac:
{{- toYaml .Values.manager.options.rbac | nindent 4 }}
admission:
validating:
name: "{{ include "capsule.fullname" . }}-dynamic"
client:
{{- include "capsule.webhooks.serviceConfig" $ | nindent 8 }}
{{- if (include "admission.labels" $) }}
labels:
{{- include "admission.labels" $ | nindent 8 }}
{{- end }}
{{- if (include "admission.annotations" $) }}
annotations:
{{- include "admission.annotations" $ | nindent 8 }}
{{- end }}
mutating:
name: "{{ include "capsule.fullname" . }}-dynamic"
client:
{{- include "capsule.webhooks.serviceConfig" $ | nindent 8 }}
{{- if (include "admission.labels" $) }}
labels:
{{- include "admission.labels" $ | nindent 8 }}
{{- end }}
{{- if (include "admission.annotations" $) }}
annotations:
{{- include "admission.annotations" $ | nindent 8 }}
{{- end }}
administrators:
{{- toYaml .Values.manager.options.administrators | nindent 4 }}
users:
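
With the chart defaults, the template above renders roughly the following CapsuleConfiguration spec fragment. This is a sketch for illustration, not verbatim chart output; the webhook name, service name, and namespace depend on the release.

spec:
  cacheInvalidation: 24h0m0s
  rbac:
    administrationClusterRoles:
      - capsule-namespace-deleter
    deleter: capsule-namespace-deleter
    promotionClusterRoles:
      - capsule-namespace-provisioner
      - capsule-namespace-deleter
    provisioner: capsule-namespace-provisioner
  admission:
    validating:
      name: capsule-dynamic
      client:
        service:
          name: capsule-webhook-service
          namespace: capsule-system
          port: 443
    mutating:
      name: capsule-dynamic
      client:
        service:
          name: capsule-webhook-service
          namespace: capsule-system
          port: 443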

View File

@@ -32,6 +32,7 @@ rules:
- globaltenantresources.capsule.clastix.io
- tenants.capsule.clastix.io
- tenantowners.capsule.clastix.io
- rulestatuses.capsule.clastix.io
verbs:
- create
- delete

View File

@@ -3,15 +3,12 @@ apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ include "capsule.fullname" . }}-mutating-webhook-configuration
namespace: {{ $.Release.Namespace }}
labels:
{{- include "capsule.labels" . | nindent 4 }}
{{- include "capsule.labels" $ | nindent 4 }}
{{- include "admission.labels" . | nindent 4 }}
annotations:
{{- if .Values.certManager.generateCertificates }}
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "capsule.fullname" . }}-webhook-cert
{{- end }}
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- include "admission.annotations" . | nindent 4 }}
webhooks:
{{- with (mergeOverwrite .Values.webhooks.hooks.pods .Values.webhooks.hooks.defaults.pods) }}
{{- if .enabled }}

View File

@@ -5,14 +5,10 @@ metadata:
name: {{ include "capsule.fullname" . }}-validating-webhook-configuration
namespace: {{ $.Release.Namespace }}
labels:
{{- include "capsule.labels" . | nindent 4 }}
{{- include "capsule.labels" $ | nindent 4 }}
{{- include "admission.labels" . | nindent 4 }}
annotations:
{{- if .Values.certManager.generateCertificates }}
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "capsule.fullname" . }}-webhook-cert
{{- end }}
{{- with .Values.customAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- include "admission.annotations" . | nindent 4 }}
webhooks:
{{- with .Values.webhooks.hooks.cordoning }}
{{- if .enabled }}
@@ -191,6 +187,8 @@ webhooks:
- DELETE
resources:
- namespaces
- namespaces/status
- namespaces/finalize
scope: '*'
sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
@@ -379,13 +377,13 @@ webhooks:
timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
{{- end }}
{{- end }}
{{- with .Values.webhooks.hooks.tenantResourceObjects }}
{{- with (mergeOverwrite .Values.webhooks.hooks.managed .Values.webhooks.hooks.tenantResourceObjects) }}
{{- if .enabled }}
- name: resource-objects.tenant.projectcapsule.dev
admissionReviewVersions:
- v1
clientConfig:
{{- include "capsule.webhooks.service" (dict "path" "/tenantresource-objects" "ctx" $) | nindent 4 }}
{{- include "capsule.webhooks.service" (dict "path" "/misc/managed" "ctx" $) | nindent 4 }}
failurePolicy: {{ .failurePolicy }}
matchPolicy: {{ .matchPolicy }}
{{- with .namespaceSelector }}
@@ -401,16 +399,7 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups:
- '*'
apiVersions:
- '*'
operations:
- UPDATE
- DELETE
resources:
- '*'
scope: Namespaced
{{- toYaml .rules | nindent 4 }}
sideEffects: None
timeoutSeconds: {{ $.Values.webhooks.validatingWebhooksTimeoutSeconds }}
{{- end }}

View File

@@ -331,6 +331,10 @@
"description": "Additional annotations to add to the CapsuleConfiguration resource",
"type": "object"
},
"cacheInvalidation": {
"description": "Duration after which the in-memory cache is invalidated (based on usaage) and re-fetched from the API server",
"type": "string"
},
"capsuleConfiguration": {
"description": "Change the default name of the capsule configuration name",
"type": "string"
@@ -395,6 +399,34 @@
"description": "If specified, disallows creation of namespaces matching the passed regexp",
"type": "string"
},
"rbac": {
"description": "Managed RBAC configuration for the controller",
"type": "object",
"properties": {
"administrationClusterRoles": {
"description": "The ClusterRoles applied for Administrators",
"type": "array",
"items": {
"type": "string"
}
},
"deleter": {
"description": "Name for the ClusterRole required to grant Namespace Deletion permissions.",
"type": "string"
},
"promotionClusterRoles": {
"description": "The ClusterRoles applied for ServiceAccounts which had owner Promotion",
"type": "array",
"items": {
"type": "string"
}
},
"provisioner": {
"description": "Name for the ClusterRole required to grant Namespace Provision permissions.",
"type": "string"
}
}
},
"userNames": {
"description": "DEPRECATED: use users properties. Names of the users considered as Capsule users.",
"type": "array"
@@ -744,6 +776,10 @@
"webhooks": {
"type": "object",
"properties": {
"annotations": {
"description": "Additional Annotations for all webhooks",
"type": "object"
},
"exclusive": {
"description": "When `crds.exclusive` is `true` the webhooks will be installed",
"type": "boolean"
@@ -1070,6 +1106,103 @@
}
}
},
"managed": {
"type": "object",
"properties": {
"enabled": {
"description": "Enable the Hook",
"type": "boolean"
},
"failurePolicy": {
"description": "[FailurePolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy)",
"type": "string"
},
"matchConditions": {
"description": "[MatchConditions](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)",
"type": "array"
},
"matchPolicy": {
"description": "[MatchPolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)",
"type": "string"
},
"namespaceSelector": {
"description": "[NamespaceSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector)",
"type": "object",
"properties": {
"matchExpressions": {
"type": "array",
"items": {
"type": "object",
"properties": {
"key": {
"type": "string"
},
"operator": {
"type": "string"
}
}
}
}
}
},
"objectSelector": {
"description": "[ObjectSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector)",
"type": "object",
"properties": {
"matchExpressions": {
"type": "array",
"items": {
"type": "object",
"properties": {
"key": {
"type": "string"
},
"operator": {
"type": "string"
}
}
}
}
}
},
"rules": {
"description": "[Rules](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-rules)",
"type": "array",
"items": {
"type": "object",
"properties": {
"apiGroups": {
"type": "array",
"items": {
"type": "string"
}
},
"apiVersions": {
"type": "array",
"items": {
"type": "string"
}
},
"operations": {
"type": "array",
"items": {
"type": "string"
}
},
"resources": {
"type": "array",
"items": {
"type": "string"
}
},
"scope": {
"type": "string"
}
}
}
}
}
},
"namespaceOwnerReference": {
"description": "Deprecated, use webhooks.hooks.namespaces instead",
"type": "object"
@@ -1518,65 +1651,8 @@
}
},
"tenantResourceObjects": {
"type": "object",
"properties": {
"enabled": {
"description": "Enable the Hook",
"type": "boolean"
},
"failurePolicy": {
"description": "[FailurePolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy)",
"type": "string"
},
"matchConditions": {
"description": "[MatchConditions](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)",
"type": "array"
},
"matchPolicy": {
"description": "[MatchPolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)",
"type": "string"
},
"namespaceSelector": {
"description": "[NamespaceSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector)",
"type": "object",
"properties": {
"matchExpressions": {
"type": "array",
"items": {
"type": "object",
"properties": {
"key": {
"type": "string"
},
"operator": {
"type": "string"
}
}
}
}
}
},
"objectSelector": {
"description": "[ObjectSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector)",
"type": "object",
"properties": {
"matchExpressions": {
"type": "array",
"items": {
"type": "object",
"properties": {
"key": {
"type": "string"
},
"operator": {
"type": "string"
}
}
}
}
}
}
}
"description": "Deprecated, use webhooks.hooks.managed instead",
"type": "object"
},
"tenants": {
"type": "object",
@@ -1613,6 +1689,10 @@
}
}
},
"labels": {
"description": "Additional Labels for all webhooks",
"type": "object"
},
"mutatingWebhooksTimeoutSeconds": {
"description": "Timeout in seconds for mutating webhooks",
"type": "integer"

View File

@@ -211,6 +211,25 @@ manager:
forbiddenAnnotations:
denied: []
deniedRegex: ""
# -- Duration after which the in-memory cache is invalidated (based on usage) and re-fetched from the API server
cacheInvalidation: 24h0m0s
# -- Managed RBAC configuration for the controller
rbac:
# -- The ClusterRoles applied for Administrators
administrationClusterRoles:
- capsule-namespace-deleter
# -- The ClusterRoles applied for ServiceAccounts which had owner Promotion
promotionClusterRoles:
- capsule-namespace-provisioner
- capsule-namespace-deleter
# -- Name for the ClusterRole required to grant Namespace Deletion permissions.
deleter: capsule-namespace-deleter
# -- Name for the ClusterRole required to grant Namespace Provision permissions.
provisioner: capsule-namespace-provisioner
# -- DEPRECATED: use users properties.
# Names of the users considered as Capsule users.
userNames: []
@@ -218,7 +237,6 @@ manager:
# Names of the users considered as Capsule users.
capsuleUserGroups: []
# -- A list of extra arguments for the capsule controller
extraArgs:
- "--enable-leader-election=true"
@@ -398,6 +416,12 @@ webhooks:
# -- Timeout in seconds for validating webhooks
validatingWebhooksTimeoutSeconds: 30
# -- Additional Labels for all webhooks
labels: {}
# -- Additional Annotations for all webhooks
annotations: {}
# Configure custom webhook service
service:
# -- The URL where the capsule webhook services are running (Overwrites cluster scoped service definition)
@@ -678,7 +702,7 @@ webhooks:
# -- [ReinvocationPolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#reinvocation-policy)
reinvocationPolicy: Never
tenantResourceObjects:
managed:
# -- Enable the Hook
enabled: true
# -- [FailurePolicy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy)
@@ -688,7 +712,7 @@ webhooks:
# -- [ObjectSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector)
objectSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
- key: "projectcapsule.dev/managed-by"
operator: Exists
# -- [NamespaceSelector](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector)
namespaceSelector:
@@ -697,6 +721,18 @@ webhooks:
operator: Exists
# -- [MatchConditions](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)
matchConditions: []
# -- [Rules](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-rules)
rules:
- apiGroups:
- '*'
apiVersions:
- '*'
operations:
- UPDATE
- DELETE
resources:
- '*'
scope: '*'
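# Since the managed hook rules are now values-driven, an override can narrow the
# hook to specific resources. A hypothetical excerpt (resource names illustrative):
#
# webhooks:
#   hooks:
#     managed:
#       rules:
#         - apiGroups: [""]
#           apiVersions: ["v1"]
#           operations: ["UPDATE", "DELETE"]
#           resources: ["configmaps", "secrets"]
#           scope: Namespaced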
services:
# -- Enable the Hook
@@ -756,3 +792,6 @@ webhooks:
pvc: {}
# -- Deprecated, use webhooks.hooks.pods instead
pods: {}
# -- Deprecated, use webhooks.hooks.managed instead
tenantResourceObjects: {}

View File

@@ -31,6 +31,8 @@ import (
capsulev1beta1 "github.com/projectcapsule/capsule/api/v1beta1"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/internal/cache"
"github.com/projectcapsule/capsule/internal/controllers/admission"
configcontroller "github.com/projectcapsule/capsule/internal/controllers/cfg"
podlabelscontroller "github.com/projectcapsule/capsule/internal/controllers/pod"
"github.com/projectcapsule/capsule/internal/controllers/pv"
@@ -63,8 +65,9 @@ import (
tenantvalidation "github.com/projectcapsule/capsule/internal/webhook/tenant/validation"
tntresource "github.com/projectcapsule/capsule/internal/webhook/tenantresource"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/indexer"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/runtime/indexers"
)
var (
@@ -190,7 +193,7 @@ func main() {
if directCfg.EnableTLSConfiguration() {
tlsReconciler := &tlscontroller.Reconciler{
Client: directClient,
Log: ctrl.Log.WithName("controllers").WithName("TLS"),
Log: ctrl.Log.WithName("capsule.ctrl").WithName("tls"),
Namespace: ns,
Configuration: directCfg,
}
@@ -213,12 +216,14 @@ func main() {
}
}
registryCache := cache.NewRegistryRuleSetCache()
if err = (&tenantcontroller.Manager{
RESTConfig: manager.GetConfig(),
Client: manager.GetClient(),
Metrics: metrics.MustMakeTenantRecorder(),
Log: ctrl.Log.WithName("controllers").WithName("Tenant"),
Recorder: manager.GetEventRecorderFor("tenant-controller"),
Log: ctrl.Log.WithName("capsule.ctrl").WithName("tenant"),
Recorder: manager.GetEventRecorder("tenant-controller"),
Configuration: cfg,
}).SetupWithManager(manager, controllerConfig); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Tenant")
@@ -230,7 +235,7 @@ func main() {
os.Exit(1)
}
if err = indexer.AddToManager(ctx, setupLog, manager); err != nil {
if err = indexers.AddToManager(ctx, setupLog, manager); err != nil {
setupLog.Error(err, "unable to setup indexers")
os.Exit(1)
}
@@ -244,11 +249,12 @@ func main() {
// webhooks: the order matters, don't change it and just append
webhooksList := append(
make([]webhook.Webhook, 0),
make([]handlers.Webhook, 0),
route.Pod(
pod.Handler(
pod.ImagePullPolicy(),
pod.ContainerRegistry(cfg),
pod.ContainerRegistryLegacy(cfg),
pod.ContainerRegistry(cfg, registryCache),
pod.PriorityClass(),
pod.RuntimeClass(),
),
@@ -265,10 +271,10 @@ func main() {
service.Validating(),
),
),
route.TenantResourceObjects(utils.InCapsuleGroups(cfg, tntresource.WriteOpsHandler())),
route.NetworkPolicy(utils.InCapsuleGroups(cfg, networkpolicy.Handler())),
route.TenantResourceObjects(handlers.InCapsuleGroups(cfg, tntresource.WriteOpsHandler())),
route.NetworkPolicy(handlers.InCapsuleGroups(cfg, networkpolicy.Handler())),
route.Cordoning(tenantvalidation.CordoningHandler(cfg)),
route.Node(utils.InCapsuleGroups(cfg, node.UserMetadataHandler(cfg, kubeVersion))),
route.Node(handlers.InCapsuleGroups(cfg, node.UserMetadataHandler(cfg, kubeVersion))),
route.ServiceAccounts(
serviceaccounts.Handler(
serviceaccounts.Validating(cfg),
@@ -287,6 +293,7 @@ func main() {
tenantvalidation.IngressClassRegexHandler(),
tenantvalidation.StorageClassRegexHandler(),
tenantvalidation.ContainerRegistryRegexHandler(),
tenantvalidation.RuleHandler(),
tenantvalidation.HostnameRegexHandler(),
tenantvalidation.FreezedEmitter(),
tenantvalidation.ServiceAccountNameHandler(),
@@ -316,9 +323,12 @@ func main() {
route.ResourcePoolValidation((resourcepool.PoolValidationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepool")))),
route.ResourcePoolClaimMutation((resourcepool.ClaimMutationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepoolclaims")))),
route.ResourcePoolClaimValidation((resourcepool.ClaimValidationHandler(ctrl.Log.WithName("webhooks").WithName("resourcepoolclaims")))),
route.TenantAssignment(
route.MiscTenantAssignment(
misc.TenantAssignmentHandler(),
),
route.MiscManagedValidation(
handlers.InCapsuleGroups(cfg, misc.ManagedValidatingHandler()),
),
route.ConfigValidation(
cfgvalidation.WarningHandler(),
),
@@ -326,7 +336,7 @@ func main() {
nodeWebhookSupported, _ := utils.NodeWebhookSupported(kubeVersion)
if !nodeWebhookSupported {
setupLog.Info("Disabling node labels verification webhook as current Kubernetes version doesn't have fix for CVE-2021-25735")
setupLog.Info("disabling node labels verification webhook as current Kubernetes version doesn't have fix for CVE-2021-25735")
}
if err = webhook.Register(manager, webhooksList...); err != nil {
@@ -335,7 +345,7 @@ func main() {
}
rbacManager := &rbaccontroller.Manager{
Log: ctrl.Log.WithName("controllers").WithName("Rbac"),
Log: ctrl.Log.WithName("capsule.ctrl").WithName("rbac"),
Client: manager.GetClient(),
Configuration: cfg,
}
@@ -351,14 +361,14 @@ func main() {
}
if err = (&servicelabelscontroller.ServicesLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("ServiceLabels"),
Log: ctrl.Log.WithName("capsule.ctrl").WithName("services"),
}).SetupWithManager(ctx, manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ServiceLabels")
os.Exit(1)
}
if err = (&servicelabelscontroller.EndpointSlicesLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("EndpointSliceLabels"),
Log: ctrl.Log.WithName("capsule.ctrl").WithName("endpointslices"),
}).SetupWithManager(ctx, manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "EndpointSliceLabels")
}
@@ -375,7 +385,8 @@ func main() {
if err = (&configcontroller.Manager{
Client: manager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("CapsuleConfiguration"),
RegistryCache: registryCache,
Log: ctrl.Log.WithName("capsule.ctrl").WithName("configuration"),
}).SetupWithManager(manager, controllerConfig); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CapsuleConfiguration")
os.Exit(1)
@@ -391,10 +402,21 @@ func main() {
os.Exit(1)
}
if err := resourcepools.Add(
ctrl.Log.WithName("controllers").WithName("ResourcePools"),
if err := admission.Add(
ctrl.Log.WithName("capsule.ctrl").WithName("admission"),
manager,
manager.GetEventRecorderFor("pools-ctrl"),
manager.GetEventRecorder("admission-ctrl"),
controllerConfig,
cfg,
); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "admission")
os.Exit(1)
}
if err := resourcepools.Add(
ctrl.Log.WithName("capsule.ctrl").WithName("resourcepools"),
manager,
manager.GetEventRecorder("pools-ctrl"),
controllerConfig,
); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "resourcepools")

View File

@@ -393,6 +393,7 @@ var _ = Describe("creating a Namespace for a Tenant with additional metadata", L
"matching_namespace_label": "matching_namespace_label_value",
"capsule.clastix.io/tenant": tnt.GetName(),
"kubernetes.io/metadata.name": ns.GetName(),
"env": "e2e",
}
Eventually(func() map[string]string {
@@ -490,6 +491,7 @@ var _ = Describe("creating a Namespace for a Tenant with additional metadata", L
"matching_namespace_label": "matching_namespace_label_value",
"capsule.clastix.io/tenant": tnt.GetName(),
"kubernetes.io/metadata.name": ns.GetName(),
"env": "e2e",
}
Eventually(func() map[string]string {
got := &corev1.Namespace{}
@@ -579,6 +581,7 @@ var _ = Describe("creating a Namespace for a Tenant with additional metadata", L
"matching_namespace_label": "matching_namespace_label_value",
"capsule.clastix.io/tenant": tnt.GetName(),
"kubernetes.io/metadata.name": ns.GetName(),
"env": "e2e",
}
Eventually(func() map[string]string {
got := &corev1.Namespace{}
@@ -660,6 +663,7 @@ var _ = Describe("creating a Namespace for a Tenant with additional metadata", L
"matching_namespace_label": "matching_namespace_label_value",
"capsule.clastix.io/tenant": tnt.GetName(),
"kubernetes.io/metadata.name": ns.GetName(),
"env": "e2e",
}
Eventually(func() map[string]string {
got := &corev1.Namespace{}

158
e2e/rules_managed_test.go Normal file
View File

@@ -0,0 +1,158 @@
package e2e
import (
"context"
"fmt"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var _ = Describe("NamespaceStatus objects", Label("tenant", "rules"), func() {
ctx := context.Background()
// Two tenants, each with one owner (reusing the existing ownerClient/NamespaceCreation helpers)
tntA := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{Name: "nsstatus-a"},
Spec: capsulev1beta2.TenantSpec{
Owners: api.OwnerListSpec{
{
CoreOwnerSpec: api.CoreOwnerSpec{
UserSpec: api.UserSpec{Name: "matt", Kind: "User"},
},
},
},
},
}
tntB := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{Name: "nsstatus-b"},
Spec: capsulev1beta2.TenantSpec{
Owners: api.OwnerListSpec{
{
CoreOwnerSpec: api.CoreOwnerSpec{
UserSpec: api.UserSpec{Name: "matt", Kind: "User"},
},
},
},
},
}
var (
nsA1 *corev1.Namespace
nsA2 *corev1.Namespace
nsB1 *corev1.Namespace
)
JustBeforeEach(func() {
// Create tenants
EventuallyCreation(func() error {
tntA.ResourceVersion = ""
return k8sClient.Create(ctx, tntA)
}).Should(Succeed())
EventuallyCreation(func() error {
tntB.ResourceVersion = ""
return k8sClient.Create(ctx, tntB)
}).Should(Succeed())
// Create namespaces for each tenant using your helper
nsA1 = NewNamespace("rule-status-ns1", map[string]string{
meta.TenantLabel: tntA.GetName(),
})
nsA2 = NewNamespace("rule-status-ns2", map[string]string{
meta.TenantLabel: tntA.GetName(),
})
nsB1 = NewNamespace("rule-status-ns3", map[string]string{
meta.TenantLabel: tntB.GetName(),
})
NamespaceCreation(nsA1, tntA.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
NamespaceCreation(nsA2, tntA.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
NamespaceCreation(nsB1, tntB.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
// Wait until tenants list their namespaces (optional but makes debugging easier)
TenantNamespaceList(tntA, defaultTimeoutInterval).Should(ContainElements(nsA1.GetName(), nsA2.GetName()))
TenantNamespaceList(tntB, defaultTimeoutInterval).Should(ContainElement(nsB1.GetName()))
})
JustAfterEach(func() {
// Best-effort cleanup of namespaces first (the test environment may already handle this)
for _, n := range []*corev1.Namespace{nsA1, nsA2, nsB1} {
if n == nil {
continue
}
_ = k8sClient.Delete(ctx, n)
}
// Delete tenants
if tntA != nil {
_ = k8sClient.Delete(ctx, tntA)
}
if tntB != nil {
_ = k8sClient.Delete(ctx, tntB)
}
})
// --- Helpers ---
expectNamespaceStatusFor := func(ns *corev1.Namespace, tenantName string) {
By(fmt.Sprintf("verifying NamespaceStatus for namespace %q (tenant=%q)", ns.Name, tenantName))
Eventually(func(g Gomega) {
// Re-read namespace to get UID reliably (in case local object is stale)
curNS := &corev1.Namespace{}
g.Expect(k8sClient.Get(ctx, client.ObjectKey{Name: ns.Name}, curNS)).To(Succeed())
nsStatus := &capsulev1beta2.RuleStatus{}
g.Expect(k8sClient.Get(ctx, client.ObjectKey{Name: meta.NameForManagedRuleStatus(), Namespace: ns.Name}, nsStatus)).To(Succeed())
// The OwnerReference must point to the Namespace that owns this status object
g.Expect(nsStatus.OwnerReferences).NotTo(BeEmpty())
var found bool
for _, or := range nsStatus.OwnerReferences {
if or.APIVersion == "v1" &&
or.Kind == "Namespace" &&
or.Name == curNS.Name &&
or.UID == curNS.UID {
found = true
break
}
}
g.Expect(found).To(BeTrue(), "expected NamespaceStatus to have Namespace controller OwnerReference")
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
}
It("creates one NamespaceStatus per namespace, with correct Status.Tenant and Namespace controller OwnerReference", func() {
expectNamespaceStatusFor(nsA1, tntA.Name)
expectNamespaceStatusFor(nsA2, tntA.Name)
expectNamespaceStatusFor(nsB1, tntB.Name)
})
It("removes NamespaceStatus when the Namespace is deleted (ownerReference GC)", func() {
// Ensure it exists first
expectNamespaceStatusFor(nsA1, tntA.Name)
// Delete namespace
Expect(k8sClient.Delete(ctx, nsA1)).To(Succeed())
// Namespace deletion can take time; once it's gone, the status should be GC'd
Eventually(func() bool {
// confirm namespace gone or terminating; either way, check status disappears eventually
nsStatus := &capsulev1beta2.RuleStatus{}
err := k8sClient.Get(ctx, client.ObjectKey{Name: meta.NameForManagedRuleStatus(), Namespace: nsA1.Name}, nsStatus)
return apierrors.IsNotFound(err)
}, defaultTimeoutInterval, defaultPollInterval).Should(BeTrue())
})
})

525
e2e/rules_registry_test.go Normal file
View File

@@ -0,0 +1,525 @@
// Copyright 2020-2023 Project Capsule Authors.
// SPDX-License-Identifier: Apache-2.0
package e2e
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
)
var _ = Describe("enforcing a Container Registry", Label("tenant", "rules", "images", "registry"), func() {
originConfig := &capsulev1beta2.CapsuleConfiguration{}
tnt := &capsulev1beta2.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "container-registry",
},
Spec: capsulev1beta2.TenantSpec{
Owners: api.OwnerListSpec{
{
CoreOwnerSpec: api.CoreOwnerSpec{
UserSpec: api.UserSpec{
Name: "matt",
Kind: "User",
},
},
},
},
Rules: []*capsulev1beta2.NamespaceRule{
{
NamespaceRuleBody: capsulev1beta2.NamespaceRuleBody{
Enforce: capsulev1beta2.NamespaceRuleEnforceBody{
Registries: []api.OCIRegistry{
// Global: allow any registry, but require PullPolicy Always (images+volumes)
{
Registry: ".*",
Validation: []api.RegistryValidationTarget{
api.ValidateImages,
api.ValidateVolumes,
},
Policy: []corev1.PullPolicy{corev1.PullAlways},
},
// More specific harbor rule (no policy override => should NOT remove Always restriction)
{
Registry: "harbor/.*",
Validation: []api.RegistryValidationTarget{
api.ValidateImages,
api.ValidateVolumes,
},
},
},
},
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"environment": "prod",
},
},
NamespaceRuleBody: capsulev1beta2.NamespaceRuleBody{
Enforce: capsulev1beta2.NamespaceRuleEnforceBody{
Registries: []api.OCIRegistry{
// Prod-only special-case
{
Registry: "harbor/production-image/.*",
Validation: []api.RegistryValidationTarget{
api.ValidateImages,
api.ValidateVolumes,
},
Policy: []corev1.PullPolicy{corev1.PullAlways},
},
},
},
},
},
},
},
}
// ---- Small local helpers (keep e2e readable) ----
expectNamespaceStatusRegistries := func(nsName string, want []string) {
Eventually(func(g Gomega) {
nsStatus := &capsulev1beta2.RuleStatus{}
g.Expect(k8sClient.Get(
context.Background(),
client.ObjectKey{Name: meta.NameForManagedRuleStatus(), Namespace: nsName},
nsStatus,
)).To(Succeed())
got := make([]string, 0, len(nsStatus.Status.Rule.Enforce.Registries))
for _, r := range nsStatus.Status.Rule.Enforce.Registries {
got = append(got, r.Registry)
}
g.Expect(got).To(Equal(want))
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
}
createPodAndExpectDenied := func(cs kubernetes.Interface, nsName string, pod *corev1.Pod, substrings ...string) {
base := pod.DeepCopy()
baseName := base.Name
if baseName == "" {
baseName = "pod"
}
Eventually(func() error {
// unique name per attempt to avoid AlreadyExists
p := base.DeepCopy()
p.Name = fmt.Sprintf("%s-%d", baseName, int(time.Now().UnixNano()%1e6))
_, err := cs.CoreV1().Pods(nsName).Create(context.Background(), p, metav1.CreateOptions{})
if err == nil {
_ = cs.CoreV1().Pods(nsName).Delete(context.Background(), p.Name, metav1.DeleteOptions{})
return fmt.Errorf("expected create to be denied, but it succeeded")
}
if apierrors.IsAlreadyExists(err) {
return fmt.Errorf("unexpected AlreadyExists: %v", err)
}
msg := err.Error()
for _, s := range substrings {
if !strings.Contains(msg, s) {
return fmt.Errorf("expected error to contain %q, got: %s", s, msg)
}
}
return nil
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
}
createPodAndExpectAllowed := func(cs kubernetes.Interface, nsName string, pod *corev1.Pod) {
EventuallyCreation(func() error {
_, err := cs.CoreV1().Pods(nsName).Create(context.Background(), pod, metav1.CreateOptions{})
return err
}).Should(Succeed())
}
JustBeforeEach(func() {
Expect(k8sClient.Get(context.Background(), client.ObjectKey{Name: defaultConfigurationName}, originConfig)).To(Succeed())
EventuallyCreation(func() error {
tnt.ResourceVersion = ""
return k8sClient.Create(context.TODO(), tnt)
}).Should(Succeed())
})
JustAfterEach(func() {
Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
// Restore Configuration
Eventually(func() error {
c := &capsulev1beta2.CapsuleConfiguration{}
if err := k8sClient.Get(context.Background(), client.ObjectKey{Name: originConfig.Name}, c); err != nil {
return err
}
c.Spec = originConfig.Spec
return k8sClient.Update(context.Background(), c)
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
})
It("aggregates enforcement rules into NamespaceStatus for a non-prod namespace", func() {
ns := NewNamespace("")
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
// Non-prod: should include only the global rule body (two registries in order)
expectNamespaceStatusRegistries(ns.GetName(), []string{
".*",
"harbor/.*",
})
// Sanity: we can still create a trivial pod with explicit Always (since global allows all registries)
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "sanity"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "c", Image: "gcr.io/google_containers/pause-amd64:3.0", ImagePullPolicy: corev1.PullAlways},
},
},
}
createPodAndExpectAllowed(cs, ns.Name, pod)
})
It("aggregates enforcement rules into NamespaceStatus for a prod namespace", func() {
ns := NewNamespace("", map[string]string{
"environment": "prod",
})
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
// Prod: should include global + prod rule (3 registries in order)
expectNamespaceStatusRegistries(ns.GetName(), []string{
".*",
"harbor/.*",
"harbor/production-image/.*",
})
// Sanity allow with Always
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "prod-sanity"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "c", Image: "harbor/production-image/app:1", ImagePullPolicy: corev1.PullAlways},
},
},
}
createPodAndExpectAllowed(cs, ns.Name, pod)
})
It("denies a container image when pullPolicy is not explicitly set under restriction (dev)", func() {
ns := NewNamespace("")
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
// No ImagePullPolicy set => "" => should be denied because global rule restricts policy to Always
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "no-pullpolicy"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "c", Image: "gcr.io/google_containers/pause-amd64:3.0"},
},
},
}
createPodAndExpectDenied(cs, ns.Name, pod,
"uses pullPolicy=IfNotPresent",
"not allowed",
"allowed: Always",
)
})
It("denies a harbor image with pullPolicy IfNotPresent because global Always must still apply (dev)", func() {
ns := NewNamespace("")
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "harbor-wrong-policy"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "c",
Image: "harbor/some-team/app:1",
ImagePullPolicy: corev1.PullIfNotPresent,
},
},
},
}
createPodAndExpectDenied(cs, ns.Name, pod,
"pullPolicy=IfNotPresent",
"not allowed",
"allowed:",
)
})
It("allows a harbor image with pullPolicy Always (dev)", func() {
ns := NewNamespace("")
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "harbor-always"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "c",
Image: "harbor/some-team/app:1",
ImagePullPolicy: corev1.PullAlways,
},
},
},
}
createPodAndExpectAllowed(cs, ns.Name, pod)
})
It("denies initContainers when they violate policy (dev) and includes the correct location in the message", func() {
ns := NewNamespace("")
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "init-deny"},
Spec: corev1.PodSpec{
InitContainers: []corev1.Container{
{
Name: "init",
Image: "harbor/some-team/init:1",
ImagePullPolicy: corev1.PullIfNotPresent, // should be denied
},
},
Containers: []corev1.Container{
{
Name: "c",
Image: "harbor/some-team/app:1",
ImagePullPolicy: corev1.PullAlways,
},
},
},
}
createPodAndExpectDenied(cs, ns.Name, pod,
"initContainers[0]",
"pullPolicy=IfNotPresent",
"allowed:",
)
})
It("denies volume image pullPolicy if not allowed (dev)", func() {
ns := NewNamespace("")
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "volume-deny"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
// main container must exist
{Name: "c", Image: "harbor/some-team/app:1", ImagePullPolicy: corev1.PullAlways},
},
Volumes: []corev1.Volume{
{
Name: "imgvol",
VolumeSource: corev1.VolumeSource{
Image: &corev1.ImageVolumeSource{
Reference: "harbor/some-team/volimg:1",
PullPolicy: corev1.PullIfNotPresent, // should be denied
},
},
},
},
},
}
createPodAndExpectDenied(cs, ns.Name, pod,
"volumes[0](imgvol)",
"pullPolicy=IfNotPresent",
"allowed:",
)
})
It("allows prod-specific image only with Always, still enforcing global policy", func() {
ns := NewNamespace("", map[string]string{
"environment": "prod",
})
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
// Wrong policy => denied
bad := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "prod-bad"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "c", Image: "harbor/production-image/app:1", ImagePullPolicy: corev1.PullNever},
},
},
}
createPodAndExpectDenied(cs, ns.Name, bad,
"pullPolicy=Never",
"allowed:",
)
// Correct policy => allowed
good := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "prod-good"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "c", Image: "harbor/production-image/app:1", ImagePullPolicy: corev1.PullAlways},
},
},
}
createPodAndExpectAllowed(cs, ns.Name, good)
})
It("denies adding an ephemeral container with wrong pullPolicy on UPDATE", func() {
ns := NewNamespace("")
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
expectNamespaceStatusRegistries(ns.GetName(), []string{".*", "harbor/.*"})
cleanupRBAC := GrantEphemeralContainersUpdate(ns.Name, tnt.Spec.Owners[0].UserSpec.Name)
defer cleanupRBAC()
// Create an allowed pod
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "base"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "c", Image: "harbor/some-team/app:1", ImagePullPolicy: corev1.PullAlways},
},
},
}
createPodAndExpectAllowed(cs, ns.Name, pod)
// Now attempt to add an ephemeral container with IfNotPresent (should be denied)
ephem := corev1.EphemeralContainer{
EphemeralContainerCommon: corev1.EphemeralContainerCommon{
Name: "debug",
Image: "harbor/some-team/debug:1",
ImagePullPolicy: corev1.PullIfNotPresent,
},
}
Eventually(func() error {
// Must use the ephemeralcontainers subresource
cur, err := cs.CoreV1().Pods(ns.Name).Get(context.Background(), pod.Name, metav1.GetOptions{})
if err != nil {
return err
}
cur.Spec.EphemeralContainers = append(cur.Spec.EphemeralContainers, ephem)
_, err = cs.CoreV1().Pods(ns.Name).UpdateEphemeralContainers(
context.Background(),
cur.Name,
cur,
metav1.UpdateOptions{},
)
if err == nil {
return fmt.Errorf("expected UpdateEphemeralContainers to be denied, but it succeeded")
}
msg := err.Error()
// The webhook reports the "ephemeralContainers[0]" location in its error message
if !strings.Contains(msg, "ephemeralContainers") || !strings.Contains(msg, "pullPolicy=IfNotPresent") {
return fmt.Errorf("unexpected error: %v", err)
}
return nil
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
})
It("denies a pod when volume image reference changes to a disallowed pullPolicy (recreate)", func() {
ns := NewNamespace("")
cs := ownerClient(tnt.Spec.Owners[0].UserSpec)
NamespaceCreation(ns, tnt.Spec.Owners[0].UserSpec, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
expectNamespaceStatusRegistries(ns.GetName(), []string{".*", "harbor/.*"})
pod1 := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "vol-ok"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "c", Image: "harbor/some-team/app:1", ImagePullPolicy: corev1.PullAlways},
},
Volumes: []corev1.Volume{
{
Name: "imgvol",
VolumeSource: corev1.VolumeSource{
Image: &corev1.ImageVolumeSource{
Reference: "harbor/some-team/volimg:1",
PullPolicy: corev1.PullAlways,
},
},
},
},
},
}
createPodAndExpectAllowed(cs, ns.Name, pod1)
pod2 := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "vol-bad"},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "c", Image: "harbor/some-team/app:1", ImagePullPolicy: corev1.PullAlways},
},
Volumes: []corev1.Volume{
{
Name: "imgvol",
VolumeSource: corev1.VolumeSource{
Image: &corev1.ImageVolumeSource{
Reference: "harbor/some-team/volimg:2",
PullPolicy: corev1.PullIfNotPresent,
},
},
},
},
},
}
createPodAndExpectDenied(cs, ns.Name, pod2,
"volumes[0](imgvol)",
"pullPolicy=IfNotPresent",
"allowed:",
)
})
})

View File

@@ -281,7 +281,7 @@ var _ = Describe("Promoting ServiceAccounts to Owners", Label("config"), Label("
Eventually(func(g Gomega) []rbacv1.Subject {
crb := &rbacv1.ClusterRoleBinding{}
err := k8sClient.Get(context.TODO(), types.NamespacedName{Name: api.ProvisionerRoleName}, crb)
err := k8sClient.Get(context.TODO(), types.NamespacedName{Name: originConfig.Spec.RBAC.ProvisionerClusterRole}, crb)
g.Expect(err).NotTo(HaveOccurred())
return crb.Subjects
@@ -337,7 +337,7 @@ var _ = Describe("Promoting ServiceAccounts to Owners", Label("config"), Label("
Eventually(func(g Gomega) []rbacv1.Subject {
crb := &rbacv1.ClusterRoleBinding{}
err := k8sClient.Get(context.TODO(), types.NamespacedName{Name: api.ProvisionerRoleName}, crb)
err := k8sClient.Get(context.TODO(), types.NamespacedName{Name: originConfig.Spec.RBAC.ProvisionerClusterRole}, crb)
g.Expect(err).NotTo(HaveOccurred())
return crb.Subjects

View File

@@ -25,7 +25,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to

View File

@@ -73,11 +73,16 @@ func NewNamespace(name string, labels ...map[string]string) *corev1.Namespace {
}
namespaceLabels := make(map[string]string)
namespaceLabels["env"] = "e2e"
if len(labels) > 0 {
namespaceLabels = labels[0]
for _, lab := range labels {
for k, v := range lab {
namespaceLabels[k] = v
}
}
}
namespaceLabels["env"] = "e2e"
return &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
@@ -402,6 +407,74 @@ func GetKubernetesVersion() *versionUtil.Version {
return ver
}
func GrantEphemeralContainersUpdate(ns string, username string) (cleanup func()) {
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-ephemeralcontainers",
Namespace: ns,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"pods/ephemeralcontainers"},
Verbs: []string{"update", "patch"},
},
// Optional but often useful for the test flow:
{
APIGroups: []string{""},
Resources: []string{"pods"},
Verbs: []string{"get", "list", "watch"},
},
},
}
rb := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-ephemeralcontainers",
Namespace: ns,
},
Subjects: []rbacv1.Subject{
{
Kind: rbacv1.UserKind,
Name: username,
APIGroup: rbacv1.GroupName,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "Role",
Name: role.Name,
},
}
// Create-or-update (simple)
EventuallyCreation(func() error {
_ = k8sClient.Delete(context.Background(), rb)
_ = k8sClient.Delete(context.Background(), role)
if err := k8sClient.Create(context.Background(), role); err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
if err := k8sClient.Create(context.Background(), rb); err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
return nil
}).Should(Succeed())
// Give RBAC a moment to propagate in the apiserver authorizer cache
Eventually(func() error {
cs := ownerClient(api.UserSpec{Name: username, Kind: "User"})
_, err := cs.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{Limit: 1})
return err
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
return func() {
// Best-effort cleanup
_ = k8sClient.Delete(context.Background(), rb)
_ = k8sClient.Delete(context.Background(), role)
}
}
func DeepCompare(expected, actual interface{}) (bool, string) {
expVal := reflect.ValueOf(expected)
actVal := reflect.ValueOf(actual)

79
go.mod
View File

@@ -4,7 +4,7 @@ go 1.25.4
require (
github.com/go-logr/logr v1.4.3
github.com/onsi/ginkgo/v2 v2.27.4
github.com/onsi/ginkgo/v2 v2.27.5
github.com/onsi/gomega v1.39.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.23.2
@@ -20,12 +20,14 @@ require (
k8s.io/apiserver v0.35.0
k8s.io/client-go v0.35.0
k8s.io/utils v0.0.0-20260108192941-914a6e750570
sigs.k8s.io/cluster-api v1.12.1
sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/cluster-api v1.12.2
sigs.k8s.io/controller-runtime v0.23.0
sigs.k8s.io/gateway-api v1.4.1
)
require (
dario.cat/mergo v1.0.2 // indirect
github.com/BurntSushi/toml v1.6.0 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -33,61 +35,80 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/fluxcd/cli-utils v0.37.1-flux.1 // indirect
github.com/fluxcd/pkg/apis/kustomize v1.15.0 // indirect
github.com/fluxcd/pkg/ssa v0.64.0 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.22.3 // indirect
github.com/go-openapi/jsonreference v0.21.3 // indirect
github.com/go-openapi/swag v0.25.3 // indirect
github.com/go-openapi/swag/cmdutils v0.25.3 // indirect
github.com/go-openapi/swag/conv v0.25.3 // indirect
github.com/go-openapi/swag/fileutils v0.25.3 // indirect
github.com/go-openapi/swag/jsonname v0.25.3 // indirect
github.com/go-openapi/swag/jsonutils v0.25.3 // indirect
github.com/go-openapi/swag/loading v0.25.3 // indirect
github.com/go-openapi/swag/mangling v0.25.3 // indirect
github.com/go-openapi/swag/netutils v0.25.3 // indirect
github.com/go-openapi/swag/stringutils v0.25.3 // indirect
github.com/go-openapi/swag/typeutils v0.25.3 // indirect
github.com/go-openapi/swag/yamlutils v0.25.3 // indirect
github.com/go-openapi/jsonpointer v0.22.4 // indirect
github.com/go-openapi/jsonreference v0.21.4 // indirect
github.com/go-openapi/swag v0.25.4 // indirect
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
github.com/go-openapi/swag/conv v0.25.4 // indirect
github.com/go-openapi/swag/fileutils v0.25.4 // indirect
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
github.com/go-openapi/swag/loading v0.25.4 // indirect
github.com/go-openapi/swag/mangling v0.25.4 // indirect
github.com/go-openapi/swag/netutils v0.25.4 // indirect
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
github.com/go-sprout/sprout v1.0.3 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gobuffalo/flect v1.0.3 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/gnostic-models v0.7.1 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.2 // indirect
github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/tidwall/gjson v1.18.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/sjson v1.2.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/wI2L/jsondiff v0.6.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.33.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/mod v0.31.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/tools v0.40.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/cli-runtime v0.35.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/kustomize/api v0.21.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.21.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)

93
go.sum
View File

@@ -2,6 +2,10 @@ cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
@@ -39,6 +43,12 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fluxcd/cli-utils v0.37.1-flux.1 h1:WnG2mHxCPZMj/soIq/S/1zvbrGCJN3GJGbNfG06X55M=
github.com/fluxcd/cli-utils v0.37.1-flux.1/go.mod h1:aND5wX3LuTFtB7eUT7vsWr8mmxRVSPR2Wkvbn0SqPfw=
github.com/fluxcd/pkg/apis/kustomize v1.15.0 h1:p8wPIxdmn0vy0a664rsE9JKCfnliZz4HUsDcTy4ZOxA=
github.com/fluxcd/pkg/apis/kustomize v1.15.0/go.mod h1:XWdsx8P15OiMaQIvmUjYWdmD3zAwhl5q9osl5iCqcOk=
github.com/fluxcd/pkg/ssa v0.64.0 h1:B/8VYMIYMeRmolup2HOoWNqXh4UeXi6w2LvXXvl6MZM=
github.com/fluxcd/pkg/ssa v0.64.0/go.mod h1:RjvVjJIoRo1ecsv91yMuiqzO6cpNag80M6MOB/vrJdc=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
@@ -49,6 +59,8 @@ github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZ
github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -57,38 +69,69 @@ github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.22.3 h1:dKMwfV4fmt6Ah90zloTbUKWMD+0he+12XYAsPotrkn8=
github.com/go-openapi/jsonpointer v0.22.3/go.mod h1:0lBbqeRsQ5lIanv3LHZBrmRGHLHcQoOXQnf88fHlGWo=
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
github.com/go-openapi/swag v0.25.3 h1:FAa5wJXyDtI7yUztKDfZxDrSx+8WTg31MfCQ9s3PV+s=
github.com/go-openapi/swag v0.25.3/go.mod h1:tX9vI8Mj8Ny+uCEk39I1QADvIPI7lkndX4qCsEqhkS8=
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
github.com/go-openapi/swag/cmdutils v0.25.3 h1:EIwGxN143JCThNHnqfqs85R8lJcJG06qjJRZp3VvjLI=
github.com/go-openapi/swag/cmdutils v0.25.3/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
github.com/go-openapi/swag/conv v0.25.3 h1:PcB18wwfba7MN5BVlBIV+VxvUUeC2kEuCEyJ2/t2X7E=
github.com/go-openapi/swag/conv v0.25.3/go.mod h1:n4Ibfwhn8NJnPXNRhBO5Cqb9ez7alBR40JS4rbASUPU=
github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
github.com/go-openapi/swag/fileutils v0.25.3 h1:P52Uhd7GShkeU/a1cBOuqIcHMHBrA54Z2t5fLlE85SQ=
github.com/go-openapi/swag/fileutils v0.25.3/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
github.com/go-openapi/swag/jsonname v0.25.3 h1:U20VKDS74HiPaLV7UZkztpyVOw3JNVsit+w+gTXRj0A=
github.com/go-openapi/swag/jsonname v0.25.3/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
github.com/go-openapi/swag/jsonutils v0.25.3 h1:kV7wer79KXUM4Ea4tBdAVTU842Rg6tWstX3QbM4fGdw=
github.com/go-openapi/swag/jsonutils v0.25.3/go.mod h1:ILcKqe4HC1VEZmJx51cVuZQ6MF8QvdfXsQfiaCs0z9o=
github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.3 h1:/i3E9hBujtXfHy91rjtwJ7Fgv5TuDHgnSrYjhFxwxOw=
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.3/go.mod h1:8kYfCR2rHyOj25HVvxL5Nm8wkfzggddgjZm6RgjT8Ao=
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
github.com/go-openapi/swag/loading v0.25.3 h1:Nn65Zlzf4854MY6Ft0JdNrtnHh2bdcS/tXckpSnOb2Y=
github.com/go-openapi/swag/loading v0.25.3/go.mod h1:xajJ5P4Ang+cwM5gKFrHBgkEDWfLcsAKepIuzTmOb/c=
github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
github.com/go-openapi/swag/mangling v0.25.3 h1:rGIrEzXaYWuUW1MkFmG3pcH+EIA0/CoUkQnIyB6TUyo=
github.com/go-openapi/swag/mangling v0.25.3/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
github.com/go-openapi/swag/netutils v0.25.3 h1:XWXHZfL/65ABiv8rvGp9dtE0C6QHTYkCrNV77jTl358=
github.com/go-openapi/swag/netutils v0.25.3/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
github.com/go-openapi/swag/stringutils v0.25.3 h1:nAmWq1fUTWl/XiaEPwALjp/8BPZJun70iDHRNq/sH6w=
github.com/go-openapi/swag/stringutils v0.25.3/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
github.com/go-openapi/swag/typeutils v0.25.3 h1:2w4mEEo7DQt3V4veWMZw0yTPQibiL3ri2fdDV4t2TQc=
github.com/go-openapi/swag/typeutils v0.25.3/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
github.com/go-openapi/swag/yamlutils v0.25.3 h1:LKTJjCn/W1ZfMec0XDL4Vxh8kyAnv1orH5F2OREDUrg=
github.com/go-openapi/swag/yamlutils v0.25.3/go.mod h1:Y7QN6Wc5DOBXK14/xeo1cQlq0EA0wvLoSv13gDQoCao=
github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
github.com/go-sprout/sprout v1.0.3 h1:LLuz0D3aYazgbVTOwCVuMor3LOUVYinipXRIdjA/D+I=
github.com/go-sprout/sprout v1.0.3/go.mod h1:cFFzpnyGGry3cmN0UNCAM1f7AGok6vPVabeYQzBMBZY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
@@ -101,6 +144,8 @@ github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c=
github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -142,12 +187,14 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/ginkgo/v2 v2.27.4 h1:fcEcQW/A++6aZAZQNUmNjvA9PSOzefMJBerHJ4t8v8Y=
github.com/onsi/ginkgo/v2 v2.27.4/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/ginkgo/v2 v2.27.5 h1:ZeVgZMx2PDMdJm/+w5fE/OyG6ILo1Y3e+QX4zSR0zTE=
github.com/onsi/ginkgo/v2 v2.27.5/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
github.com/onsi/gomega v1.39.0 h1:y2ROC3hKFmQZJNFeGAMeHZKkjBL65mIZcvrLQBF9k6Q=
@@ -167,6 +214,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.2 h1:PcBAckGFTIHt2+L3I33uNRTlKTplNzFctXcWhPyAEN8=
github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
@@ -186,16 +235,20 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
@@ -204,8 +257,12 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/wI2L/jsondiff v0.6.1 h1:ISZb9oNWbP64LHnu4AUhsMF5W0FIj5Ok3Krip9Shqpw=
github.com/wI2L/jsondiff v0.6.1/go.mod h1:KAEIojdQq66oJiHhDyQez2x+sRit0vIzC9KeK0yizxM=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
@@ -238,26 +295,42 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
@@ -268,6 +341,8 @@ google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -286,6 +361,8 @@ k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4=
k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds=
k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE=
k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY=
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/cluster-bootstrap v0.34.2 h1:oKckPeunVCns37BntcsxaOesDul32yzGd3DFLjW2fc8=
@@ -296,6 +373,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ=
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE=
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY=
@@ -304,15 +383,25 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUo
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/cluster-api v1.12.1 h1:s3DivSZjXdu2HPyOtV/n6XwSZBaIycZdKNs4y8X+3lY=
sigs.k8s.io/cluster-api v1.12.1/go.mod h1:+S6WJdi8UPdqv5q9nka5al3ed/Qa0zAcSBgzTaa9VKA=
sigs.k8s.io/cluster-api v1.12.2 h1:+b+M2IygfvFZJq7bsaloNakimMEVNf81zkGR1IiuxXs=
sigs.k8s.io/cluster-api v1.12.2/go.mod h1:2XuF/dmN3c/1VITb6DB44N5+Ecvsvd5KOWqrY9Q53nU=
sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/controller-runtime v0.23.0 h1:Ubi7klJWiwEWqDY+odSVZiFA0aDSevOCXpa38yCSYu8=
sigs.k8s.io/controller-runtime v0.23.0/go.mod h1:DBOIr9NsprUqCZ1ZhsuJ0wAnQSIxY/C6VjZbmLgw0j0=
sigs.k8s.io/gateway-api v1.4.1 h1:NPxFutNkKNa8UfLd2CMlEuhIPMQgDQ6DXNKG9sHbJU8=
sigs.k8s.io/gateway-api v1.4.1/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.21.0 h1:I7nry5p8iDJbuRdYS7ez8MUvw7XVNPcIP5GkzzuXIIQ=
sigs.k8s.io/kustomize/api v0.21.0/go.mod h1:XGVQuR5n2pXKWbzXHweZU683pALGw/AMVO4zU4iS8SE=
sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6HQ=
sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E=
sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

View File

@@ -4,21 +4,38 @@ kind: Tenant
metadata:
name: solar
spec:
owners:
- name: alice
kind: User
permissions:
matchOwners:
- matchLabels:
team: platform
- matchLabels:
tenant: solar
owners:
- name: alice
kind: User
additionalRoleBindings:
- clusterRoleName: 'view'
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: joe
rules:
- enforce:
registries:
- url: "harbor/.*"
policy:
- "Never"
- namespaceSelector:
matchExpressions:
- key: env
operator: In
values:
- "prod"
enforce:
registries:
- url: "harbor/v2/customer-registry/prod-image/.*"
policy:
- "Always"
---
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
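A quick orientation note (editor's sketch, not part of this commit): the standalone snippet below uses invented names to show how a rule such as url: "harbor/.*" with policy ["Never"] could be checked against a single container image and pull policy, along the lines of the CompiledRule type introduced in internal/cache/registries.go further down in this diff. The exact matching semantics live in the webhook code and may differ in detail.

// Illustrative only; all names here are invented for the example.
package main

import (
	"fmt"
	"regexp"
)

// allowed reports whether an image/pull-policy pair satisfies one registry rule.
// A rule only applies when its regex matches the image reference; an empty
// policy list means any pull policy is accepted.
func allowed(image, pullPolicy, registryRE string, policies []string) bool {
	if !regexp.MustCompile(registryRE).MatchString(image) {
		return true // rule does not apply to this image
	}
	if len(policies) == 0 {
		return true
	}
	for _, p := range policies {
		if p == pullPolicy {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowed("harbor/library/nginx:latest", "Never", "harbor/.*", []string{"Never"}))  // true
	fmt.Println(allowed("harbor/library/nginx:latest", "Always", "harbor/.*", []string{"Never"})) // false
}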

hack/kind-cluster.yaml Normal file
View File

@@ -0,0 +1,8 @@
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: capsule
featureGates:
ImageVolume: true
nodes:
- role: control-plane

View File

@@ -1,13 +0,0 @@
# With Kind configuration is used to
# share a folder between the outside sistem
# and the internal container (capsule-controller-manager),
# In this way we will be able to get the metadata
# generated by harpoon at the end of the e2e tests execution.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: capsule-tracing
nodes:
- role: control-plane
extraMounts:
- hostPath: /tmp/results
containerPath: /tmp/results

internal/cache/invalidation.go vendored Normal file
View File

@@ -0,0 +1,26 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package cache
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func ShouldInvalidate(last *metav1.Time, now time.Time, interval time.Duration) bool {
if interval <= 0 {
return false
}
if last == nil || last.IsZero() {
return true
}
if last.After(now) {
return false
}
return now.Sub(last.Time) >= interval
}
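A minimal usage sketch of ShouldInvalidate (editor's note, not part of this commit; it assumes a caller inside the capsule module, since internal/ packages are not importable from outside):

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/projectcapsule/capsule/internal/cache"
)

func main() {
	now := time.Now()
	interval := 5 * time.Minute

	// Never invalidated before: invalidate now.
	fmt.Println(cache.ShouldInvalidate(nil, now, interval)) // true
	// Last invalidation one minute ago: still within the interval.
	fmt.Println(cache.ShouldInvalidate(&metav1.Time{Time: now.Add(-time.Minute)}, now, interval)) // false
	// Last invalidation ten minutes ago: interval elapsed.
	fmt.Println(cache.ShouldInvalidate(&metav1.Time{Time: now.Add(-10 * time.Minute)}, now, interval)) // true
	// A non-positive interval disables invalidation entirely.
	fmt.Println(cache.ShouldInvalidate(&metav1.Time{Time: now.Add(-time.Hour)}, now, 0)) // false
}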

internal/cache/registries.go vendored Normal file
View File

@@ -0,0 +1,232 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package cache
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"regexp"
"sort"
"strings"
"sync"
corev1 "k8s.io/api/core/v1"
"github.com/projectcapsule/capsule/pkg/api"
)
type RuleSet struct {
ID string
Compiled []CompiledRule
HasImages bool
HasVolumes bool
}
type CompiledRule struct {
Registry string
RE *regexp.Regexp
AllowedPolicy map[corev1.PullPolicy]struct{} // nil/empty => allow any
ValidateImages bool
ValidateVolumes bool
}
type RegistryRuleSetCache struct {
mu sync.RWMutex
rs map[string]*RuleSet
}
func NewRegistryRuleSetCache() *RegistryRuleSetCache {
return &RegistryRuleSetCache{
rs: make(map[string]*RuleSet),
}
}
func (c *RegistryRuleSetCache) GetOrBuild(specRules []api.OCIRegistry) (rs *RuleSet, fromCache bool, err error) {
if len(specRules) == 0 {
return nil, false, nil
}
id := c.HashRules(specRules)
c.mu.RLock()
rs = c.rs[id]
c.mu.RUnlock()
if rs != nil {
return rs, true, nil
}
// Build outside locks (regex compile etc.)
built, err := buildRuleSet(id, specRules)
if err != nil {
return nil, false, err
}
// Insert with double-check
c.mu.Lock()
defer c.mu.Unlock()
if c.rs == nil {
c.rs = make(map[string]*RuleSet)
}
// Another goroutine may have inserted meanwhile
if rs = c.rs[id]; rs != nil {
return rs, true, nil
}
c.rs[id] = built
return built, false, nil
}
func (c *RegistryRuleSetCache) Stats() int {
c.mu.RLock()
defer c.mu.RUnlock()
return len(c.rs)
}
// activeIDs: set of IDs currently referenced by a RuleStatus in the cluster.
func (c *RegistryRuleSetCache) PruneActive(activeIDs map[string]struct{}) int {
c.mu.Lock()
defer c.mu.Unlock()
removed := 0
for id := range c.rs {
if _, ok := activeIDs[id]; ok {
continue
}
delete(c.rs, id)
removed++
}
return removed
}
func (c *RegistryRuleSetCache) HashRules(specRules []api.OCIRegistry) string {
var b strings.Builder
b.Grow(len(specRules) * 64)
const (
sepRule = "\n"
sepField = "\x1f"
sepList = "\x1e"
)
for _, r := range specRules {
url := strings.TrimSpace(r.Registry)
policies := make([]string, 0, len(r.Policy))
for _, p := range r.Policy {
policies = append(policies, strings.TrimSpace(string(p)))
}
sort.Strings(policies)
validations := make([]string, 0, len(r.Validation))
for _, v := range r.Validation {
validations = append(validations, strings.TrimSpace(string(v)))
}
sort.Strings(validations)
b.WriteString(url)
b.WriteString(sepField)
for i, p := range policies {
if i > 0 {
b.WriteString(sepList)
}
b.WriteString(p)
}
b.WriteString(sepField)
for i, v := range validations {
if i > 0 {
b.WriteString(sepList)
}
b.WriteString(v)
}
b.WriteString(sepRule)
}
sum := sha256.Sum256([]byte(b.String()))
return hex.EncodeToString(sum[:])
}
// Has is useful in tests and debugging.
func (c *RegistryRuleSetCache) Has(id string) bool {
c.mu.RLock()
defer c.mu.RUnlock()
_, ok := c.rs[id]
return ok
}
// insertForTest could live behind a build tag, but it is fine to keep it simple.
//
//nolint:unused
func (c *RegistryRuleSetCache) insertForTest(id string) {
c.mu.Lock()
defer c.mu.Unlock()
if c.rs == nil {
c.rs = make(map[string]*RuleSet)
}
c.rs[id] = &RuleSet{ID: id}
}
func buildRuleSet(id string, specRules []api.OCIRegistry) (*RuleSet, error) {
rs := &RuleSet{
ID: id,
Compiled: make([]CompiledRule, 0, len(specRules)),
}
for _, r := range specRules {
re, err := regexp.Compile(r.Registry)
if err != nil {
return nil, fmt.Errorf("invalid registry regex %q: %w", r.Registry, err)
}
cr := CompiledRule{
Registry: r.Registry,
RE: re,
}
if len(r.Policy) > 0 {
cr.AllowedPolicy = make(map[corev1.PullPolicy]struct{}, len(r.Policy))
for _, p := range r.Policy {
cr.AllowedPolicy[p] = struct{}{}
}
}
for _, v := range r.Validation {
switch v {
case api.ValidateImages:
cr.ValidateImages = true
rs.HasImages = true
case api.ValidateVolumes:
cr.ValidateVolumes = true
rs.HasVolumes = true
}
}
rs.Compiled = append(rs.Compiled, cr)
}
return rs, nil
}
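Worth noting (editor's sketch, not part of this commit): HashRules sorts the policy and validation entries inside each rule before hashing, so only the order of whole rules affects the resulting ID. A rough in-module illustration, assuming it runs inside the capsule module:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/projectcapsule/capsule/internal/cache"
	"github.com/projectcapsule/capsule/pkg/api"
)

func main() {
	c := cache.NewRegistryRuleSetCache()

	a := []api.OCIRegistry{{Registry: "harbor/.*", Policy: []corev1.PullPolicy{corev1.PullNever, corev1.PullAlways}}}
	b := []api.OCIRegistry{{Registry: "harbor/.*", Policy: []corev1.PullPolicy{corev1.PullAlways, corev1.PullNever}}}

	// Per-rule policy order is normalized before hashing, so the IDs match.
	fmt.Println(c.HashRules(a) == c.HashRules(b)) // true
}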

internal/cache/registries_test.go vendored Normal file
View File

@@ -0,0 +1,524 @@
package cache
import (
"sync"
"testing"
corev1 "k8s.io/api/core/v1"
"github.com/projectcapsule/capsule/pkg/api"
)
func set(ids ...string) map[string]struct{} {
m := make(map[string]struct{}, len(ids))
for _, id := range ids {
m[id] = struct{}{}
}
return m
}
func TestRegistryRuleSetCache_GetOrBuild_ReturnsFromCacheFlag(t *testing.T) {
c := NewRegistryRuleSetCache()
rules := []api.OCIRegistry{
{
Registry: "harbor/.*",
Validation: []api.RegistryValidationTarget{api.ValidateImages, api.ValidateVolumes},
Policy: []corev1.PullPolicy{corev1.PullNever},
},
}
rs1, fromCache1, err := c.GetOrBuild(rules)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if rs1 == nil {
t.Fatalf("expected ruleset, got nil")
}
if fromCache1 {
t.Fatalf("expected fromCache=false on first build, got true")
}
rs2, fromCache2, err := c.GetOrBuild(rules)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if rs2 == nil {
t.Fatalf("expected ruleset, got nil")
}
if !fromCache2 {
t.Fatalf("expected fromCache=true on second call, got false")
}
if rs1 != rs2 {
t.Fatalf("expected same cached pointer, got rs1=%p rs2=%p", rs1, rs2)
}
}
func TestRuleSetCache_GetOrBuild_EmptyReturnsNil(t *testing.T) {
c := NewRegistryRuleSetCache()
rs, _, err := c.GetOrBuild(nil)
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if rs != nil {
t.Fatalf("expected nil ruleset, got %#v", rs)
}
rs, _, err = c.GetOrBuild([]api.OCIRegistry{})
if err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if rs != nil {
t.Fatalf("expected nil ruleset, got %#v", rs)
}
if got := c.Stats(); got != 0 {
t.Fatalf("expected Stats()=0, got %d", got)
}
}
func TestRuleSetCache_GetOrBuild_InvalidRegexReturnsError(t *testing.T) {
c := NewRegistryRuleSetCache()
// invalid regex
rules := []api.OCIRegistry{
{
Registry: "([",
Validation: []api.RegistryValidationTarget{api.ValidateImages},
Policy: []corev1.PullPolicy{corev1.PullAlways},
},
}
rs, _, err := c.GetOrBuild(rules)
if err == nil {
t.Fatalf("expected error, got nil")
}
if rs != nil {
t.Fatalf("expected nil ruleset on error, got %#v", rs)
}
if got := c.Stats(); got != 0 {
t.Fatalf("expected Stats()=0 after failing build, got %d", got)
}
}
func TestRuleSetCache_GetOrBuild_DeduplicatesByContent(t *testing.T) {
c := NewRegistryRuleSetCache()
rulesA := []api.OCIRegistry{
{
Registry: "harbor/.*",
Validation: []api.RegistryValidationTarget{api.ValidateImages, api.ValidateVolumes},
Policy: []corev1.PullPolicy{corev1.PullNever},
},
}
// same content but different backing slice
rulesB := []api.OCIRegistry{
{
Registry: "harbor/.*",
Validation: []api.RegistryValidationTarget{api.ValidateImages, api.ValidateVolumes},
Policy: []corev1.PullPolicy{corev1.PullNever},
},
}
rs1, _, err := c.GetOrBuild(rulesA)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
rs2, _, err := c.GetOrBuild(rulesB)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
// the whole point: should be the exact same pointer
if rs1 != rs2 {
t.Fatalf("expected same cached pointer, got rs1=%p rs2=%p", rs1, rs2)
}
if got := c.Stats(); got != 1 {
t.Fatalf("expected Stats()=1, got %d", got)
}
// sanity: compiled fields are correct (no DeepEqual; check specific invariants)
if rs1.ID == "" {
t.Fatalf("expected non-empty ruleset ID")
}
if len(rs1.Compiled) != 1 {
t.Fatalf("expected 1 compiled rule, got %d", len(rs1.Compiled))
}
cr := rs1.Compiled[0]
if cr.RE == nil {
t.Fatalf("expected compiled regexp, got nil")
}
if cr.Registry != "harbor/.*" {
t.Fatalf("expected Registry to match input, got %q", cr.Registry)
}
if !cr.ValidateImages || !cr.ValidateVolumes {
t.Fatalf("expected ValidateImages and ValidateVolumes true, got images=%v volumes=%v", cr.ValidateImages, cr.ValidateVolumes)
}
if rs1.HasImages != true || rs1.HasVolumes != true {
t.Fatalf("expected ruleset flags HasImages/HasVolumes true, got images=%v volumes=%v", rs1.HasImages, rs1.HasVolumes)
}
if cr.AllowedPolicy == nil {
t.Fatalf("expected AllowedPolicy map non-nil")
}
if _, ok := cr.AllowedPolicy[corev1.PullNever]; !ok {
t.Fatalf("expected AllowedPolicy to contain PullNever")
}
}
func TestRuleSetCache_GetOrBuild_OrderMatters_LaterWins(t *testing.T) {
c := NewRegistryRuleSetCache()
// Two rule sets with the same rules, but in swapped order.
// HashRules preserves rule order, so the IDs must differ.
rules1 := []api.OCIRegistry{
{Registry: ".*", Validation: []api.RegistryValidationTarget{api.ValidateImages}, Policy: []corev1.PullPolicy{corev1.PullAlways}},
{Registry: "harbor/.*", Validation: []api.RegistryValidationTarget{api.ValidateImages}},
}
rules2 := []api.OCIRegistry{
{Registry: "harbor/.*", Validation: []api.RegistryValidationTarget{api.ValidateImages}},
{Registry: ".*", Validation: []api.RegistryValidationTarget{api.ValidateImages}, Policy: []corev1.PullPolicy{corev1.PullAlways}},
}
rs1, _, err := c.GetOrBuild(rules1)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
rs2, _, err := c.GetOrBuild(rules2)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if rs1 == rs2 {
t.Fatalf("expected different cached entries due to different rule order, got same pointer %p", rs1)
}
if rs1.ID == rs2.ID {
t.Fatalf("expected different IDs for different order, got same %q", rs1.ID)
}
if got := c.Stats(); got != 2 {
t.Fatalf("expected Stats()=2, got %d", got)
}
// Verify compiled slice preserves the rule order we provided
if len(rs1.Compiled) != 2 {
t.Fatalf("expected 2 compiled rules, got %d", len(rs1.Compiled))
}
if rs1.Compiled[0].Registry != ".*" || rs1.Compiled[1].Registry != "harbor/.*" {
t.Fatalf("expected compiled order to match input for rules1, got %q then %q",
rs1.Compiled[0].Registry, rs1.Compiled[1].Registry)
}
}
func TestRuleSetCache_GetOrBuild_ConcurrentReturnsSamePointer(t *testing.T) {
c := NewRegistryRuleSetCache()
rules := []api.OCIRegistry{
{
Registry: "harbor/.*",
Validation: []api.RegistryValidationTarget{api.ValidateImages, api.ValidateVolumes},
Policy: []corev1.PullPolicy{corev1.PullAlways, corev1.PullIfNotPresent},
},
}
const workers = 32
var wg sync.WaitGroup
wg.Add(workers)
results := make([]*RuleSet, workers)
errs := make([]error, workers)
for i := 0; i < workers; i++ {
go func(i int) {
defer wg.Done()
rs, _, err := c.GetOrBuild(rules)
results[i] = rs
errs[i] = err
}(i)
}
wg.Wait()
for i := 0; i < workers; i++ {
if errs[i] != nil {
t.Fatalf("worker %d got err: %v", i, errs[i])
}
if results[i] == nil {
t.Fatalf("worker %d got nil ruleset", i)
}
}
// all pointers must match the first
first := results[0]
for i := 1; i < workers; i++ {
if results[i] != first {
t.Fatalf("expected same cached pointer across goroutines; got %p vs %p", first, results[i])
}
}
}
func TestRegistryRuleSetCache_GetOrBuild_ConcurrentPointersAndFlags(t *testing.T) {
c := NewRegistryRuleSetCache()
rules := []api.OCIRegistry{
{Registry: "harbor/.*", Validation: []api.RegistryValidationTarget{api.ValidateImages}},
}
const workers = 32
var wg sync.WaitGroup
wg.Add(workers)
results := make([]*RuleSet, workers)
flags := make([]bool, workers)
errs := make([]error, workers)
for i := 0; i < workers; i++ {
go func(i int) {
defer wg.Done()
rs, fromCache, err := c.GetOrBuild(rules)
results[i] = rs
flags[i] = fromCache
errs[i] = err
}(i)
}
wg.Wait()
for i := 0; i < workers; i++ {
if errs[i] != nil {
t.Fatalf("worker %d err: %v", i, errs[i])
}
if results[i] == nil {
t.Fatalf("worker %d got nil ruleset", i)
}
}
first := results[0]
for i := 1; i < workers; i++ {
if results[i] != first {
t.Fatalf("expected same cached pointer across goroutines; got %p vs %p", first, results[i])
}
}
seenFalse := false
seenTrue := false
for i := 0; i < workers; i++ {
if flags[i] {
seenTrue = true
} else {
seenFalse = true
}
}
if !seenFalse {
t.Fatalf("expected at least one fromCache=false (builder), got none")
}
if !seenTrue {
t.Fatalf("expected at least one fromCache=true (builder), got none")
}
}
func TestRegistryRuleSetCache_InsertForTest_ThenHasAndLen(t *testing.T) {
c := NewRegistryRuleSetCache()
if got := c.Stats(); got != 0 {
t.Fatalf("expected Len()=0, got %d", got)
}
if c.Has("x") {
t.Fatalf("expected Has(x)=false on empty cache")
}
c.insertForTest("x")
if !c.Has("x") {
t.Fatalf("expected Has(x)=true after insert")
}
if got := c.Stats(); got != 1 {
t.Fatalf("expected Len()=1 after insert, got %d", got)
}
}
func TestRegistryRuleSetCache_InsertForTest_DuplicateDoesNotIncreaseLen(t *testing.T) {
c := NewRegistryRuleSetCache()
c.insertForTest("x")
if got := c.Stats(); got != 1 {
t.Fatalf("expected Len()=1 after first insert, got %d", got)
}
c.insertForTest("x")
if got := c.Stats(); got != 1 {
t.Fatalf("expected Len() to remain 1 after duplicate insert, got %d", got)
}
if !c.Has("x") {
t.Fatalf("expected Has(x)=true after duplicate insert")
}
}
func TestRegistryRuleSetCache_HasFalseForMissingKey(t *testing.T) {
c := NewRegistryRuleSetCache()
c.insertForTest("a")
if c.Has("b") {
t.Fatalf("expected Has(b)=false when only a exists")
}
}
func TestRegistryRuleSetCache_PruneActive_RemovesOnlyInactive(t *testing.T) {
c := NewRegistryRuleSetCache()
c.insertForTest("a")
c.insertForTest("b")
c.insertForTest("c")
removed := c.PruneActive(set("b"))
if removed != 2 {
t.Fatalf("expected removed=2, got %d", removed)
}
if got := c.Stats(); got != 1 {
t.Fatalf("expected Len()=1 after prune, got %d", got)
}
if !c.Has("b") {
t.Fatalf("expected b to remain")
}
if c.Has("a") || c.Has("c") {
t.Fatalf("expected a and c to be removed")
}
}
func TestRegistryRuleSetCache_PruneActive_AllActiveNoChange(t *testing.T) {
c := NewRegistryRuleSetCache()
c.insertForTest("a")
c.insertForTest("b")
removed := c.PruneActive(set("a", "b"))
if removed != 0 {
t.Fatalf("expected removed=0, got %d", removed)
}
if got := c.Stats(); got != 2 {
t.Fatalf("expected Len()=2, got %d", got)
}
if !c.Has("a") || !c.Has("b") {
t.Fatalf("expected both a and b to remain")
}
}
func TestRegistryRuleSetCache_PruneActive_EmptyActivePrunesAll(t *testing.T) {
c := NewRegistryRuleSetCache()
c.insertForTest("a")
c.insertForTest("b")
removed := c.PruneActive(set())
if removed != 2 {
t.Fatalf("expected removed=2, got %d", removed)
}
if got := c.Stats(); got != 0 {
t.Fatalf("expected Len()=0 after prune all, got %d", got)
}
if c.Has("a") || c.Has("b") {
t.Fatalf("expected cache to be empty after prune all")
}
}
func TestRegistryRuleSetCache_PruneActive_NilActivePrunesAll(t *testing.T) {
c := NewRegistryRuleSetCache()
c.insertForTest("a")
removed := c.PruneActive(nil)
if removed != 1 {
t.Fatalf("expected removed=1, got %d", removed)
}
if got := c.Stats(); got != 0 {
t.Fatalf("expected Len()=0 after prune, got %d", got)
}
if c.Has("a") {
t.Fatalf("expected a to be removed")
}
}
func TestRegistryRuleSetCache_PruneActive_EmptyCacheNoop(t *testing.T) {
c := NewRegistryRuleSetCache()
removed := c.PruneActive(set("a"))
if removed != 0 {
t.Fatalf("expected removed=0 on empty cache, got %d", removed)
}
if got := c.Stats(); got != 0 {
t.Fatalf("expected Len()=0, got %d", got)
}
}
func TestRegistryRuleSetCache_PruneActive_Idempotent(t *testing.T) {
c := NewRegistryRuleSetCache()
c.insertForTest("a")
c.insertForTest("b")
c.insertForTest("c")
active := set("a")
removed1 := c.PruneActive(active)
if removed1 != 2 {
t.Fatalf("expected first prune removed=2, got %d", removed1)
}
if got := c.Stats(); got != 1 {
t.Fatalf("expected Len()=1 after first prune, got %d", got)
}
if !c.Has("a") {
t.Fatalf("expected a to remain after first prune")
}
removed2 := c.PruneActive(active)
if removed2 != 0 {
t.Fatalf("expected second prune removed=0, got %d", removed2)
}
if got := c.Stats(); got != 1 {
t.Fatalf("expected Len()=1 after second prune, got %d", got)
}
}
func TestRegistryRuleSetCache_PruneActive_RemovesCorrectCountWithLargerSet(t *testing.T) {
c := NewRegistryRuleSetCache()
// Insert 10 IDs: id0..id9
for i := 0; i < 10; i++ {
c.insertForTest("id" + itoa(i))
}
// Keep 3: id0,id4,id9
removed := c.PruneActive(set("id0", "id4", "id9"))
if removed != 7 {
t.Fatalf("expected removed=7, got %d", removed)
}
if got := c.Stats(); got != 3 {
t.Fatalf("expected Len()=3, got %d", got)
}
if !c.Has("id0") || !c.Has("id4") || !c.Has("id9") {
t.Fatalf("expected id0,id4,id9 to remain")
}
}
// tiny int->string without fmt (faster, no allocations beyond result)
func itoa(i int) string {
// Enough for small test numbers
if i == 0 {
return "0"
}
var buf [20]byte
n := len(buf)
for i > 0 {
n--
buf[n] = byte('0' + (i % 10))
i /= 10
}
return string(buf[n:])
}

View File

@@ -0,0 +1,41 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package admission
import (
"fmt"
"github.com/go-logr/logr"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/projectcapsule/capsule/internal/controllers/utils"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
)
func Add(
log logr.Logger,
mgr manager.Manager,
recorder events.EventRecorder,
cfg utils.ControllerOptions,
capsuleConfig configuration.Configuration,
) (err error) {
if err = (&validatingReconciler{
client: mgr.GetClient(),
log: log.WithName("admission"),
configuration: capsuleConfig,
}).SetupWithManager(mgr, cfg); err != nil {
return fmt.Errorf("unable to create validating admission controller: %w", err)
}
if err = (&mutatingReconciler{
client: mgr.GetClient(),
log: log.WithName("admission"),
configuration: capsuleConfig,
}).SetupWithManager(mgr, cfg); err != nil {
return fmt.Errorf("unable to create mutating admission controller: %w", err)
}
return nil
}

View File

@@ -0,0 +1,166 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
//nolint:dupl
package admission
import (
"context"
"maps"
"sort"
"github.com/go-logr/logr"
admissionv1 "k8s.io/api/admissionregistration/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/internal/controllers/utils"
"github.com/projectcapsule/capsule/pkg/api/meta"
clt "github.com/projectcapsule/capsule/pkg/runtime/client"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/predicates"
)
type mutatingReconciler struct {
client client.Client
configuration configuration.Configuration
log logr.Logger
}
func (r *mutatingReconciler) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.ControllerOptions) error {
return ctrl.NewControllerManagedBy(mgr).
Named("capsule/admission/mutating").
For(
&capsulev1beta2.CapsuleConfiguration{},
builder.WithPredicates(
predicate.GenerationChangedPredicate{},
predicates.NamesMatchingPredicate{Names: []string{ctrlConfig.ConfigurationName}},
),
).
WithOptions(controller.Options{MaxConcurrentReconciles: ctrlConfig.MaxConcurrentReconciles}).
Complete(r)
}
func (r *mutatingReconciler) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
err = r.reconcileConfiguration(ctx, r.configuration.Admission().Mutating)
return res, err
}
func (r *mutatingReconciler) reconcileConfiguration(
ctx context.Context,
cfg capsulev1beta2.DynamicAdmissionConfig,
) error {
desiredName := string(cfg.Name)
hooks, err := r.webhooks(ctx, cfg)
if err != nil {
return err
}
if len(hooks) == 0 {
managed, err := r.listManagedWebhookConfigs(ctx)
if err != nil {
return err
}
for i := range managed {
if err := r.deleteWebhookConfig(ctx, managed[i].Name); err != nil {
return err
}
}
return nil
}
obj := &admissionv1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: string(cfg.Name)},
}
sort.Slice(hooks, func(i, j int) bool { return hooks[i].Name < hooks[j].Name })
labels := obj.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
maps.Copy(labels, cfg.Labels)
labels[meta.CreatedByCapsuleLabel] = meta.ControllerValue
obj.SetLabels(labels)
annotations := obj.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
maps.Copy(annotations, cfg.Annotations)
obj.SetAnnotations(annotations)
if err := clt.CreateOrPatch(ctx, r.client, obj, meta.FieldManagerCapsuleController, true); err != nil {
return err
}
// Garbage-collect any old managed mutating webhook configs with a different name
managed, err := r.listManagedWebhookConfigs(ctx)
if err != nil {
return err
}
for i := range managed {
if managed[i].Name == desiredName {
continue
}
if err := r.deleteWebhookConfig(ctx, managed[i].Name); err != nil {
return err
}
}
return nil
}
func (r *mutatingReconciler) listManagedWebhookConfigs(ctx context.Context) ([]admissionv1.MutatingWebhookConfiguration, error) {
list := &admissionv1.MutatingWebhookConfigurationList{}
if err := r.client.List(ctx, list, client.MatchingLabels{
meta.CreatedByCapsuleLabel: meta.ControllerValue,
}); err != nil {
return nil, err
}
return list.Items, nil
}
func (r *mutatingReconciler) deleteWebhookConfig(ctx context.Context, name string) error {
if name == "" {
return nil
}
obj := &admissionv1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: name},
}
err := r.client.Delete(ctx, obj)
if apierrors.IsNotFound(err) {
return nil
}
return err
}
func (r *mutatingReconciler) webhooks(
ctx context.Context,
cfg capsulev1beta2.DynamicAdmissionConfig,
) (hooks []admissionv1.MutatingWebhook, err error) {
return
}

View File

@@ -0,0 +1,157 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
//nolint:dupl
package admission
import (
"context"
"maps"
"sort"
"github.com/go-logr/logr"
admissionv1 "k8s.io/api/admissionregistration/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/internal/controllers/utils"
"github.com/projectcapsule/capsule/pkg/api/meta"
clt "github.com/projectcapsule/capsule/pkg/runtime/client"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
)
type validatingReconciler struct {
client client.Client
configuration configuration.Configuration
log logr.Logger
}
func (r *validatingReconciler) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.ControllerOptions) error {
return ctrl.NewControllerManagedBy(mgr).
Named("capsule/admission/validating").
For(&capsulev1beta2.CapsuleConfiguration{}, utils.NamesMatchingPredicate(ctrlConfig.ConfigurationName)).
WithOptions(controller.Options{MaxConcurrentReconciles: ctrlConfig.MaxConcurrentReconciles}).
Complete(r)
}
func (r *validatingReconciler) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
err = r.reconcileValidatingConfiguration(ctx, r.configuration.Admission().Validating)
return res, err
}
func (r *validatingReconciler) reconcileValidatingConfiguration(
ctx context.Context,
cfg capsulev1beta2.DynamicAdmissionConfig,
) error {
desiredName := string(cfg.Name)
hooks, err := r.validatingWebhooks(ctx, cfg)
if err != nil {
return err
}
if len(hooks) == 0 {
managed, err := r.listManagedValidatingWebhookConfigs(ctx)
if err != nil {
return err
}
for i := range managed {
if err := r.deleteValidatingWebhookConfig(ctx, managed[i].Name); err != nil {
return err
}
}
return nil
}
obj := &admissionv1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: string(cfg.Name)},
}
sort.Slice(hooks, func(i, j int) bool { return hooks[i].Name < hooks[j].Name })
labels := obj.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
maps.Copy(labels, cfg.Labels)
labels[meta.CreatedByCapsuleLabel] = meta.ControllerValue
obj.SetLabels(labels)
annotations := obj.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
maps.Copy(annotations, cfg.Annotations)
obj.SetAnnotations(annotations)
if err := clt.CreateOrPatch(ctx, r.client, obj, meta.FieldManagerCapsuleController, true); err != nil {
return err
}
// Garbage-collect any old managed validating webhook configs with a different name
managed, err := r.listManagedValidatingWebhookConfigs(ctx)
if err != nil {
return err
}
for i := range managed {
if managed[i].Name == desiredName {
continue
}
if err := r.deleteValidatingWebhookConfig(ctx, managed[i].Name); err != nil {
return err
}
}
return nil
}
func (r *validatingReconciler) listManagedValidatingWebhookConfigs(ctx context.Context) ([]admissionv1.ValidatingWebhookConfiguration, error) {
list := &admissionv1.ValidatingWebhookConfigurationList{}
if err := r.client.List(ctx, list, client.MatchingLabels{
meta.CreatedByCapsuleLabel: meta.ControllerValue,
}); err != nil {
return nil, err
}
return list.Items, nil
}
func (r *validatingReconciler) deleteValidatingWebhookConfig(ctx context.Context, name string) error {
if name == "" {
return nil
}
obj := &admissionv1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: name},
}
err := r.client.Delete(ctx, obj)
if apierrors.IsNotFound(err) {
return nil
}
return err
}
func (r *validatingReconciler) validatingWebhooks(
ctx context.Context,
cfg capsulev1beta2.DynamicAdmissionConfig,
) (hooks []admissionv1.ValidatingWebhook, err error) {
return
}

View File

@@ -0,0 +1,72 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package config
import (
"context"
"github.com/go-logr/logr"
"sigs.k8s.io/controller-runtime/pkg/client"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api/meta"
)
func (r *Manager) getItemsForStatusRegistryCache(ctx context.Context) ([]capsulev1beta2.RuleStatus, error) {
rsList := &capsulev1beta2.RuleStatusList{}
if err := r.List(ctx, rsList,
client.MatchingLabels{
meta.NewManagedByCapsuleLabel: meta.ControllerValue,
meta.CapsuleNameLabel: meta.NameForManagedRuleStatus(),
},
); err != nil {
return nil, err
}
return rsList.Items, nil
}
func (r *Manager) warmupRuleStatusRegistryCache(ctx context.Context, log logr.Logger, items []capsulev1beta2.RuleStatus) error {
for _, item := range items {
regs := item.Status.Rule.Enforce.Registries
if len(regs) == 0 {
continue
}
if _, _, err := r.RegistryCache.GetOrBuild(regs); err != nil {
return err
}
}
log.V(5).Info("warmed up cache based on existing rules", "rules", len(items), "cache_rules", r.RegistryCache.Stats())
return nil
}
func (r *Manager) invalidateRuleStatusRegistryCache(ctx context.Context, log logr.Logger) error {
items, err := r.getItemsForStatusRegistryCache(ctx)
if err != nil {
return err
}
log.V(5).Info("cached before invalidation", "cache_rules", r.RegistryCache.Stats())
active := make(map[string]struct{}, len(items))
for _, item := range items {
regs := item.Status.Rule.Enforce.Registries
if len(regs) == 0 {
continue
}
id := r.RegistryCache.HashRules(regs)
active[id] = struct{}{}
}
_ = r.RegistryCache.PruneActive(active)
log.V(5).Info("cached after invalidation", "rules", len(items), "cache_rules", r.RegistryCache.Stats())
return nil
}

View File

@@ -0,0 +1,51 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package config
import (
"context"
"github.com/go-logr/logr"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
)
// invalidateCaches invokes the invalidation function of every cache.
func (r *Manager) invalidateCaches(ctx context.Context, log logr.Logger) error {
err := r.invalidateRuleStatusRegistryCache(ctx, log)
if err != nil {
return err
}
now := metav1.Now()
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
cfg := &capsulev1beta2.CapsuleConfiguration{}
if err := r.Get(ctx, client.ObjectKey{Name: r.configName}, cfg); err != nil {
return err
}
cfg.Status.LastCacheInvalidation = now
return r.Status().Update(ctx, cfg)
})
}
// populateCaches warms up all custom caches.
func (r *Manager) populateCaches(ctx context.Context, log logr.Logger) error {
items, err := r.getItemsForStatusRegistryCache(ctx)
if err != nil {
return err
}
err = r.warmupRuleStatusRegistryCache(ctx, log, items)
if err != nil {
return err
}
return nil
}

View File

@@ -6,12 +6,14 @@ package config
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -21,20 +23,34 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/internal/cache"
"github.com/projectcapsule/capsule/internal/controllers/utils"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/predicates"
)
type Manager struct {
client.Client
configName string
RegistryCache *cache.RegistryRuleSetCache
Log logr.Logger
}
func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.ControllerOptions) error {
return ctrl.NewControllerManagedBy(mgr).
For(&capsulev1beta2.CapsuleConfiguration{}, utils.NamesMatchingPredicate(ctrlConfig.ConfigurationName)).
func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.ControllerOptions) (err error) {
r.configName = ctrlConfig.ConfigurationName
err = ctrl.NewControllerManagedBy(mgr).
Named("capsule/configuration").
For(
&capsulev1beta2.CapsuleConfiguration{},
builder.WithPredicates(
predicate.GenerationChangedPredicate{},
predicates.NamesMatchingPredicate{Names: []string{ctrlConfig.ConfigurationName}},
),
).
Watches(
&capsulev1beta2.TenantOwner{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
@@ -82,22 +98,41 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.Controller
}),
).
Complete(r)
if err != nil {
return err
}
// register Start(ctx) as a manager runnable.
return mgr.Add(r)
}
// Start is the Runnable function triggered upon Manager start-up to perform cache population.
func (r *Manager) Start(ctx context.Context) error {
if err := r.populateCaches(ctx, r.Log); err != nil {
r.Log.Error(err, "cache population failed")
return nil
}
r.Log.Info("caches populated")
return nil
}
func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
r.Log.V(5).Info("CapsuleConfiguration reconciliation started", "request.name", request.Name)
log := r.Log.WithValues("configuration", request.Name)
cfg := configuration.NewCapsuleConfiguration(ctx, r.Client, request.Name)
instance := &capsulev1beta2.CapsuleConfiguration{}
if err = r.Get(ctx, request.NamespacedName, instance); err != nil {
if apierrors.IsNotFound(err) {
r.Log.V(3).Info("Request object not found, could have been deleted after reconcile request")
log.V(3).Info("requested object not found, could have been deleted after reconcile request")
return reconcile.Result{}, nil
}
r.Log.Error(err, "Error reading the object")
log.Error(err, "error reading the object")
return res, err
}
@@ -110,21 +145,31 @@ func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
}
}()
// Validating the Capsule Configuration options
// Validating the Capsule Configuration options.
if _, err = cfg.ProtectedNamespaceRegexp(); err != nil {
panic(errors.Wrap(err, "Invalid configuration for protected Namespace regex"))
panic(errors.Wrap(err, "invalid configuration for protected Namespace regex"))
}
r.Log.V(5).Info("Validated Regex")
if err := r.gatherCapsuleUsers(ctx, instance, cfg); err != nil {
return reconcile.Result{}, err
}
r.Log.V(5).Info("Gathered users", "users", len(instance.Status.Users))
log.V(5).Info("gathering capsule users", "users", len(instance.Status.Users))
interval := cfg.CacheInvalidation()
if cache.ShouldInvalidate(ptr.To(instance.Status.LastCacheInvalidation), time.Now(), interval.Duration) {
log.V(3).Info("invalidating caches")
if err := r.invalidateCaches(ctx, log); err != nil {
return res, err
}
}
return reconcile.Result{
Requeue: true,
RequeueAfter: interval.Duration,
}, err
}
func (r *Manager) gatherCapsuleUsers(
ctx context.Context,

View File

@@ -21,6 +21,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/utils"
)
@@ -30,6 +31,7 @@ type MetadataReconciler struct {
func (m *MetadataReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Named("capsule/pod").
For(&corev1.Pod{}, m.forOptionPerInstanceName(ctx)).
Complete(m)
}
@@ -41,9 +43,9 @@ func (m *MetadataReconciler) Reconcile(ctx context.Context, request ctrl.Request
tenant, err := m.getTenant(ctx, request.NamespacedName, m.Client)
if err != nil {
noTenantObjError := &NonTenantObjectError{}
noTenantObjError := &caperrors.NonTenantObjectError{}
noPodMetaError := &NoPodMetadataError{}
noPodMetaError := &caperrors.NoPodMetadataError{}
if errors.As(err, &noTenantObjError) || errors.As(err, &noPodMetaError) {
return reconcile.Result{}, nil
}
@@ -82,7 +84,7 @@ func (m *MetadataReconciler) getTenant(ctx context.Context, namespacedName types
capsuleLabel, _ := utils.GetTypeLabel(&capsulev1beta2.Tenant{})
if _, ok := ns.GetLabels()[capsuleLabel]; !ok {
return nil, NewNonTenantObject(namespacedName.Name)
return nil, caperrors.NewNonTenantObject(namespacedName.Name)
}
if err := client.Get(ctx, types.NamespacedName{Name: ns.Labels[capsuleLabel]}, tenant); err != nil {
@@ -90,7 +92,7 @@ func (m *MetadataReconciler) getTenant(ctx context.Context, namespacedName types
}
if tenant.Spec.PodOptions == nil || tenant.Spec.PodOptions.AdditionalMetadata == nil {
return nil, NewNoPodMetadata(namespacedName.Name)
return nil, caperrors.NewNoPodMetadata(namespacedName.Name)
}
return tenant, nil

View File

@@ -19,8 +19,8 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/internal/controllers/utils"
"github.com/projectcapsule/capsule/pkg/tenant"
capsuleutils "github.com/projectcapsule/capsule/pkg/utils"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
)
type Controller struct {
@@ -38,6 +38,7 @@ func (c *Controller) SetupWithManager(mgr ctrl.Manager, cfg utils.ControllerOpti
c.label = label
return ctrl.NewControllerManagedBy(mgr).
Named("capsule/persistentvolumes").
For(&corev1.PersistentVolume{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
pv, ok := object.(*corev1.PersistentVolume)
if !ok {

View File

@@ -6,7 +6,6 @@ package rbac
import (
"context"
"errors"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
@@ -27,7 +26,12 @@ import (
"github.com/projectcapsule/capsule/internal/controllers/utils"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/predicates"
)
const (
controllerManager = "rbac-controller"
)
type Manager struct {
@@ -38,9 +42,12 @@ type Manager struct {
//nolint:revive
func (r *Manager) SetupWithManager(ctx context.Context, mgr ctrl.Manager, ctrlConfig utils.ControllerOptions) (err error) {
namesPredicate := utils.NamesMatchingPredicate(api.ProvisionerRoleName, api.DeleterRoleName)
namesPredicate := predicates.LabelsMatching(map[string]string{
meta.CreatedByCapsuleLabel: controllerManager,
})
crErr := ctrl.NewControllerManagedBy(mgr).
Named("capsule/rbac/roles").
For(&rbacv1.ClusterRole{}, namesPredicate).
Complete(r)
if crErr != nil {
@@ -48,6 +55,7 @@ func (r *Manager) SetupWithManager(ctx context.Context, mgr ctrl.Manager, ctrlCo
}
crbErr := ctrl.NewControllerManagedBy(mgr).
Named("capsule/rbac/bindings").
For(&rbacv1.ClusterRoleBinding{}, namesPredicate).
Watches(&capsulev1beta2.CapsuleConfiguration{}, handler.Funcs{
UpdateFunc: func(ctx context.Context, updateEvent event.TypedUpdateEvent[client.Object], limitingInterface workqueue.TypedRateLimitingInterface[reconcile.Request]) {
@@ -63,7 +71,7 @@ func (r *Manager) SetupWithManager(ctx context.Context, mgr ctrl.Manager, ctrlCo
r.handleSAChange(ctx, e.Object)
},
UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[client.Object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
if utils.LabelsChanged([]string{meta.OwnerPromotionLabel}, e.ObjectOld.GetLabels(), e.ObjectNew.GetLabels()) {
if predicates.LabelsChanged([]string{meta.OwnerPromotionLabel}, e.ObjectOld.GetLabels(), e.ObjectNew.GetLabels()) {
r.handleSAChange(ctx, e.ObjectNew)
}
},
@@ -82,22 +90,18 @@ func (r *Manager) SetupWithManager(ctx context.Context, mgr ctrl.Manager, ctrlCo
// Reconcile serves both required ClusterRole and ClusterRoleBinding resources: that's ok, we're watching for multiple
// resource kinds and we're only interested in the ones with the given names since they're bound together.
func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
rbac := r.Configuration.RBAC()
switch request.Name {
case api.ProvisionerRoleName:
if err = r.EnsureClusterRole(ctx, api.ProvisionerRoleName); err != nil {
r.Log.Error(err, "Reconciliation for ClusterRole failed", "ClusterRole", api.ProvisionerRoleName)
case rbac.ProvisionerClusterRole:
if err = r.EnsureClusterRoleProvisioner(ctx); err != nil {
r.Log.Error(err, "reconciliation for ClusterRole failed", "ClusterRole", rbac.ProvisionerClusterRole)
break
}
if err = r.EnsureClusterRoleBindingsProvisioner(ctx); err != nil {
r.Log.Error(err, "Reconciliation for ClusterRoleBindings (Provisioner) failed")
break
}
case api.DeleterRoleName:
if err = r.EnsureClusterRole(ctx, api.DeleterRoleName); err != nil {
r.Log.Error(err, "Reconciliation for ClusterRole failed", "ClusterRole", api.DeleterRoleName)
case rbac.DeleterClusterRole:
if err = r.EnsureClusterRoleDeleter(ctx); err != nil {
r.Log.Error(err, "reconciliation for ClusterRole failed", "ClusterRole", rbac.DeleterClusterRole)
}
}
@@ -105,13 +109,29 @@ func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
}
func (r *Manager) EnsureClusterRoleBindingsProvisioner(ctx context.Context) error {
rbac := r.Configuration.RBAC()
crb := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: api.ProvisionerRoleName},
ObjectMeta: metav1.ObjectMeta{Name: rbac.ProvisionerClusterRole},
}
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, crb, func() error {
crb.RoleRef = api.ProvisionerClusterRoleBinding.RoleRef
crb.RoleRef = rbacv1.RoleRef{
Kind: "ClusterRole",
Name: rbac.ProvisionerClusterRole,
APIGroup: rbacv1.GroupName,
}
labels := crb.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
labels[meta.CreatedByCapsuleLabel] = controllerManager
crb.SetLabels(labels)
crb.Subjects = nil
users := r.Configuration.GetUsersByStatus()
@@ -169,54 +189,92 @@ func (r *Manager) EnsureClusterRoleBindingsProvisioner(ctx context.Context) erro
})
}
func (r *Manager) EnsureClusterRole(ctx context.Context, roleName string) (err error) {
role, ok := api.ClusterRoles[roleName]
if !ok {
return fmt.Errorf("clusterRole %s is not mapped", roleName)
}
func (r *Manager) EnsureClusterRoleProvisioner(ctx context.Context) (err error) {
clusterRole := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: role.GetName(),
Name: r.Configuration.RBAC().ProvisionerClusterRole,
},
}
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, clusterRole, func() error {
clusterRole.Rules = role.Rules
labels := clusterRole.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
labels[meta.CreatedByCapsuleLabel] = controllerManager
clusterRole.SetLabels(labels)
clusterRole.Rules = []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"namespaces"},
Verbs: []string{"create", "patch"},
},
}
return nil
})
if err != nil {
return err
}
err = r.EnsureClusterRoleBindingsProvisioner(ctx)
if err != nil && apierrors.IsAlreadyExists(err) {
return nil
}
return r.garbageCollectRBAC(ctx)
}
func (r *Manager) EnsureClusterRoleDeleter(ctx context.Context) (err error) {
clusterRole := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: r.Configuration.RBAC().DeleterClusterRole,
},
}
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, clusterRole, func() error {
labels := clusterRole.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
labels[meta.CreatedByCapsuleLabel] = controllerManager
clusterRole.SetLabels(labels)
clusterRole.Rules = []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"namespaces"},
Verbs: []string{"delete"},
},
}
return nil
})
if err != nil {
return err
}
return r.garbageCollectRBAC(ctx)
}
// Start is the Runnable function triggered upon Manager start-up to perform the first RBAC reconciliation
// since we're not creating empty CR and CRB upon Capsule installation: it's a run-once task, since the reconciliation
// is handled by the Reconciler implemented interface.
func (r *Manager) Start(ctx context.Context) error {
for roleName := range api.ClusterRoles {
r.Log.V(4).Info("setting up ClusterRoles", "ClusterRole", roleName)
if err := r.EnsureClusterRole(ctx, roleName); err != nil {
if apierrors.IsAlreadyExists(err) {
continue
}
return err
}
}
r.Log.V(4).Info("setting up ClusterRoleBindings")
if err := r.EnsureClusterRoleBindingsProvisioner(ctx); err != nil {
if apierrors.IsAlreadyExists(err) {
return nil
}
if err := r.EnsureClusterRoleProvisioner(ctx); err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
return nil
if err := r.EnsureClusterRoleDeleter(ctx); err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
return r.garbageCollectRBAC(ctx)
}
func (r *Manager) handleSAChange(ctx context.Context, obj client.Object) {
@@ -228,3 +286,72 @@ func (r *Manager) handleSAChange(ctx context.Context, obj client.Object) {
r.Log.Error(err, "cannot update ClusterRoleBinding upon ServiceAccount event")
}
}
func (r *Manager) garbageCollectRBAC(ctx context.Context) error {
rbac := r.Configuration.RBAC()
desiredCR := map[string]struct{}{
rbac.ProvisionerClusterRole: {},
rbac.DeleterClusterRole: {},
}
desiredCRB := map[string]struct{}{
rbac.ProvisionerClusterRole: {},
}
if err := r.garbageCollectClusterRoles(ctx, desiredCR); err != nil {
return err
}
if err := r.garbageCollectClusterRoleBindings(ctx, desiredCRB); err != nil {
return err
}
return nil
}
//nolint:dupl
func (r *Manager) garbageCollectClusterRoles(ctx context.Context, desired map[string]struct{}) error {
list := &rbacv1.ClusterRoleList{}
if err := r.Client.List(ctx, list, client.MatchingLabels{
meta.CreatedByCapsuleLabel: controllerManager,
}); err != nil {
return err
}
for i := range list.Items {
cr := &list.Items[i]
if _, ok := desired[cr.Name]; ok {
continue
}
if err := r.Client.Delete(ctx, cr); err != nil && !apierrors.IsNotFound(err) {
return err
}
}
return nil
}
//nolint:dupl
func (r *Manager) garbageCollectClusterRoleBindings(ctx context.Context, desired map[string]struct{}) error {
list := &rbacv1.ClusterRoleBindingList{}
if err := r.Client.List(ctx, list, client.MatchingLabels{
meta.CreatedByCapsuleLabel: controllerManager,
}); err != nil {
return err
}
for i := range list.Items {
crb := &list.Items[i]
if _, ok := desired[crb.Name]; ok {
continue
}
if err := r.Client.Delete(ctx, crb); err != nil && !apierrors.IsNotFound(err) {
return err
}
}
return nil
}
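Note: the predicates.LabelsMatching helper used in SetupWithManager above is new in this change, but its body is not part of this hunk. As a rough sketch only (package layout and exact semantics assumed), such a predicate can be built on controller-runtime's predicate.NewPredicateFuncs:

// Hedged sketch, not the shipped implementation: admit events only for objects
// carrying every required label with the exact value, which is how the RBAC
// controller scopes itself to the ClusterRoles/ClusterRoleBindings it created.
package predicates

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

func LabelsMatching(required map[string]string) predicate.Predicate {
	return predicate.NewPredicateFuncs(func(object client.Object) bool {
		objectLabels := object.GetLabels()
		for key, value := range required {
			if objectLabels[key] != value {
				return false
			}
		}
		return true
	})
}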

View File

@@ -12,7 +12,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -27,6 +27,7 @@ import (
"github.com/projectcapsule/capsule/internal/metrics"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
)
type resourceClaimController struct {
@@ -34,11 +35,12 @@ type resourceClaimController struct {
metrics *metrics.ClaimRecorder
log logr.Logger
recorder record.EventRecorder
recorder events.EventRecorder
}
func (r *resourceClaimController) SetupWithManager(mgr ctrl.Manager, cfg utils.ControllerOptions) error {
return ctrl.NewControllerManagedBy(mgr).
Named("capsule/resourcepools/claims").
For(&capsulev1beta2.ResourcePoolClaim{}).
Watches(
&capsulev1beta2.ResourcePool{},
@@ -209,12 +211,14 @@ func (r resourceClaimController) allocateResourcePool(
UID: pool.GetUID(),
}
if !meta.HasLooseOwnerReference(cl, pool) {
reference := meta.GetLooseOwnerReference(pool)
if !meta.HasLooseOwnerReference(cl, reference) {
log.V(4).Info("adding ownerreference for", "pool", pool.Name)
patch := client.MergeFrom(cl.DeepCopy())
if err := meta.SetLooseOwnerReference(cl, pool, r.Scheme()); err != nil {
if err := meta.SetLooseOwnerReference(cl, reference); err != nil {
return err
}
@@ -250,7 +254,7 @@ func (r resourceClaimController) allocateResourcePool(
func updateStatusAndEmitEvent(
ctx context.Context,
c client.Client,
recorder record.EventRecorder,
recorder events.EventRecorder,
claim *capsulev1beta2.ResourcePoolClaim,
condition metav1.Condition,
) (err error) {
@@ -283,14 +287,12 @@ func updateStatusAndEmitEvent(
eventType = corev1.EventTypeWarning
}
recorder.AnnotatedEventf(
recorder.Eventf(
claim,
map[string]string{
"Status": string(claim.Status.Condition.Status),
"Type": claim.Status.Condition.Type,
},
nil,
eventType,
claim.Status.Condition.Reason,
evt.ActionReconciled,
claim.Status.Condition.Message,
)

View File

@@ -7,7 +7,7 @@ import (
"fmt"
"github.com/go-logr/logr"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/projectcapsule/capsule/internal/controllers/utils"
@@ -17,7 +17,7 @@ import (
func Add(
log logr.Logger,
mgr manager.Manager,
recorder record.EventRecorder,
recorder events.EventRecorder,
cfg utils.ControllerOptions,
) (err error) {
if err = (&resourcePoolController{

View File

@@ -16,7 +16,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -30,6 +30,7 @@ import (
"github.com/projectcapsule/capsule/internal/metrics"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/utils"
)
@@ -38,11 +39,12 @@ type resourcePoolController struct {
metrics *metrics.ResourcePoolRecorder
log logr.Logger
recorder record.EventRecorder
recorder events.EventRecorder
}
func (r *resourcePoolController) SetupWithManager(mgr ctrl.Manager, cfg ctrlutils.ControllerOptions) error {
return ctrl.NewControllerManagedBy(mgr).
Named("capsule/resourcepools/pools").
For(&capsulev1beta2.ResourcePool{}).
Owns(&corev1.ResourceQuota{}).
Watches(&capsulev1beta2.ResourcePoolClaim{},
@@ -350,15 +352,15 @@ func (r *resourcePoolController) handleClaimResourceExhaustion(
currentExhaustions map[string]api.PoolExhaustionResource,
exhaustions map[string]api.PoolExhaustionResource,
) (err error) {
status := make([]string, 0) //nolint:prealloc
resourceNames := make([]string, 0) //nolint:prealloc
resourceNames := make([]string, 0, len(currentExhaustions))
for resourceName := range currentExhaustions {
resourceNames = append(resourceNames, resourceName)
}
sort.Strings(resourceNames)
status := make([]string, 0, len(resourceNames))
for _, resourceName := range resourceNames {
ex := currentExhaustions[resourceName]
@@ -441,7 +443,7 @@ func (r *resourcePoolController) handleClaimDisassociation(
if !*pool.Spec.Config.DeleteBoundResources || meta.ReleaseAnnotationTriggers(current) {
patch := client.MergeFrom(current.DeepCopy())
meta.RemoveLooseOwnerReference(current, pool)
meta.RemoveLooseOwnerReference(current, meta.GetLooseOwnerReference(pool))
meta.ReleaseAnnotationRemove(current)
if err := r.Patch(ctx, current, patch); err != nil {
@@ -454,15 +456,13 @@ func (r *resourcePoolController) handleClaimDisassociation(
return fmt.Errorf("failed to update claim status: %w", err)
}
r.recorder.AnnotatedEventf(
r.recorder.Eventf(
pool,
current,
map[string]string{
"Status": string(metav1.ConditionFalse),
"Type": meta.NotReadyCondition,
},
corev1.EventTypeNormal,
"Disassociated",
"Claim is disassociated from the pool",
evt.ReasonDisassociated,
evt.ActionDisassociating,
"claim is disassociated from the pool",
)
return nil
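Note: this hunk, like the claim controller above, moves from record.EventRecorder to the structured recorder in k8s.io/client-go/tools/events. For reference, its Eventf call takes a related object and an action in addition to the fields the legacy recorder used, which is why the call sites gain the pool/claim pair and the evt.Action* values:

// Abridged interface shape from k8s.io/client-go/tools/events, shown for reference only.
import "k8s.io/apimachinery/pkg/runtime"

type EventRecorder interface {
	Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason,
		action, note string, args ...interface{})
}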

View File

@@ -25,6 +25,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
tpl "github.com/projectcapsule/capsule/pkg/template"
"github.com/projectcapsule/capsule/pkg/tenant"
)
const (
@@ -243,7 +244,9 @@ func (r *Processor) HandleSection(ctx context.Context, tnt capsulev1beta2.Tenant
for rawIndex, item := range spec.RawItems {
template := string(item.Raw)
tmplString := tpl.TemplateForTenantAndNamespace(template, &tnt, &ns)
fastContext := tenant.ContextForTenantAndNamespace(&tnt, &ns)
tmplString := tpl.FastTemplate(template, fastContext)
obj, keysAndValues := unstructured.Unstructured{}, []any{"index", rawIndex}
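Note: neither tenant.ContextForTenantAndNamespace nor tpl.FastTemplate is shown in this hunk. Purely as an illustration of the pattern (a precomputed key/value context fed to a simple string replacer), one possible shape would be:

// Illustrative only; the real helpers in pkg/tenant and pkg/template may use a
// different context type and placeholder syntax.
import "strings"

func fastTemplate(template string, context map[string]string) string {
	pairs := make([]string, 0, len(context)*2)
	for placeholder, value := range context {
		pairs = append(pairs, placeholder, value)
	}
	return strings.NewReplacer(pairs...).Replace(template)
}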

View File

@@ -21,6 +21,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/utils"
)
@@ -33,9 +34,9 @@ type abstractServiceLabelsReconciler struct {
func (r *abstractServiceLabelsReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
tenant, err := r.getTenant(ctx, request.NamespacedName, r.client)
if err != nil {
noTenantObjError := &NonTenantObjectError{}
noTenantObjError := &caperrors.NonTenantObjectError{}
noSvcMetaError := &NoServicesMetadataError{}
noSvcMetaError := &caperrors.NoServicesMetadataError{}
if errors.As(err, &noTenantObjError) || errors.As(err, &noSvcMetaError) {
return reconcile.Result{}, nil
}
@@ -85,7 +86,7 @@ func (r *abstractServiceLabelsReconciler) getTenant(ctx context.Context, namespa
capsuleLabel, _ := utils.GetTypeLabel(&capsulev1beta2.Tenant{})
if _, ok := ns.GetLabels()[capsuleLabel]; !ok {
return nil, NewNonTenantObject(namespacedName.Name)
return nil, caperrors.NewNonTenantObject(namespacedName.Name)
}
if err := client.Get(ctx, types.NamespacedName{Name: ns.Labels[capsuleLabel]}, tenant); err != nil {
@@ -93,7 +94,7 @@ func (r *abstractServiceLabelsReconciler) getTenant(ctx context.Context, namespa
}
if tenant.Spec.ServiceOptions == nil || tenant.Spec.ServiceOptions.AdditionalMetadata == nil {
return nil, NewNoServicesMetadata(namespacedName.Name)
return nil, caperrors.NewNoServicesMetadata(namespacedName.Name)
}
return tenant, nil

View File

@@ -28,5 +28,6 @@ func (r *EndpointSlicesLabelsReconciler) SetupWithManager(ctx context.Context, m
return ctrl.NewControllerManagedBy(mgr).
For(r.abstractServiceLabelsReconciler.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName(ctx)).
Named("capsule/endpointslices").
Complete(r)
}

View File

@@ -1,30 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package servicelabels
import "fmt"
type NonTenantObjectError struct {
objectName string
}
func NewNonTenantObject(objectName string) error {
return &NonTenantObjectError{objectName: objectName}
}
func (n NonTenantObjectError) Error() string {
return fmt.Sprintf("Skipping labels sync for %s as it doesn't belong to tenant", n.objectName)
}
type NoServicesMetadataError struct {
objectName string
}
func NewNoServicesMetadata(objectName string) error {
return &NoServicesMetadataError{objectName: objectName}
}
func (n NoServicesMetadataError) Error() string {
return fmt.Sprintf("Skipping labels sync for %s because no AdditionalLabels or AdditionalAnnotations presents in Tenant spec", n.objectName)
}

View File

@@ -26,5 +26,6 @@ func (r *ServicesLabelsReconciler) SetupWithManager(ctx context.Context, mgr ctr
return ctrl.NewControllerManagedBy(mgr).
For(r.abstractServiceLabelsReconciler.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName(ctx)).
Named("capsule/services").
Complete(r)
}

View File

@@ -72,8 +72,6 @@ func (r *Manager) syncLimitRange(ctx context.Context, tenant *capsulev1beta2.Ten
return controllerutil.SetControllerReference(tenant, target, r.Scheme())
})
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring LimitRange %s", target.GetName()), err)
r.Log.V(4).Info("LimitRange sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
if err != nil {

View File

@@ -22,7 +22,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"k8s.io/client-go/util/retry"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
@@ -40,7 +40,9 @@ import (
"github.com/projectcapsule/capsule/internal/metrics"
"github.com/projectcapsule/capsule/pkg/api"
meta "github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/gvk"
"github.com/projectcapsule/capsule/pkg/runtime/predicates"
)
type Manager struct {
@@ -48,7 +50,7 @@ type Manager struct {
Metrics *metrics.TenantRecorder
Log logr.Logger
Recorder record.EventRecorder
Recorder events.EventRecorder
Configuration configuration.Configuration
RESTConfig *rest.Config
classes supportedClasses
@@ -61,6 +63,7 @@ type supportedClasses struct {
func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.ControllerOptions) error {
ctrlBuilder := ctrl.NewControllerManagedBy(mgr).
Named("capsule/tenants").
For(
&capsulev1beta2.Tenant{},
builder.WithPredicates(
@@ -74,8 +77,10 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.Controller
Watches(
&capsulev1beta2.CapsuleConfiguration{},
handler.EnqueueRequestsFromMapFunc(r.enqueueAllTenants),
utils.NamesMatchingPredicate(ctrlConfig.ConfigurationName),
builder.WithPredicates(utils.CapsuleConfigSpecChangedPredicate),
builder.WithPredicates(
predicates.CapsuleConfigSpecChangedPredicate{},
predicates.NamesMatchingPredicate{Names: []string{ctrlConfig.ConfigurationName}},
),
).
Watches(
&corev1.Namespace{},
@@ -88,7 +93,7 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.Controller
r.collectAvailableStorageClasses,
"cannot collect storage classes",
),
builder.WithPredicates(utils.UpdatedMetadataPredicate),
builder.WithPredicates(predicates.UpdatedLabelsPredicate{}),
).
Watches(
&schedulingv1.PriorityClass{},
@@ -97,7 +102,7 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.Controller
r.collectAvailablePriorityClasses,
"cannot collect priority classes",
),
builder.WithPredicates(utils.UpdatedMetadataPredicate),
builder.WithPredicates(predicates.UpdatedLabelsPredicate{}),
).
Watches(
&nodev1.RuntimeClass{},
@@ -106,7 +111,7 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.Controller
r.collectAvailableRuntimeClasses,
"cannot collect runtime classes",
),
builder.WithPredicates(utils.UpdatedMetadataPredicate),
builder.WithPredicates(predicates.UpdatedLabelsPredicate{}),
).
Watches(
&capsulev1beta2.TenantOwner{},
@@ -183,12 +188,12 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.Controller
})
},
},
builder.WithPredicates(utils.PromotedServiceaccountPredicate),
builder.WithPredicates(predicates.PromotedServiceaccountPredicate{}),
).
WithOptions(controller.Options{MaxConcurrentReconciles: ctrlConfig.MaxConcurrentReconciles})
// GatewayClass is Optional
r.classes.gateway = utils.HasGVK(mgr.GetRESTMapper(), schema.GroupVersionKind{
r.classes.gateway = gvk.HasGVK(mgr.GetRESTMapper(), schema.GroupVersionKind{
Group: "gateway.networking.k8s.io",
Version: "v1",
Kind: "GatewayClass",
@@ -202,12 +207,12 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.Controller
r.collectAvailableGatewayClasses,
"cannot collect gateway classes",
),
builder.WithPredicates(utils.UpdatedMetadataPredicate),
builder.WithPredicates(predicates.UpdatedLabelsPredicate{}),
)
}
// DeviceClass is Optional
r.classes.device = utils.HasGVK(mgr.GetRESTMapper(), schema.GroupVersionKind{
r.classes.device = gvk.HasGVK(mgr.GetRESTMapper(), schema.GroupVersionKind{
Group: "resource.k8s.io",
Version: "v1",
Kind: "DeviceClass",
@@ -221,7 +226,7 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager, ctrlConfig utils.Controller
r.collectAvailableDeviceClasses,
"cannot collect device classes",
),
builder.WithPredicates(utils.UpdatedMetadataPredicate),
builder.WithPredicates(predicates.UpdatedLabelsPredicate{}),
)
}
@@ -235,7 +240,7 @@ func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ct
instance := &capsulev1beta2.Tenant{}
if err = r.Get(ctx, request.NamespacedName, instance); err != nil {
if apierrors.IsNotFound(err) {
r.Log.V(3).Info("Request object not found, could have been deleted after reconcile request")
r.Log.V(3).Info("request object not found, could have been deleted after reconcile request")
// If tenant was deleted or cannot be found, clean up metrics
r.Metrics.DeleteAllMetricsForTenant(request.Name)
@@ -243,7 +248,7 @@ func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ct
return reconcile.Result{}, nil
}
r.Log.Error(err, "Error reading the object")
r.Log.Error(err, "error reading the object")
return result, err
}
@@ -278,7 +283,7 @@ func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ct
}
// Ensuring ResourceQuota
r.Log.V(4).Info("Ensuring limit resources count is updated")
r.Log.V(4).Info("ensuring limit resources count is updated")
if err = r.syncCustomResourceQuotaUsages(ctx, instance); err != nil {
err = fmt.Errorf("cannot count limited resources: %w", err)
@@ -287,7 +292,7 @@ func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ct
}
// Reconcile Namespaces
r.Log.V(4).Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))
r.Log.V(4).Info("starting processing of Namespaces", "items", len(instance.Status.Namespaces))
if err = r.reconcileNamespaces(ctx, instance); err != nil {
err = fmt.Errorf("namespace(s) had reconciliation errors")
@@ -296,7 +301,7 @@ func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ct
}
// Ensuring NetworkPolicy resources
r.Log.V(4).Info("Starting processing of Network Policies")
r.Log.V(4).Info("starting processing of Network Policies")
if err = r.syncNetworkPolicies(ctx, instance); err != nil {
err = fmt.Errorf("cannot sync networkPolicy items: %w", err)

View File

@@ -20,7 +20,7 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
"github.com/projectcapsule/capsule/pkg/tenant"
)
// Ensuring all annotations are applied to each Namespace handled by the Tenant.
@@ -116,9 +116,20 @@ func (r *Manager) reconcileNamespace(ctx context.Context, namespace string, tnt
r.syncNamespaceStatusMetrics(tnt, ns)
}()
// Collect Rules for namespace
ruleBody, err := tenant.BuildNamespaceRuleBodyForNamespace(ns, tnt)
if err != nil {
return err
}
err = r.ensureRuleStatus(ctx, ns, tnt, ruleBody, namespace)
if err != nil {
return err
}
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (conflictErr error) {
_, conflictErr = controllerutil.CreateOrUpdate(ctx, r.Client, ns, func() error {
metaStatus, err = r.reconcileMetadata(ctx, ns, tnt, stat)
metaStatus, err = r.reconcileNamespaceMetadata(ctx, ns, tnt, stat)
return err
})
@@ -129,8 +140,53 @@ func (r *Manager) reconcileNamespace(ctx context.Context, namespace string, tnt
return err
}
func (r *Manager) ensureRuleStatus(
ctx context.Context,
ns *corev1.Namespace,
tnt *capsulev1beta2.Tenant,
rule *capsulev1beta2.NamespaceRuleBody,
namespace string,
) error {
nsStatus := &capsulev1beta2.RuleStatus{
ObjectMeta: metav1.ObjectMeta{
Name: meta.NameForManagedRuleStatus(),
Namespace: namespace,
},
}
_, err := controllerutil.CreateOrUpdate(ctx, r.Client, nsStatus, func() error {
labels := nsStatus.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
labels[meta.NewManagedByCapsuleLabel] = meta.ControllerValue
labels[meta.CapsuleNameLabel] = nsStatus.Name
nsStatus.SetLabels(labels)
err := controllerutil.SetOwnerReference(tnt, nsStatus, r.Scheme())
if err != nil {
return err
}
return controllerutil.SetOwnerReference(ns, nsStatus, r.Scheme())
})
if err != nil {
return err
}
nsStatus.Status.Rule = *rule
if err := r.Status().Update(ctx, nsStatus); err != nil {
return err
}
return nil
}
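Note: ensureRuleStatus persists the computed rule body in a per-namespace RuleStatus object owned by both the Tenant and the Namespace. A hedged sketch of how another component could read it back (the Get-by-name pattern is an assumption; only meta.NameForManagedRuleStatus and the Status.Rule field appear in this hunk):

// Sketch only; error wrapping and caching omitted.
func ruleBodyForNamespace(ctx context.Context, c client.Client, namespace string) (*capsulev1beta2.NamespaceRuleBody, error) {
	ruleStatus := &capsulev1beta2.RuleStatus{}
	key := types.NamespacedName{Namespace: namespace, Name: meta.NameForManagedRuleStatus()}
	if err := c.Get(ctx, key, ruleStatus); err != nil {
		return nil, err
	}
	return &ruleStatus.Status.Rule, nil
}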
//nolint:nestif
func (r *Manager) reconcileMetadata(
func (r *Manager) reconcileNamespaceMetadata(
ctx context.Context,
ns *corev1.Namespace,
tnt *capsulev1beta2.Tenant,

View File

@@ -72,9 +72,7 @@ func (r *Manager) syncNetworkPolicy(ctx context.Context, tenant *capsulev1beta2.
return controllerutil.SetControllerReference(tenant, target, r.Scheme())
})
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring NetworkPolicy %s", target.GetName()), err)
r.Log.V(4).Info("Network Policy sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
r.Log.V(4).Info("network Policy sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
if err != nil {
return err

View File

@@ -68,20 +68,20 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2
var tntRequirement *labels.Requirement
if tntRequirement, scopeErr = labels.NewRequirement(meta.TenantLabel, selection.Equals, []string{tenant.Name}); scopeErr != nil {
r.Log.Error(scopeErr, "Cannot build ResourceQuota Tenant requirement")
r.Log.Error(scopeErr, "cannot build ResourceQuota Tenant requirement")
}
// Requirement to list ResourceQuota for the current index
var indexRequirement *labels.Requirement
if indexRequirement, scopeErr = labels.NewRequirement(meta.ResourceQuotaLabel, selection.Equals, []string{strconv.Itoa(index)}); scopeErr != nil {
r.Log.Error(scopeErr, "Cannot build ResourceQuota index requirement")
r.Log.Error(scopeErr, "cannot build ResourceQuota index requirement")
}
// Listing all the ResourceQuota according to the said requirements.
// These are required since Capsule is going to sum all the used quotas
// to compute the Tenant-wide one.
list := &corev1.ResourceQuotaList{}
if scopeErr = r.List(ctx, list, &client.ListOptions{LabelSelector: labels.NewSelector().Add(*tntRequirement).Add(*indexRequirement)}); scopeErr != nil {
r.Log.Error(scopeErr, "Cannot list ResourceQuota", "tenantFilter", tntRequirement.String(), "indexFilter", indexRequirement.String())
r.Log.Error(scopeErr, "cannot list ResourceQuota", "tenantFilter", tntRequirement.String(), "indexFilter", indexRequirement.String())
return scopeErr
}
@@ -92,7 +92,7 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2
// For this case, we're going to block the Quota setting the Hard as the
// used one.
for name, hardQuota := range resourceQuota.Hard {
r.Log.V(4).Info("Desired hard " + name.String() + " quota is " + hardQuota.String())
r.Log.V(4).Info("desired hard " + name.String() + " quota is " + hardQuota.String())
// Getting the whole usage across all the Tenant Namespaces
var quantity resource.Quantity
@@ -100,7 +100,7 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2
quantity.Add(item.Status.Used[name])
}
r.Log.V(4).Info("Computed " + name.String() + " quota for the whole Tenant is " + quantity.String())
r.Log.V(4).Info("computed " + name.String() + " quota for the whole Tenant is " + quantity.String())
// Expose usage and limit metrics for the resource (name) of the ResourceQuota (index)
r.Metrics.TenantResourceUsageGauge.WithLabelValues(
@@ -247,9 +247,7 @@ func (r *Manager) syncResourceQuota(ctx context.Context, tenant *capsulev1beta2.
return retryErr
})
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring ResourceQuota %s", target.GetName()), err)
r.Log.V(4).Info("Resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
r.Log.V(4).Info("resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
if err != nil {
return err
@@ -338,7 +336,7 @@ func (r *Manager) resourceQuotasUpdate(ctx context.Context, resourceName corev1.
if err = group.Wait(); err != nil {
// We had an error and we mark the whole transaction as failed
// to process it another time according to the Tenant controller back-off factor.
r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
r.Log.Error(err, "cannot update outer ResourceQuotas", "resourceName", resourceName.String())
err = fmt.Errorf("update of outer ResourceQuota items has failed: %w", err)
}
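Note: the loop above sums Status.Used across every namespaced ResourceQuota matched by the tenant and index requirements to obtain the tenant-wide figure. Distilled into a stand-alone helper (illustrative, not part of this commit):

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// tenantUsage returns the total used quantity of a single resource across all
// ResourceQuota objects belonging to the tenant's namespaces.
func tenantUsage(items []corev1.ResourceQuota, name corev1.ResourceName) resource.Quantity {
	var total resource.Quantity
	for _, item := range items {
		used := item.Status.Used[name]
		total.Add(used)
	}
	return total
}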

View File

@@ -25,8 +25,8 @@ func (r *Manager) syncCustomResourceQuotaUsages(ctx context.Context, tenant *cap
group string
version string
}
//nolintlint:prealloc
var resourceList []resource
resourceList := make([]resource, 0, len(tenant.GetAnnotations()))
for k := range tenant.GetAnnotations() {
if !strings.HasPrefix(k, capsulev1beta2.ResourceQuotaAnnotationPrefix) {

View File

@@ -92,14 +92,11 @@ func (r *Manager) syncAdditionalRoleBinding(
return controllerutil.SetControllerReference(tenant, target, r.Scheme())
})
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring RoleBinding %s", target.GetName()), err)
if err != nil {
r.Log.Error(err, "Cannot sync RoleBinding")
r.Log.Error(err, "cannot sync RoleBinding")
}
r.Log.V(4).Info(fmt.Sprintf("RoleBinding sync result: %s", string(res)), "name", target.Name, "namespace", target.Namespace)
r.Log.V(4).Info(fmt.Sprintf("roleBinding sync result: %s", string(res)), "name", target.Name, "namespace", target.Namespace)
if err != nil {
return err

View File

@@ -21,15 +21,16 @@ import (
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/tenant"
)
// Sets a label on the Tenant object with its name.
func (r *Manager) collectOwners(ctx context.Context, tnt *capsulev1beta2.Tenant) (err error) {
owners, err := tnt.CollectOwners(
owners, err := tenant.CollectOwners(
ctx,
r.Client,
r.Configuration.AllowServiceAccountPromotion(),
r.Configuration.Administrators(),
tnt,
r.Configuration,
)
if err != nil {
return err

View File

@@ -6,15 +6,12 @@ package tenant
import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -171,7 +168,7 @@ func (r *Manager) pruningResources(ctx context.Context, ns string, keys []string
selector = selector.Add(*notIn)
}
r.Log.V(3).Info("Pruning objects with label selector " + selector.String())
r.Log.V(4).Info("pruning objects with label selector " + selector.String())
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
return r.DeleteAllOf(ctx, obj, &client.DeleteAllOfOptions{
@@ -183,14 +180,3 @@ func (r *Manager) pruningResources(ctx context.Context, ns string, keys []string
})
})
}
func (r *Manager) emitEvent(object runtime.Object, namespace string, res controllerutil.OperationResult, msg string, err error) {
eventType := corev1.EventTypeNormal
if err != nil {
eventType = corev1.EventTypeWarning
res = "Error"
}
r.Recorder.AnnotatedEventf(object, map[string]string{"OperationResult": string(res)}, eventType, namespace, msg)
}
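Note: pruningResources deletes everything the tenant owns whose resource key is no longer desired, by combining an Equals requirement on the owner label with a NotIn requirement on the keys to keep. The same pattern in isolation (label keys are placeholders here, not the controller's real ones):

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

// pruneSelector selects objects labelled as owned by tenantName whose
// resourceKeyLabel value is not among keepKeys, i.e. the ones safe to delete.
func pruneSelector(ownerLabel, tenantName, resourceKeyLabel string, keepKeys []string) (labels.Selector, error) {
	selector := labels.NewSelector()
	owned, err := labels.NewRequirement(ownerLabel, selection.Equals, []string{tenantName})
	if err != nil {
		return nil, err
	}
	selector = selector.Add(*owned)
	if len(keepKeys) > 0 {
		notIn, err := labels.NewRequirement(resourceKeyLabel, selection.NotIn, keepKeys)
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*notIn)
	}
	return selector, nil
}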

View File

@@ -29,8 +29,9 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/projectcapsule/capsule/internal/controllers/utils"
"github.com/projectcapsule/capsule/pkg/cert"
"github.com/projectcapsule/capsule/pkg/configuration"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/runtime/cert"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
)
const (
@@ -62,6 +63,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Secret{}, utils.NamesMatchingPredicate(r.Configuration.TLSSecretName())).
Named("capsule/tls").
Watches(&admissionregistrationv1.ValidatingWebhookConfiguration{}, enqueueFn, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == r.Configuration.ValidatingWebhookConfigurationName()
}))).
@@ -140,7 +142,7 @@ func (r Reconciler) ReconcileCertificates(ctx context.Context, certSecret *corev
operatorPods, err := r.getOperatorPods(ctx)
if err != nil {
if errors.As(err, &RunningInOutOfClusterModeError{}) {
if errors.As(err, &caperrors.RunningInOutOfClusterModeError{}) {
r.Log.Info("skipping annotation of Pods for cert-manager", "error", err.Error())
return nil
@@ -331,7 +333,7 @@ func (r Reconciler) getOperatorPods(ctx context.Context) (*corev1.PodList, error
leaderPod := &corev1.Pod{}
if err := r.Get(ctx, types.NamespacedName{Namespace: os.Getenv("NAMESPACE"), Name: hostname}, leaderPod); err != nil {
return nil, RunningInOutOfClusterModeError{}
return nil, caperrors.RunningInOutOfClusterModeError{}
}
podList := &corev1.PodList{}

View File

@@ -7,34 +7,34 @@ import (
"context"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type warningHandler struct{}
func WarningHandler() capsulewebhook.Handler {
func WarningHandler() handlers.Handler {
return &warningHandler{}
}
func (h *warningHandler) OnCreate(_ client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func {
func (h *warningHandler) OnCreate(_ client.Client, decoder admission.Decoder, _ events.EventRecorder) handlers.Func {
return func(_ context.Context, req admission.Request) *admission.Response {
return h.handle(decoder, req)
}
}
func (h *warningHandler) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *warningHandler) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *warningHandler) OnUpdate(_ client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func {
func (h *warningHandler) OnUpdate(_ client.Client, decoder admission.Decoder, _ events.EventRecorder) handlers.Func {
return func(_ context.Context, req admission.Request) *admission.Response {
return h.handle(decoder, req)
}

View File

@@ -1,91 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package defaults
import (
"fmt"
"reflect"
gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)
type StorageClassError struct {
storageClass string
msg error
}
func NewStorageClassError(class string, msg error) error {
return &StorageClassError{
storageClass: class,
msg: msg,
}
}
func (e StorageClassError) Error() string {
return fmt.Sprintf("Failed to resolve Storage Class %s: %s", e.storageClass, e.msg)
}
type IngressClassError struct {
ingressClass string
msg error
}
func NewIngressClassError(class string, msg error) error {
return &IngressClassError{
ingressClass: class,
msg: msg,
}
}
func (e IngressClassError) Error() string {
return fmt.Sprintf("Failed to resolve Ingress Class %s: %s", e.ingressClass, e.msg)
}
type GatewayClassError struct {
gatewayClass string
msg error
}
func NewGatewayClassError(class string, msg error) error {
return &GatewayClassError{
gatewayClass: class,
msg: msg,
}
}
func (e GatewayClassError) Error() string {
return fmt.Sprintf("Failed to resolve Gateway Class %s: %s", e.gatewayClass, e.msg)
}
type GatewayError struct {
gateway string
msg error
}
func NewGatewayError(gateway gatewayv1.ObjectName, msg error) error {
return &GatewayError{
gateway: reflect.ValueOf(gateway).String(),
msg: msg,
}
}
func (e GatewayError) Error() string {
return fmt.Sprintf("Failed to resolve Gateway %s: %s", e.gateway, e.msg)
}
type PriorityClassError struct {
priorityClass string
msg error
}
func NewPriorityClassError(class string, msg error) error {
return &PriorityClassError{
priorityClass: class,
msg: msg,
}
}
func (e PriorityClassError) Error() string {
return fmt.Sprintf("Failed to resolve Priority Class %s: %s", e.priorityClass, e.msg)
}

View File

@@ -8,18 +8,17 @@ import (
"encoding/json"
"net/http"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
capsulegateway "github.com/projectcapsule/capsule/internal/webhook/gateway"
"github.com/projectcapsule/capsule/internal/webhook/utils"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
)
func mutateGatewayDefaults(ctx context.Context, req admission.Request, c client.Client, decoder admission.Decoder, recorder record.EventRecorder, namespce string) *admission.Response {
func mutateGatewayDefaults(ctx context.Context, req admission.Request, c client.Client, decoder admission.Decoder, namespce string) *admission.Response {
gatewayObj := &gatewayv1.Gateway{}
if err := decoder.Decode(req, gatewayObj); err != nil {
return utils.ErroredResponse(err)
@@ -50,7 +49,7 @@ func mutateGatewayDefaults(ctx context.Context, req admission.Request, c client.
if gatewayObj.Spec.GatewayClassName == ("") {
mutate = true
} else {
response := admission.Denied(NewGatewayError(gatewayObj.Spec.GatewayClassName, err).Error())
response := admission.Denied(caperrors.NewGatewayError(gatewayObj.Spec.GatewayClassName, err).Error())
return &response
}
@@ -58,7 +57,7 @@ func mutateGatewayDefaults(ctx context.Context, req admission.Request, c client.
if gatewayClass != nil && gatewayClass.Name != allowed.Default {
if err != nil && !k8serrors.IsNotFound(err) {
response := admission.Denied(NewGatewayClassError(gatewayClass.Name, err).Error())
response := admission.Denied(caperrors.NewGatewayClassError(gatewayClass.Name, err).Error())
return &response
}
@@ -79,8 +78,6 @@ func mutateGatewayDefaults(ctx context.Context, req admission.Request, c client.
return &response
}
recorder.Eventf(tnt, corev1.EventTypeNormal, "TenantDefault", "Assigned Tenant default Gateway Class %s to %s/%s", allowed.Default, gatewayObj.Name, gatewayObj.Namespace)
response := admission.PatchResponseFromRaw(req.Object.Raw, marshaled)
return &response

View File

@@ -8,12 +8,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type handler struct {
@@ -21,43 +21,43 @@ type handler struct {
version *version.Version
}
func Handler(cfg configuration.Configuration, version *version.Version) capsulewebhook.Handler {
func Handler(cfg configuration.Configuration, version *version.Version) handlers.Handler {
return &handler{
cfg: cfg,
version: version,
}
}
func (h *handler) OnCreate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *handler) OnCreate(client client.Client, decoder admission.Decoder, _ events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.mutate(ctx, req, client, decoder, recorder)
return h.mutate(ctx, req, client, decoder)
}
}
func (h *handler) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *handler) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *handler) OnUpdate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *handler) OnUpdate(client client.Client, decoder admission.Decoder, _ events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.mutate(ctx, req, client, decoder, recorder)
return h.mutate(ctx, req, client, decoder)
}
}
func (h *handler) mutate(ctx context.Context, req admission.Request, c client.Client, decoder admission.Decoder, recorder record.EventRecorder) *admission.Response {
func (h *handler) mutate(ctx context.Context, req admission.Request, c client.Client, decoder admission.Decoder) *admission.Response {
var response *admission.Response
switch req.Resource {
case metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}:
response = mutatePodDefaults(ctx, req, c, decoder, recorder, req.Namespace)
response = mutatePodDefaults(ctx, req, c, decoder, req.Namespace)
case metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}:
response = mutatePVCDefaults(ctx, req, c, decoder, recorder, req.Namespace)
response = mutatePVCDefaults(ctx, req, c, decoder, req.Namespace)
case metav1.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}, metav1.GroupVersionResource{Group: "networking.k8s.io", Version: "v1beta1", Resource: "ingresses"}:
response = mutateIngressDefaults(ctx, req, h.version, c, decoder, recorder, req.Namespace)
response = mutateIngressDefaults(ctx, req, h.version, c, decoder, req.Namespace)
case metav1.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1", Resource: "gateways"}:
response = mutateGatewayDefaults(ctx, req, c, decoder, recorder, req.Namespace)
response = mutateGatewayDefaults(ctx, req, c, decoder, req.Namespace)
}
if response == nil {

View File

@@ -8,19 +8,18 @@ import (
"encoding/json"
"net/http"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsuleingress "github.com/projectcapsule/capsule/internal/webhook/ingress"
"github.com/projectcapsule/capsule/internal/webhook/utils"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
)
func mutateIngressDefaults(ctx context.Context, req admission.Request, version *version.Version, c client.Client, decoder admission.Decoder, recorder record.EventRecorder, namespace string) *admission.Response {
func mutateIngressDefaults(ctx context.Context, req admission.Request, version *version.Version, c client.Client, decoder admission.Decoder, namespace string) *admission.Response {
ingress, err := capsuleingress.FromRequest(req, decoder)
if err != nil {
return utils.ErroredResponse(err)
@@ -51,7 +50,7 @@ func mutateIngressDefaults(ctx context.Context, req admission.Request, version *
if ingressClassName := ingress.IngressClass(); ingressClassName != nil && *ingressClassName != allowed.Default {
if ingressClass, err = utils.GetIngressClassByName(ctx, version, c, ingressClassName); err != nil && !k8serrors.IsNotFound(err) {
response := admission.Denied(NewIngressClassError(*ingressClassName, err).Error())
response := admission.Denied(caperrors.NewIngressClassError(*ingressClassName, err).Error())
return &response
}
@@ -72,8 +71,6 @@ func mutateIngressDefaults(ctx context.Context, req admission.Request, version *
return &response
}
recorder.Eventf(tnt, corev1.EventTypeNormal, "TenantDefault", "Assigned Tenant default Ingress Class %s to %s/%s", allowed.Default, ingress.Name(), ingress.Namespace())
response := admission.PatchResponseFromRaw(req.Object.Raw, marshaled)
return &response

View File

@@ -10,17 +10,17 @@ import (
corev1 "k8s.io/api/core/v1"
schedulev1 "k8s.io/api/scheduling/v1"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/tenant"
)
func mutatePodDefaults(ctx context.Context, req admission.Request, c client.Client, decoder admission.Decoder, recorder record.EventRecorder, namespace string) *admission.Response {
func mutatePodDefaults(ctx context.Context, req admission.Request, c client.Client, decoder admission.Decoder, namespace string) *admission.Response {
var pod corev1.Pod
if err := decoder.Decode(req, &pod); err != nil {
return utils.ErroredResponse(err)
@@ -40,23 +40,9 @@ func mutatePodDefaults(ctx context.Context, req admission.Request, c client.Clie
pcMutated, pcErr := handlePriorityClassDefault(ctx, c, tnt.Spec.PriorityClasses, &pod)
if pcErr != nil {
return utils.ErroredResponse(pcErr)
} else if pcMutated {
defer func() {
if err == nil {
recorder.Eventf(tnt, corev1.EventTypeNormal, "TenantDefault", "Assigned Tenant default Priority Class %s to %s/%s", tnt.Spec.PriorityClasses.Default, pod.Namespace, pod.Name)
}
}()
}
rcMutated := handleRuntimeClassDefault(tnt.Spec.RuntimeClasses, &pod)
if rcMutated {
defer func() {
if err == nil {
recorder.Eventf(tnt, corev1.EventTypeNormal, "TenantDefault", "Assigned Tenant default Runtime Class %s to %s/%s", tnt.Spec.RuntimeClasses.Default, pod.Namespace, pod.Name)
}
}()
}
if !rcMutated && !pcMutated {
return nil
}
@@ -104,7 +90,7 @@ func handlePriorityClassDefault(ctx context.Context, c client.Client, allowed *a
cpc, err = utils.GetPriorityClassByName(ctx, c, priorityClassPod)
// Should not happen, since API already checks if PC present
if err != nil {
return false, NewPriorityClassError(priorityClassPod, err)
return false, caperrors.NewPriorityClassError(priorityClassPod, err)
}
} else {
mutated = true

View File

@@ -10,16 +10,16 @@ import (
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/tenant"
)
func mutatePVCDefaults(ctx context.Context, req admission.Request, c client.Client, decoder admission.Decoder, recorder record.EventRecorder, namespace string) *admission.Response {
func mutatePVCDefaults(ctx context.Context, req admission.Request, c client.Client, decoder admission.Decoder, namespace string) *admission.Response {
var err error
pvc := &corev1.PersistentVolumeClaim{}
@@ -53,7 +53,7 @@ func mutatePVCDefaults(ctx context.Context, req admission.Request, c client.Clie
if storageClassName := pvc.Spec.StorageClassName; storageClassName != nil && *storageClassName != allowed.Default {
csc, err = utils.GetStorageClassByName(ctx, c, *storageClassName)
if err != nil && !k8serrors.IsNotFound(err) {
response := admission.Denied(NewStorageClassError(*storageClassName, err).Error())
response := admission.Denied(caperrors.NewStorageClassError(*storageClassName, err).Error())
return &response
}
@@ -72,8 +72,6 @@ func mutatePVCDefaults(ctx context.Context, req admission.Request, c client.Clie
return utils.ErroredResponse(err)
}
recorder.Eventf(tnt, corev1.EventTypeNormal, "TenantDefault", "Assigned Tenant default Storage Class %s to %s/%s", allowed.Default, pvc.Namespace, pvc.Name)
response := admission.PatchResponseFromRaw(req.Object.Raw, marshaled)
return &response

View File

@@ -10,22 +10,24 @@ import (
corev1 "k8s.io/api/core/v1"
resources "k8s.io/api/resource/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/tenant"
)
type deviceClass struct{}
func DeviceClass() capsulewebhook.Handler {
func DeviceClass() handlers.Handler {
return &deviceClass{}
}
func (h *deviceClass) OnCreate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *deviceClass) OnCreate(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
switch res := req.Kind.Kind; res {
case "ResourceClaim":
@@ -48,19 +50,19 @@ func (h *deviceClass) OnCreate(c client.Client, decoder admission.Decoder, recor
}
}
func (h *deviceClass) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *deviceClass) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *deviceClass) OnUpdate(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *deviceClass) OnUpdate(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *deviceClass) validateResourceRequest(ctx context.Context, c client.Client, _ admission.Decoder, recorder record.EventRecorder, req admission.Request, namespace string, requests []resources.DeviceRequest) *admission.Response {
func (h *deviceClass) validateResourceRequest(ctx context.Context, c client.Client, _ admission.Decoder, recorder events.EventRecorder, req admission.Request, namespace string, requests []resources.DeviceRequest) *admission.Response {
tnt, err := tenant.TenantByStatusNamespace(ctx, c, namespace)
if err != nil {
return utils.ErroredResponse(err)
@@ -84,9 +86,9 @@ func (h *deviceClass) validateResourceRequest(ctx context.Context, c client.Clie
}
if dc == nil {
recorder.Eventf(tnt, corev1.EventTypeWarning, "MissingDeviceClass", "%s %s/%s is missing DeviceClass", req.Kind.Kind, req.Namespace, req.Name)
recorder.Eventf(tnt, dc, corev1.EventTypeWarning, evt.ReasonMissingDeviceClass, evt.ActionValidationDenied, "%s %s/%s is missing DeviceClass", req.Kind.Kind, req.Namespace, req.Name)
response := admission.Denied(NewDeviceClassUndefined(*allowed).Error())
response := admission.Denied(caperrors.NewDeviceClassUndefined(*allowed).Error())
return &response
}
@@ -97,9 +99,9 @@ func (h *deviceClass) validateResourceRequest(ctx context.Context, c client.Clie
case allowed.Match(dc.Name) || selector:
return nil
default:
recorder.Eventf(tnt, corev1.EventTypeWarning, "ForbiddenDeviceClass", "%s %s/%s DeviceClass %s is forbidden for the current Tenant", req.Kind.Kind, req.Namespace, req.Name, &dc)
recorder.Eventf(tnt, dc, corev1.EventTypeWarning, evt.ReasonForbiddenDeviceClass, evt.ActionValidationDenied, "%s %s/%s DeviceClass %s is forbidden for the current Tenant", req.Kind.Kind, req.Namespace, req.Name, &dc)
response := admission.Denied(NewDeviceClassForbidden(dc.Name, *allowed).Error())
response := admission.Denied(caperrors.NewDeviceClassForbidden(dc.Name, *allowed).Error())
return &response
}

View File

@@ -1,43 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package gateway
import (
"fmt"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/api"
)
type gatewayClassForbiddenError struct {
gatewayClassName string
spec api.DefaultAllowedListSpec
}
func NewGatewayClassForbidden(class string, spec api.DefaultAllowedListSpec) error {
return &gatewayClassForbiddenError{
gatewayClassName: class,
spec: spec,
}
}
func (i gatewayClassForbiddenError) Error() string {
err := fmt.Sprintf("Gateway Class %s is forbidden for the current Tenant: ", i.gatewayClassName)
return utils.DefaultAllowedValuesErrorMessage(i.spec, err)
}
type gatewayClassUndefinedError struct {
spec api.DefaultAllowedListSpec
}
func NewGatewayClassUndefined(spec api.DefaultAllowedListSpec) error {
return &gatewayClassUndefinedError{
spec: spec,
}
}
func (i gatewayClassUndefinedError) Error() string {
return utils.DefaultAllowedValuesErrorMessage(i.spec, "No gateway Class is forbidden for the current Tenant. Specify a gateway Class which is allowed within the Tenant: ")
}

View File

@@ -9,46 +9,48 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/configuration"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type class struct {
configuration configuration.Configuration
}
func Class(configuration configuration.Configuration) capsulewebhook.Handler {
func Class(configuration configuration.Configuration) handlers.Handler {
return &class{
configuration: configuration,
}
}
func (r *class) OnCreate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *class) OnCreate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.validate(ctx, client, req, decoder, recorder)
}
}
func (r *class) OnUpdate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *class) OnUpdate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.validate(ctx, client, req, decoder, recorder)
}
}
func (r *class) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (r *class) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (r *class) validate(ctx context.Context, client client.Client, req admission.Request, decoder admission.Decoder, recorder record.EventRecorder) *admission.Response {
func (r *class) validate(ctx context.Context, client client.Client, req admission.Request, decoder admission.Decoder, recorder events.EventRecorder) *admission.Response {
gatewayObj := &gatewayv1.Gateway{}
if err := decoder.Decode(req, gatewayObj); err != nil {
return utils.ErroredResponse(err)
@@ -77,9 +79,9 @@ func (r *class) validate(ctx context.Context, client client.Client, req admissio
}
if gatewayClass == nil {
recorder.Eventf(tnt, corev1.EventTypeWarning, "MissingGatewayClass", "Gateway %s/%s is missing GatewayClass", req.Namespace, req.Name)
recorder.Eventf(tnt, gatewayClass, corev1.EventTypeWarning, evt.ReasonMissingGatewayClass, evt.ActionValidationDenied, "Gateway %s/%s is missing GatewayClass", req.Namespace, req.Name)
response := admission.Denied(NewGatewayClassUndefined(*allowed).Error())
response := admission.Denied(caperrors.NewGatewayClassUndefined(*allowed).Error())
return &response
}
@@ -106,9 +108,9 @@ func (r *class) validate(ctx context.Context, client client.Client, req admissio
case allowed.Match(gatewayClass.Name) || selector:
return nil
default:
recorder.Eventf(tnt, corev1.EventTypeWarning, "ForbiddenGatewayClass", "Gateway %s/%s GatewayClass %s is forbidden for the current Tenant", req.Namespace, req.Name, &gatewayClass)
recorder.Eventf(tnt, gatewayClass, corev1.EventTypeWarning, evt.ReasonForbiddenGatewayClass, evt.ActionValidationDenied, "Gateway %s/%s GatewayClass %s is forbidden for the current Tenant", req.Namespace, req.Name, &gatewayClass)
response := admission.Denied(NewGatewayClassForbidden(gatewayObj.Name, *allowed).Error())
response := admission.Denied(caperrors.NewGatewayClassForbidden(gatewayObj.Name, *allowed).Error())
return &response
}

View File

@@ -1,40 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package webhook
import (
"context"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
)
type Func func(ctx context.Context, req admission.Request) *admission.Response
type Handler interface {
OnCreate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) Func
OnDelete(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) Func
OnUpdate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) Func
}
type HanderWithTenant interface {
OnCreate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder, tnt *capsulev1beta2.Tenant) Func
OnUpdate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder, tnt *capsulev1beta2.Tenant) Func
OnDelete(c client.Client, decoder admission.Decoder, recorder record.EventRecorder, tnt *capsulev1beta2.Tenant) Func
}
type TypedHandler[T client.Object] interface {
OnCreate(c client.Client, obj T, decoder admission.Decoder, recorder record.EventRecorder) Func
OnUpdate(c client.Client, obj T, old T, decoder admission.Decoder, recorder record.EventRecorder) Func
OnDelete(c client.Client, obj T, decoder admission.Decoder, recorder record.EventRecorder) Func
}
type TypedHandlerWithTenant[T client.Object] interface {
OnCreate(c client.Client, obj T, decoder admission.Decoder, recorder record.EventRecorder, tnt *capsulev1beta2.Tenant) Func
OnUpdate(c client.Client, obj T, old T, decoder admission.Decoder, recorder record.EventRecorder, tnt *capsulev1beta2.Tenant) Func
OnDelete(c client.Client, obj T, decoder admission.Decoder, recorder record.EventRecorder, tnt *capsulev1beta2.Tenant) Func
}
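
The file above is the old internal/webhook contract that this PR removes. The replacement interfaces in pkg/runtime/handlers are not included in this excerpt; judging from the call sites in the migrated webhooks, their shape mirrors the removed one with events.EventRecorder substituted for record.EventRecorder. A hedged reconstruction, inferred rather than copied from the diff:

```go
// Hedged reconstruction of pkg/runtime/handlers as implied by the call sites above;
// the actual file is not shown in this excerpt.
package handlers

import (
	"context"

	"k8s.io/client-go/tools/events"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// Func is the per-request admission callback returned by every handler hook.
type Func func(ctx context.Context, req admission.Request) *admission.Response

// Handler mirrors the removed capsulewebhook.Handler, now over events.EventRecorder.
type Handler interface {
	OnCreate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) Func
	OnDelete(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) Func
	OnUpdate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) Func
}
```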

View File

@@ -10,14 +10,16 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/configuration"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type class struct {
@@ -25,32 +27,39 @@ type class struct {
version *version.Version
}
func Class(configuration configuration.Configuration, version *version.Version) capsulewebhook.Handler {
func Class(configuration configuration.Configuration, version *version.Version) handlers.Handler {
return &class{
configuration: configuration,
version: version,
}
}
func (r *class) OnCreate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *class) OnCreate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.validate(ctx, r.version, client, req, decoder, recorder)
}
}
func (r *class) OnUpdate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *class) OnUpdate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.validate(ctx, r.version, client, req, decoder, recorder)
}
}
func (r *class) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (r *class) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (r *class) validate(ctx context.Context, version *version.Version, client client.Client, req admission.Request, decoder admission.Decoder, recorder record.EventRecorder) *admission.Response {
func (r *class) validate(
ctx context.Context,
version *version.Version,
client client.Client,
req admission.Request,
decoder admission.Decoder,
recorder events.EventRecorder,
) *admission.Response {
ingress, err := FromRequest(req, decoder)
if err != nil {
return utils.ErroredResponse(err)
@@ -76,9 +85,9 @@ func (r *class) validate(ctx context.Context, version *version.Version, client c
ingressClass := ingress.IngressClass()
if ingressClass == nil {
recorder.Eventf(tnt, corev1.EventTypeWarning, "MissingIngressClass", "Ingress %s/%s is missing IngressClass", req.Namespace, req.Name)
recorder.Eventf(tnt, nil, corev1.EventTypeWarning, evt.ReasonMissingIngressClass, evt.ActionValidationDenied, "Ingress %s/%s is missing IngressClass", req.Namespace, req.Name)
response := admission.Denied(NewIngressClassUndefined(*allowed).Error())
response := admission.Denied(caperrors.NewIngressClassUndefined(*allowed).Error())
return &response
}
@@ -106,9 +115,9 @@ func (r *class) validate(ctx context.Context, version *version.Version, client c
case allowed.Match(*ingressClass) || selector:
return nil
default:
recorder.Eventf(tnt, corev1.EventTypeWarning, "ForbiddenIngressClass", "Ingress %s/%s IngressClass %s is forbidden for the current Tenant", req.Namespace, req.Name, &ingressClass)
recorder.Eventf(tnt, nil, corev1.EventTypeWarning, evt.ReasonForbiddenIngressClass, evt.ActionValidationDenied, "Ingress %s/%s IngressClass %s is forbidden for the current Tenant", req.Namespace, req.Name, &ingressClass)
response := admission.Denied(NewIngressClassForbidden(*ingressClass, *allowed).Error())
response := admission.Denied(caperrors.NewIngressClassForbidden(*ingressClass, *allowed).Error())
return &response
}

View File

@@ -14,45 +14,47 @@ import (
networkingv1beta1 "k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/indexer/ingress"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/runtime/indexers/ingress"
)
type collision struct {
configuration configuration.Configuration
}
func Collision(configuration configuration.Configuration) capsulewebhook.Handler {
func Collision(configuration configuration.Configuration) handlers.Handler {
return &collision{configuration: configuration}
}
func (r *collision) OnCreate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *collision) OnCreate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.validate(ctx, client, req, decoder, recorder)
}
}
func (r *collision) OnUpdate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *collision) OnUpdate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.validate(ctx, client, req, decoder, recorder)
}
}
func (r *collision) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (r *collision) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (r *collision) validate(ctx context.Context, client client.Client, req admission.Request, decoder admission.Decoder, recorder record.EventRecorder) *admission.Response {
func (r *collision) validate(ctx context.Context, client client.Client, req admission.Request, decoder admission.Decoder, recorder events.EventRecorder) *admission.Response {
ing, err := FromRequest(req, decoder)
if err != nil {
return utils.ErroredResponse(err)
@@ -73,9 +75,9 @@ func (r *collision) validate(ctx context.Context, client client.Client, req admi
return nil
}
var collisionErr *ingressHostnameCollisionError
var collisionErr *caperrors.IngressHostnameCollisionError
if errors.As(err, &collisionErr) {
recorder.Eventf(tenant, corev1.EventTypeWarning, "IngressHostnameCollision", "Ingress %s/%s hostname is colliding", ing.Namespace(), ing.Name())
recorder.Eventf(tenant, nil, corev1.EventTypeWarning, evt.ReasonIngressHostnameCollision, evt.ActionValidationDenied, "Ingress %s/%s hostname is colliding", ing.Namespace(), ing.Name())
}
response := admission.Denied(err.Error())
@@ -151,7 +153,7 @@ func (r *collision) validateCollision(ctx context.Context, clt client.Client, in
fallthrough
default:
return NewIngressHostnameCollision(hostname)
return caperrors.NewIngressHostnameCollision(hostname)
}
case *networkingv1.IngressList:
for index, item := range list.Items {
@@ -170,7 +172,7 @@ func (r *collision) validateCollision(ctx context.Context, clt client.Client, in
fallthrough
default:
return NewIngressHostnameCollision(hostname)
return caperrors.NewIngressHostnameCollision(hostname)
}
case *networkingv1beta1.IngressList:
for index, item := range list.Items {
@@ -189,7 +191,7 @@ func (r *collision) validateCollision(ctx context.Context, clt client.Client, in
fallthrough
default:
return NewIngressHostnameCollision(hostname)
return caperrors.NewIngressHostnameCollision(hostname)
}
}
}
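
The unexported ingressHostnameCollisionError moves to the shared pkg/api/errors package as an exported type, so packages outside the webhook can match it. A short sketch of what a caller can now do, assuming the exported type keeps the pointer shape and constructor used above:

```go
// Sketch, assuming caperrors.IngressHostnameCollisionError is the exported
// counterpart of the old unexported type and keeps the constructor shown above.
package example

import (
	"errors"

	caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
)

// isHostnameCollision reports whether err (possibly wrapped) is the collision error.
func isHostnameCollision(err error) bool {
	var collision *caperrors.IngressHostnameCollisionError

	return errors.As(err, &collision)
}
```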

View File

@@ -10,43 +10,45 @@ import (
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/configuration"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type hostnames struct {
configuration configuration.Configuration
}
func Hostnames(configuration configuration.Configuration) capsulewebhook.Handler {
func Hostnames(configuration configuration.Configuration) handlers.Handler {
return &hostnames{configuration: configuration}
}
func (r *hostnames) OnCreate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *hostnames) OnCreate(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.validate(ctx, c, req, decoder, recorder)
}
}
func (r *hostnames) OnUpdate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *hostnames) OnUpdate(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.validate(ctx, c, req, decoder, recorder)
}
}
func (r *hostnames) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (r *hostnames) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (r *hostnames) validate(ctx context.Context, client client.Client, req admission.Request, decoder admission.Decoder, recorder record.EventRecorder) *admission.Response {
func (r *hostnames) validate(ctx context.Context, client client.Client, req admission.Request, decoder admission.Decoder, recorder events.EventRecorder) *admission.Response {
ingress, err := FromRequest(req, decoder)
if err != nil {
return utils.ErroredResponse(err)
@@ -67,9 +69,9 @@ func (r *hostnames) validate(ctx context.Context, client client.Client, req admi
for hostname := range ingress.HostnamePathsPairs() {
if len(hostname) == 0 {
recorder.Eventf(tenant, corev1.EventTypeWarning, "IngressHostnameEmpty", "Ingress %s/%s hostname is empty", ingress.Namespace(), ingress.Name())
recorder.Eventf(tenant, nil, corev1.EventTypeWarning, evt.ReasonIngressHostnameEmpty, evt.ActionValidationDenied, "Ingress %s/%s hostname is empty", ingress.Namespace(), ingress.Name())
return utils.ErroredResponse(NewEmptyIngressHostname(*tenant.Spec.IngressOptions.AllowedHostnames))
return utils.ErroredResponse(caperrors.NewEmptyIngressHostname(*tenant.Spec.IngressOptions.AllowedHostnames))
}
hostnameList.Insert(hostname)
@@ -79,9 +81,9 @@ func (r *hostnames) validate(ctx context.Context, client client.Client, req admi
return nil
}
var hostnameNotValidErr *ingressHostnameNotValidError
var hostnameNotValidErr *caperrors.IngressHostnameNotValidError
if errors.As(err, &hostnameNotValidErr) {
recorder.Eventf(tenant, corev1.EventTypeWarning, "IngressHostnameNotValid", "Ingress %s/%s hostname is not valid", ingress.Namespace(), ingress.Name())
recorder.Eventf(tenant, nil, corev1.EventTypeWarning, evt.ReasonIngressHostnameNotValid, evt.ActionValidationDenied, "Ingress %s/%s hostname is not valid", ingress.Namespace(), ingress.Name())
response := admission.Denied(err.Error())
@@ -129,7 +131,7 @@ func (r *hostnames) validateHostnames(tenant capsulev1beta2.Tenant, hostnames se
}
if !valid && !matched {
return NewIngressHostnamesNotValid(invalidHostnames, notMatchingHostnames, *tenant.Spec.IngressOptions.AllowedHostnames)
return caperrors.NewIngressHostnamesNotValid(invalidHostnames, notMatchingHostnames, *tenant.Spec.IngressOptions.AllowedHostnames)
}
return nil

View File

@@ -10,40 +10,41 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type wildcard struct{}
func Wildcard() capsulewebhook.Handler {
func Wildcard() handlers.Handler {
return &wildcard{}
}
func (h *wildcard) OnCreate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *wildcard) OnCreate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.validate(ctx, client, req, recorder, decoder)
}
}
func (h *wildcard) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *wildcard) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *wildcard) OnUpdate(client client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *wildcard) OnUpdate(client client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.validate(ctx, client, req, recorder, decoder)
}
}
func (h *wildcard) validate(ctx context.Context, clt client.Client, req admission.Request, recorder record.EventRecorder, decoder admission.Decoder) *admission.Response {
func (h *wildcard) validate(ctx context.Context, clt client.Client, req admission.Request, recorder events.EventRecorder, decoder admission.Decoder) *admission.Response {
tntList := &capsulev1beta2.TenantList{}
if err := clt.List(ctx, tntList, client.MatchingFieldsSelector{
@@ -70,7 +71,7 @@ func (h *wildcard) validate(ctx context.Context, clt client.Client, req admissio
// Check if one of the host has wildcard.
if strings.HasPrefix(host, "*") {
// In case of wildcard, generate an event and then return.
recorder.Eventf(&tnt, corev1.EventTypeWarning, "Wildcard denied", "%s %s/%s cannot be %s", req.Kind.String(), req.Namespace, req.Name, strings.ToLower(string(req.Operation)))
recorder.Eventf(&tnt, nil, corev1.EventTypeWarning, evt.ReasonWildcardDenied, evt.ActionValidationDenied, "%s %s/%s cannot be %s", req.Kind.String(), req.Namespace, req.Name, strings.ToLower(string(req.Operation)))
response := admission.Denied(fmt.Sprintf("Wildcard denied for tenant %s\n", tnt.GetName()))

View File

@@ -0,0 +1,45 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package misc
import (
"context"
"fmt"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type managedValidatingHandler struct{}
func ManagedValidatingHandler() handlers.Handler {
return &managedValidatingHandler{}
}
func (h *managedValidatingHandler) OnCreate(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *managedValidatingHandler) OnDelete(client client.Client, _ admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.handler(ctx, client, req, recorder)
}
}
func (h *managedValidatingHandler) OnUpdate(client client.Client, _ admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.handler(ctx, client, req, recorder)
}
}
func (h *managedValidatingHandler) handler(ctx context.Context, clt client.Client, req admission.Request, recorder events.EventRecorder) *admission.Response {
response := admission.Denied(fmt.Sprintf("resource %s is managed by capsule and can not by modified by capsule users", req.Name))
return &response
}


View File

@@ -8,35 +8,35 @@ import (
"encoding/json"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/tenant"
)
type tenantAssignmentHandler struct{}
func TenantAssignmentHandler() capsulewebhook.Handler {
func TenantAssignmentHandler() handlers.Handler {
return &tenantAssignmentHandler{}
}
func (r *tenantAssignmentHandler) OnCreate(c client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func {
func (r *tenantAssignmentHandler) OnCreate(c client.Client, decoder admission.Decoder, _ events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.handle(ctx, c, decoder, req)
}
}
func (r *tenantAssignmentHandler) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (r *tenantAssignmentHandler) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (r *tenantAssignmentHandler) OnUpdate(c client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func {
func (r *tenantAssignmentHandler) OnUpdate(c client.Client, decoder admission.Decoder, _ events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return r.handle(ctx, c, decoder, req)
}
@@ -66,11 +66,23 @@ func (r *tenantAssignmentHandler) handle(ctx context.Context, c client.Client, d
labels = map[string]string{}
}
if currentValue, exists := labels[meta.ManagedByCapsuleLabel]; exists && currentValue == tnt.GetName() {
want := tnt.GetName()
managedOK := labels[meta.ManagedByCapsuleLabel] == want
tenantOK := labels[meta.NewTenantLabel] == want
if managedOK && tenantOK {
return nil
}
labels[meta.ManagedByCapsuleLabel] = tnt.GetName()
if !managedOK {
labels[meta.ManagedByCapsuleLabel] = want
}
if !tenantOK {
labels[meta.NewTenantLabel] = want
}
obj.SetLabels(labels)
marshaledObj, err := json.Marshal(obj)
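
Once both the managed-by and tenant labels are in sync, the handler serialises the mutated object; the return statement sits outside the hunk shown above, but the usual controller-runtime pattern is to build the patch against the raw request object. A hedged sketch of that tail end:

```go
// Hedged sketch: how a mutating handler typically turns the relabelled object into a
// patch response. PatchResponseFromRaw is the standard controller-runtime helper; the
// exact return statement of tenantAssignmentHandler is not shown in this diff.
package example

import (
	"encoding/json"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

func patchResponse(req admission.Request, obj *unstructured.Unstructured) *admission.Response {
	marshaled, err := json.Marshal(obj)
	if err != nil {
		resp := admission.Errored(500, err)

		return &resp
	}

	resp := admission.PatchResponseFromRaw(req.Object.Raw, marshaled)

	return &resp
}
```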

View File

@@ -11,14 +11,14 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
capsuleutils "github.com/projectcapsule/capsule/pkg/utils"
)
@@ -26,19 +26,19 @@ type cordoningLabelHandler struct {
cfg configuration.Configuration
}
func CordoningLabelHandler(cfg configuration.Configuration) capsulewebhook.TypedHandler[*corev1.Namespace] {
func CordoningLabelHandler(cfg configuration.Configuration) handlers.TypedHandler[*corev1.Namespace] {
return &cordoningLabelHandler{
cfg: cfg,
}
}
func (h *cordoningLabelHandler) OnCreate(client.Client, *corev1.Namespace, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *cordoningLabelHandler) OnCreate(client.Client, *corev1.Namespace, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *cordoningLabelHandler) OnDelete(client.Client, *corev1.Namespace, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *cordoningLabelHandler) OnDelete(client.Client, *corev1.Namespace, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
@@ -49,8 +49,8 @@ func (h *cordoningLabelHandler) OnUpdate(
ns *corev1.Namespace,
old *corev1.Namespace,
decoder admission.Decoder,
_ record.EventRecorder,
) capsulewebhook.Func {
_ events.EventRecorder,
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.handle(ctx, c, req, ns)
}

View File

@@ -7,18 +7,19 @@ import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
"github.com/projectcapsule/capsule/pkg/utils/users"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/tenant"
"github.com/projectcapsule/capsule/pkg/users"
)
func NamespaceHandler(configuration configuration.Configuration, handlers ...webhook.TypedHandler[*corev1.Namespace]) webhook.Handler {
func NamespaceHandler(configuration configuration.Configuration, handlers ...handlers.TypedHandler[*corev1.Namespace]) handlers.Handler {
return &handler{
cfg: configuration,
handlers: handlers,
@@ -27,10 +28,10 @@ func NamespaceHandler(configuration configuration.Configuration, handlers ...web
type handler struct {
cfg configuration.Configuration
handlers []webhook.TypedHandler[*corev1.Namespace]
handlers []handlers.TypedHandler[*corev1.Namespace]
}
func (h *handler) OnCreate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) webhook.Func {
func (h *handler) OnCreate(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
userIsAdmin := users.IsAdminUser(req, h.cfg.Administrators())
@@ -62,13 +63,13 @@ func (h *handler) OnCreate(c client.Client, decoder admission.Decoder, recorder
}
}
func (h *handler) OnDelete(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) webhook.Func {
func (h *handler) OnDelete(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *handler) OnUpdate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) webhook.Func {
func (h *handler) OnUpdate(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
userIsAdmin := users.IsAdminUser(req, h.cfg.Administrators())
@@ -105,7 +106,7 @@ func (h *handler) OnUpdate(c client.Client, decoder admission.Decoder, recorder
}
} else {
if owned := tenant.NamespaceIsOwned(ctx, c, h.cfg, oldNs, tnt, req.UserInfo); !owned {
recorder.Eventf(oldNs, corev1.EventTypeWarning, "NamespacePatch", "Namespace %s can not be patched", oldNs.GetName())
recorder.Eventf(tnt, oldNs, corev1.EventTypeWarning, "NamespacePatch", evt.ActionValidationDenied, "Namespace %s can not be patched", oldNs.GetName())
response := admission.Denied("Denied patch request for this namespace")

View File

@@ -10,27 +10,27 @@ import (
"net/http"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/tenant"
)
type metadataHandler struct {
cfg configuration.Configuration
}
func MetadataHandler(cfg configuration.Configuration) capsulewebhook.TypedHandler[*corev1.Namespace] {
func MetadataHandler(cfg configuration.Configuration) handlers.TypedHandler[*corev1.Namespace] {
return &metadataHandler{
cfg: cfg,
}
}
func (h *metadataHandler) OnCreate(client client.Client, ns *corev1.Namespace, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *metadataHandler) OnCreate(client client.Client, ns *corev1.Namespace, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
tnt, errResponse := utils.GetNamespaceTenant(ctx, client, ns, req, h.cfg, recorder)
if errResponse != nil {
@@ -73,13 +73,13 @@ func (h *metadataHandler) OnCreate(client client.Client, ns *corev1.Namespace, d
}
}
func (h *metadataHandler) OnDelete(client.Client, *corev1.Namespace, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *metadataHandler) OnDelete(client.Client, *corev1.Namespace, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *metadataHandler) OnUpdate(c client.Client, newNs *corev1.Namespace, oldNs *corev1.Namespace, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *metadataHandler) OnUpdate(c client.Client, newNs *corev1.Namespace, oldNs *corev1.Namespace, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
tnt, errResponse := utils.GetNamespaceTenant(ctx, c, oldNs, req, h.cfg, recorder)
if errResponse != nil {

View File

@@ -11,30 +11,31 @@ import (
authenticationv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/tenant"
)
type ownerReferenceHandler struct {
cfg configuration.Configuration
}
func OwnerReferenceHandler(cfg configuration.Configuration) capsulewebhook.TypedHandler[*corev1.Namespace] {
func OwnerReferenceHandler(cfg configuration.Configuration) handlers.TypedHandler[*corev1.Namespace] {
return &ownerReferenceHandler{
cfg: cfg,
}
}
func (h *ownerReferenceHandler) OnCreate(c client.Client, ns *corev1.Namespace, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *ownerReferenceHandler) OnCreate(c client.Client, ns *corev1.Namespace, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
tnt, errResponse := utils.GetNamespaceTenant(ctx, c, ns, req, h.cfg, recorder)
if errResponse != nil {
@@ -69,13 +70,13 @@ func (h *ownerReferenceHandler) OnCreate(c client.Client, ns *corev1.Namespace,
}
}
func (h *ownerReferenceHandler) OnDelete(client.Client, *corev1.Namespace, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (h *ownerReferenceHandler) OnDelete(client.Client, *corev1.Namespace, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *ownerReferenceHandler) OnUpdate(c client.Client, newNs *corev1.Namespace, oldNs *corev1.Namespace, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (h *ownerReferenceHandler) OnUpdate(c client.Client, newNs *corev1.Namespace, oldNs *corev1.Namespace, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
tnt, err := resolveTenantForNamespaceUpdate(ctx, c, h.cfg, oldNs, newNs, req.UserInfo)
if err != nil {
@@ -153,7 +154,7 @@ func assignToTenant(
c client.Client,
tnt *capsulev1beta2.Tenant,
ns *corev1.Namespace,
recorder record.EventRecorder,
recorder events.EventRecorder,
) error {
has, err := controllerutil.HasOwnerReference(ns.OwnerReferences, tnt, c.Scheme())
if err != nil {
@@ -165,12 +166,12 @@ func assignToTenant(
}
if err := controllerutil.SetOwnerReference(tnt, ns, c.Scheme()); err != nil {
recorder.Eventf(tnt, corev1.EventTypeWarning, "Error", "Namespace %s cannot be assigned to the desired Tenant", ns.GetName())
recorder.Eventf(ns, tnt, corev1.EventTypeWarning, evt.ReasonNamespaceHijack, evt.ActionValidationDenied, "Namespace %s cannot be assigned to the desired tenant %s", ns.GetName(), tnt.GetName())
return err
}
recorder.Eventf(tnt, corev1.EventTypeNormal, "NamespaceCreationWebhook", "Namespace %s has been assigned to the desired Tenant", ns.GetName())
recorder.Eventf(ns, tnt, corev1.EventTypeNormal, evt.ReasonTenantAssigned, evt.ActionValidationDenied, "Namespace %s has been assigned to the desired tenant %s", ns.GetName(), tnt.GetName())
return nil
}

View File

@@ -7,21 +7,22 @@ import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/utils/users"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/users"
)
type freezedHandler struct {
cfg configuration.Configuration
}
func FreezeHandler(configuration configuration.Configuration) capsulewebhook.TypedHandlerWithTenant[*corev1.Namespace] {
func FreezeHandler(configuration configuration.Configuration) handlers.TypedHandlerWithTenant[*corev1.Namespace] {
return &freezedHandler{cfg: configuration}
}
@@ -29,12 +30,12 @@ func (h *freezedHandler) OnCreate(
c client.Client,
ns *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
if tnt.Spec.Cordoned {
recorder.Eventf(tnt, corev1.EventTypeWarning, "TenantFreezed", "Namespace %s cannot be attached, the current Tenant is freezed", ns.GetName())
recorder.Eventf(tnt, ns, corev1.EventTypeWarning, evt.ReasonCordoning, evt.ActionValidationDenied, "Namespace %s cannot be attached, the current Tenant is freezed", ns.GetName())
response := admission.Denied("the selected Tenant is freezed")
@@ -49,12 +50,12 @@ func (h *freezedHandler) OnDelete(
c client.Client,
ns *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
if tnt.Spec.Cordoned && users.IsCapsuleUser(ctx, c, h.cfg, req.UserInfo.Username, req.UserInfo.Groups) {
recorder.Eventf(tnt, corev1.EventTypeWarning, "TenantFreezed", "Namespace %s cannot be deleted, the current Tenant is freezed", req.Name)
recorder.Eventf(tnt, ns, corev1.EventTypeWarning, "TenantFreezed", "Denied", "Namespace %s cannot be deleted, the current Tenant is freezed", req.Name)
response := admission.Denied("the selected Tenant is freezed")
@@ -70,12 +71,12 @@ func (h *freezedHandler) OnUpdate(
ns *corev1.Namespace,
old *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
if tnt.Spec.Cordoned && users.IsCapsuleUser(ctx, c, h.cfg, req.UserInfo.Username, req.UserInfo.Groups) {
recorder.Eventf(tnt, corev1.EventTypeWarning, "TenantFreezed", "Namespace %s cannot be updated, the current Tenant is freezed", ns.GetName())
recorder.Eventf(tnt, ns, corev1.EventTypeWarning, "TenantFreezed", "Denied", "Namespace %s cannot be updated, the current Tenant is freezed", ns.GetName())
response := admission.Denied("the selected Tenant is freezed")

View File

@@ -8,32 +8,32 @@ import (
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
"github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/utils/tenant"
"github.com/projectcapsule/capsule/pkg/utils/users"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/tenant"
"github.com/projectcapsule/capsule/pkg/users"
)
func NamespaceHandler(configuration configuration.Configuration, handlers ...webhook.TypedHandlerWithTenant[*corev1.Namespace]) webhook.Handler {
func NamespaceHandler(configuration configuration.Configuration, hndlers ...handlers.TypedHandlerWithTenant[*corev1.Namespace]) handlers.Handler {
return &handler{
cfg: configuration,
handlers: handlers,
handlers: hndlers,
}
}
type handler struct {
cfg configuration.Configuration
handlers []webhook.TypedHandlerWithTenant[*corev1.Namespace]
handlers []handlers.TypedHandlerWithTenant[*corev1.Namespace]
}
func (h *handler) OnCreate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) webhook.Func {
func (h *handler) OnCreate(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
userIsAdmin := users.IsAdminUser(req, h.cfg.Administrators())
@@ -65,7 +65,7 @@ func (h *handler) OnCreate(c client.Client, decoder admission.Decoder, recorder
}
}
func (h *handler) OnDelete(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) webhook.Func {
func (h *handler) OnDelete(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
userIsAdmin := users.IsAdminUser(req, h.cfg.Administrators())
@@ -97,7 +97,7 @@ func (h *handler) OnDelete(c client.Client, decoder admission.Decoder, recorder
}
}
func (h *handler) OnUpdate(c client.Client, decoder admission.Decoder, recorder record.EventRecorder) webhook.Func {
func (h *handler) OnUpdate(c client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
userIsAdmin := users.IsAdminUser(req, h.cfg.Administrators())

View File

@@ -8,21 +8,22 @@ import (
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/utils/users"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
"github.com/projectcapsule/capsule/pkg/users"
)
type patchHandler struct {
cfg configuration.Configuration
}
func PatchHandler(configuration configuration.Configuration) capsulewebhook.TypedHandlerWithTenant[*corev1.Namespace] {
func PatchHandler(configuration configuration.Configuration) handlers.TypedHandlerWithTenant[*corev1.Namespace] {
return &patchHandler{cfg: configuration}
}
@@ -30,9 +31,9 @@ func (h *patchHandler) OnCreate(
client.Client,
*corev1.Namespace,
admission.Decoder,
record.EventRecorder,
events.EventRecorder,
*capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
@@ -42,9 +43,9 @@ func (h *patchHandler) OnDelete(
client.Client,
*corev1.Namespace,
admission.Decoder,
record.EventRecorder,
events.EventRecorder,
*capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
@@ -55,9 +56,9 @@ func (h *patchHandler) OnUpdate(
ns *corev1.Namespace,
old *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
e := fmt.Sprintf("namespace/%s can not be patched", ns.Name)
@@ -65,7 +66,7 @@ func (h *patchHandler) OnUpdate(
return nil
}
recorder.Eventf(ns, corev1.EventTypeWarning, "NamespacePatch", e)
recorder.Eventf(tnt, ns, corev1.EventTypeWarning, evt.ReasonNamespaceHijack, evt.ActionValidationDenied, e)
response := admission.Denied(e)
return &response

View File

@@ -9,20 +9,21 @@ import (
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/pkg/configuration"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type prefixHandler struct {
cfg configuration.Configuration
}
func PrefixHandler(configuration configuration.Configuration) capsulewebhook.TypedHandlerWithTenant[*corev1.Namespace] {
func PrefixHandler(configuration configuration.Configuration) handlers.TypedHandlerWithTenant[*corev1.Namespace] {
return &prefixHandler{
cfg: configuration,
}
@@ -32,9 +33,9 @@ func (h *prefixHandler) OnCreate(
c client.Client,
ns *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
if exp, _ := h.cfg.ProtectedNamespaceRegexp(); exp != nil {
if matched := exp.MatchString(ns.GetName()); matched {
@@ -50,7 +51,7 @@ func (h *prefixHandler) OnCreate(
}
if e := fmt.Sprintf("%s-%s", tnt.GetName(), ns.GetName()); !strings.HasPrefix(ns.GetName(), fmt.Sprintf("%s-", tnt.GetName())) {
recorder.Eventf(tnt, corev1.EventTypeWarning, "InvalidTenantPrefix", "Namespace %s does not match the expected prefix for the current Tenant", ns.GetName())
recorder.Eventf(tnt, ns, corev1.EventTypeWarning, evt.ReasonInvalidTenantPrefix, evt.ActionValidationDenied, "Namespace %s does not match the expected prefix for the current Tenant", ns.GetName())
response := admission.Denied(fmt.Sprintf("The namespace doesn't match the tenant prefix, expected %s", e))
@@ -67,9 +68,9 @@ func (h *prefixHandler) OnUpdate(
*corev1.Namespace,
*corev1.Namespace,
admission.Decoder,
record.EventRecorder,
events.EventRecorder,
*capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
@@ -79,9 +80,9 @@ func (h *prefixHandler) OnDelete(
client.Client,
*corev1.Namespace,
admission.Decoder,
record.EventRecorder,
events.EventRecorder,
*capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}

View File

@@ -8,17 +8,19 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type quotaHandler struct{}
func QuotaHandler() capsulewebhook.TypedHandlerWithTenant[*corev1.Namespace] {
func QuotaHandler() handlers.TypedHandlerWithTenant[*corev1.Namespace] {
return &quotaHandler{}
}
@@ -26,9 +28,9 @@ func (h *quotaHandler) OnCreate(
c client.Client,
ns *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.handle(ctx, c, recorder, ns, tnt)
}
@@ -38,9 +40,9 @@ func (h *quotaHandler) OnDelete(
client.Client,
*corev1.Namespace,
admission.Decoder,
record.EventRecorder,
events.EventRecorder,
*capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
@@ -51,9 +53,9 @@ func (h *quotaHandler) OnUpdate(
ns *corev1.Namespace,
_ *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.handle(ctx, c, recorder, ns, tnt)
}
@@ -62,7 +64,7 @@ func (h *quotaHandler) OnUpdate(
func (h *quotaHandler) handle(
ctx context.Context,
c client.Client,
recorder record.EventRecorder,
recorder events.EventRecorder,
ns *corev1.Namespace,
tnt *capsulev1beta2.Tenant,
) *admission.Response {
@@ -75,9 +77,9 @@ func (h *quotaHandler) handle(
return nil
}
recorder.Eventf(tnt, corev1.EventTypeWarning, "NamespaceQuotaExceded", "Namespace %s cannot be attached, quota exceeded for the current Tenant", ns.GetName())
recorder.Eventf(tnt, ns, corev1.EventTypeWarning, evt.ReasonOverprovision, evt.ActionValidationDenied, "Namespace %s cannot be attached, quota exceeded for the current Tenant", ns.GetName())
response := admission.Denied(NewNamespaceQuotaExceededError().Error())
response := admission.Denied(caperrors.NewNamespaceQuotaExceededError().Error())
return &response
}

View File

@@ -8,18 +8,19 @@ import (
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/pkg/api"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type userMetadataHandler struct{}
func UserMetadataHandler() capsulewebhook.TypedHandlerWithTenant[*corev1.Namespace] {
func UserMetadataHandler() handlers.TypedHandlerWithTenant[*corev1.Namespace] {
return &userMetadataHandler{}
}
@@ -27,15 +28,15 @@ func (h *userMetadataHandler) OnCreate(
c client.Client,
ns *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
if tnt.Spec.NamespaceOptions != nil {
err := api.ValidateForbidden(ns.Annotations, tnt.Spec.NamespaceOptions.ForbiddenAnnotations)
if err != nil {
err = errors.Wrap(err, "namespace annotations validation failed")
recorder.Eventf(tnt, corev1.EventTypeWarning, api.ForbiddenAnnotationReason, err.Error())
recorder.Eventf(tnt, ns, corev1.EventTypeWarning, evt.ReasonForbiddenAnnotation, evt.ActionValidationDenied, err.Error())
response := admission.Denied(err.Error())
return &response
@@ -44,7 +45,7 @@ func (h *userMetadataHandler) OnCreate(
err = api.ValidateForbidden(ns.Labels, tnt.Spec.NamespaceOptions.ForbiddenLabels)
if err != nil {
err = errors.Wrap(err, "namespace labels validation failed")
recorder.Eventf(tnt, corev1.EventTypeWarning, api.ForbiddenLabelReason, err.Error())
recorder.Eventf(tnt, ns, corev1.EventTypeWarning, evt.ReasonForbiddenLabel, evt.ActionValidationDenied, err.Error())
response := admission.Denied(err.Error())
return &response
@@ -60,16 +61,16 @@ func (h *userMetadataHandler) OnUpdate(
newNs *corev1.Namespace,
oldNs *corev1.Namespace,
decoder admission.Decoder,
recorder record.EventRecorder,
recorder events.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
if len(tnt.Spec.NodeSelector) > 0 {
v, ok := newNs.GetAnnotations()["scheduler.alpha.kubernetes.io/node-selector"]
if !ok {
response := admission.Denied("the node-selector annotation is enforced, cannot be removed")
recorder.Eventf(tnt, corev1.EventTypeWarning, "ForbiddenNodeSelectorDeletion", string(response.Result.Reason))
recorder.Eventf(tnt, oldNs, corev1.EventTypeWarning, "ForbiddenNodeSelectorDeletion", "Denied", string(response.Result.Reason))
return &response
}
@@ -77,7 +78,7 @@ func (h *userMetadataHandler) OnUpdate(
if v != oldNs.GetAnnotations()["scheduler.alpha.kubernetes.io/node-selector"] {
response := admission.Denied("the node-selector annotation is enforced, cannot be updated")
recorder.Eventf(tnt, corev1.EventTypeWarning, "ForbiddenNodeSelectorUpdate", string(response.Result.Reason))
recorder.Eventf(tnt, oldNs, corev1.EventTypeWarning, "ForbiddenNodeSelectorUpdate", "Denied", string(response.Result.Reason))
return &response
}
@@ -127,7 +128,7 @@ func (h *userMetadataHandler) OnUpdate(
err := api.ValidateForbidden(annotations, tnt.Spec.NamespaceOptions.ForbiddenAnnotations)
if err != nil {
err = errors.Wrap(err, "namespace annotations validation failed")
recorder.Eventf(tnt, corev1.EventTypeWarning, api.ForbiddenAnnotationReason, err.Error())
recorder.Eventf(tnt, oldNs, corev1.EventTypeWarning, evt.ReasonForbiddenAnnotation, evt.ActionValidationDenied, err.Error())
response := admission.Denied(err.Error())
return &response
@@ -136,7 +137,7 @@ func (h *userMetadataHandler) OnUpdate(
err = api.ValidateForbidden(labels, tnt.Spec.NamespaceOptions.ForbiddenLabels)
if err != nil {
err = errors.Wrap(err, "namespace labels validation failed")
recorder.Eventf(tnt, corev1.EventTypeWarning, api.ForbiddenLabelReason, err.Error())
recorder.Eventf(tnt, oldNs, corev1.EventTypeWarning, evt.ReasonForbiddenLabel, evt.ActionValidationDenied, err.Error())
response := admission.Denied(err.Error())
return &response
@@ -151,9 +152,9 @@ func (h *userMetadataHandler) OnDelete(
client.Client,
*corev1.Namespace,
admission.Decoder,
record.EventRecorder,
events.EventRecorder,
*capsulev1beta2.Tenant,
) capsulewebhook.Func {
) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}

View File

@@ -8,28 +8,28 @@ import (
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
capsuleutils "github.com/projectcapsule/capsule/pkg/utils"
)
type handler struct{}
func Handler() capsulewebhook.Handler {
func Handler() handlers.Handler {
return &handler{}
}
func (r *handler) OnCreate(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (r *handler) OnCreate(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (r *handler) OnDelete(client client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func {
func (r *handler) OnDelete(client client.Client, decoder admission.Decoder, _ events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
allowed, err := r.handle(ctx, req, client, decoder)
if err != nil {
@@ -46,7 +46,7 @@ func (r *handler) OnDelete(client client.Client, decoder admission.Decoder, _ re
}
}
func (r *handler) OnUpdate(client client.Client, decoder admission.Decoder, _ record.EventRecorder) capsulewebhook.Func {
func (r *handler) OnUpdate(client client.Client, decoder admission.Decoder, _ events.EventRecorder) handlers.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
allowed, err := r.handle(ctx, req, client, decoder)
if err != nil {

View File

@@ -9,13 +9,15 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/internal/webhook/utils"
"github.com/projectcapsule/capsule/pkg/configuration"
caperrors "github.com/projectcapsule/capsule/pkg/api/errors"
"github.com/projectcapsule/capsule/pkg/runtime/configuration"
evt "github.com/projectcapsule/capsule/pkg/runtime/events"
"github.com/projectcapsule/capsule/pkg/runtime/handlers"
)
type userMetadataHandler struct {
@@ -23,26 +25,26 @@ type userMetadataHandler struct {
version *version.Version
}
func UserMetadataHandler(configuration configuration.Configuration, ver *version.Version) capsulewebhook.Handler {
func UserMetadataHandler(configuration configuration.Configuration, ver *version.Version) handlers.Handler {
return &userMetadataHandler{
configuration: configuration,
version: ver,
}
}
func (r *userMetadataHandler) OnCreate(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (r *userMetadataHandler) OnCreate(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (r *userMetadataHandler) OnDelete(client.Client, admission.Decoder, record.EventRecorder) capsulewebhook.Func {
func (r *userMetadataHandler) OnDelete(client.Client, admission.Decoder, events.EventRecorder) handlers.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (r *userMetadataHandler) OnUpdate(_ client.Client, decoder admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
func (r *userMetadataHandler) OnUpdate(_ client.Client, decoder admission.Decoder, recorder events.EventRecorder) handlers.Func {
return func(_ context.Context, req admission.Request) *admission.Response {
nodeWebhookSupported, _ := utils.NodeWebhookSupported(r.version)
@@ -65,9 +67,9 @@ func (r *userMetadataHandler) OnUpdate(_ client.Client, decoder admission.Decode
newNodeForbiddenLabels := r.getForbiddenNodeLabels(newNode)
if !reflect.DeepEqual(oldNodeForbiddenLabels, newNodeForbiddenLabels) {
recorder.Eventf(newNode, corev1.EventTypeWarning, "ForbiddenNodeLabel", "Denied modifying forbidden labels on node")
recorder.Eventf(newNode, oldNode, corev1.EventTypeWarning, evt.ReasonForbiddenLabel, evt.ActionValidationDenied, "Denied modifying forbidden labels on node")
response := admission.Denied(NewNodeLabelForbiddenError(r.configuration.ForbiddenUserNodeLabels()).Error())
response := admission.Denied(caperrors.NewNodeLabelForbiddenError(r.configuration.ForbiddenUserNodeLabels()).Error())
return &response
}
@@ -78,9 +80,9 @@ func (r *userMetadataHandler) OnUpdate(_ client.Client, decoder admission.Decode
newNodeForbiddenAnnotations := r.getForbiddenNodeAnnotations(newNode)
if !reflect.DeepEqual(oldNodeForbiddenAnnotations, newNodeForbiddenAnnotations) {
recorder.Eventf(newNode, corev1.EventTypeWarning, "ForbiddenNodeLabel", "Denied modifying forbidden annotations on node")
recorder.Eventf(newNode, oldNode, corev1.EventTypeWarning, evt.ReasonForbiddenLabel, evt.ActionValidationDenied, "Denied modifying forbidden annotations on node")
response := admission.Denied(NewNodeAnnotationForbiddenError(r.configuration.ForbiddenUserNodeAnnotations()).Error())
response := admission.Denied(caperrors.NewNodeAnnotationForbiddenError(r.configuration.ForbiddenUserNodeAnnotations()).Error())
return &response
}
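
The node webhook only compares the forbidden subset of labels and annotations between the old and new object. getForbiddenNodeLabels and getForbiddenNodeAnnotations are outside this excerpt; a hedged sketch of what such a filter looks like:

```go
// Hedged sketch of the forbidden-key filtering the node webhook relies on; the real
// getForbiddenNodeLabels/getForbiddenNodeAnnotations implementations are not shown here.
package example

// filterForbidden returns only the entries of metadata whose keys appear in the
// forbidden list, so reflect.DeepEqual on the result detects changes to those keys.
func filterForbidden(metadata map[string]string, forbidden []string) map[string]string {
	out := map[string]string{}

	for _, key := range forbidden {
		if value, ok := metadata[key]; ok {
			out[key] = value
		}
	}

	return out
}
```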

View File

@@ -1,128 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package pod
import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta2 "github.com/projectcapsule/capsule/api/v1beta2"
capsulewebhook "github.com/projectcapsule/capsule/internal/webhook"
"github.com/projectcapsule/capsule/pkg/configuration"
)
type containerRegistryHandler struct {
configuration configuration.Configuration
}
func ContainerRegistry(configuration configuration.Configuration) capsulewebhook.TypedHandlerWithTenant[*corev1.Pod] {
return &containerRegistryHandler{
configuration: configuration,
}
}
func (h *containerRegistryHandler) OnCreate(
c client.Client,
pod *corev1.Pod,
decoder admission.Decoder,
recorder record.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.validate(req, pod, tnt, recorder)
}
}
func (h *containerRegistryHandler) OnUpdate(
c client.Client,
old *corev1.Pod,
pod *corev1.Pod,
decoder admission.Decoder,
recorder record.EventRecorder,
tnt *capsulev1beta2.Tenant,
) capsulewebhook.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return h.validate(req, pod, tnt, recorder)
}
}
func (h *containerRegistryHandler) OnDelete(
client.Client,
*corev1.Pod,
admission.Decoder,
record.EventRecorder,
*capsulev1beta2.Tenant,
) capsulewebhook.Func {
return func(context.Context, admission.Request) *admission.Response {
return nil
}
}
func (h *containerRegistryHandler) validate(
req admission.Request,
pod *corev1.Pod,
tnt *capsulev1beta2.Tenant,
recorder record.EventRecorder,
) *admission.Response {
if tnt.Spec.ContainerRegistries == nil {
return nil
}
for _, container := range pod.Spec.InitContainers {
if response := h.verifyContainerRegistry(recorder, req, container.Image, tnt); response != nil {
return response
}
}
for _, container := range pod.Spec.EphemeralContainers {
if response := h.verifyContainerRegistry(recorder, req, container.Image, tnt); response != nil {
return response
}
}
for _, container := range pod.Spec.Containers {
if response := h.verifyContainerRegistry(recorder, req, container.Image, tnt); response != nil {
return response
}
}
return nil
}
func (h *containerRegistryHandler) verifyContainerRegistry(
recorder record.EventRecorder,
req admission.Request,
image string,
tnt *capsulev1beta2.Tenant,
) *admission.Response {
var valid, matched bool
reg := NewRegistry(image, h.configuration)
if len(reg.Registry()) == 0 {
recorder.Eventf(tnt, corev1.EventTypeWarning, "MissingFQCI", "Pod %s/%s is not using a fully qualified container image, cannot enforce registry the current Tenant", req.Namespace, req.Name, reg.Registry())
response := admission.Denied(NewContainerRegistryForbidden(image, *tnt.Spec.ContainerRegistries).Error())
return &response
}
valid = tnt.Spec.ContainerRegistries.ExactMatch(reg.Registry())
matched = tnt.Spec.ContainerRegistries.RegexMatch(reg.Registry())
if !valid && !matched {
recorder.Eventf(tnt, corev1.EventTypeWarning, "ForbiddenContainerRegistry", "Pod %s/%s is using a container hosted on registry %s that is forbidden for the current Tenant", req.Namespace, req.Name, reg.Registry())
response := admission.Denied(NewContainerRegistryForbidden(reg.FQCI(), *tnt.Spec.ContainerRegistries).Error())
return &response
}
return nil
}

Some files were not shown because too many files have changed in this diff