package policy

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileNamespacePodSecurityLabels updates the labels of the Namespace to
// reconcile the PSA level specified in the VirtualClusterPolicy.
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling PSA labels")

	// clean up any previously applied PSA labels
	delete(namespace.Labels, "pod-security.kubernetes.io/enforce")
	delete(namespace.Labels, "pod-security.kubernetes.io/enforce-version")
	delete(namespace.Labels, "pod-security.kubernetes.io/warn")
	delete(namespace.Labels, "pod-security.kubernetes.io/warn-version")

	// if a PSA level is specified, add the proper labels
	if policy.Spec.PodSecurityAdmissionLevel != nil {
		psaLevel := *policy.Spec.PodSecurityAdmissionLevel

		// guard against a nil label map before assigning
		if namespace.Labels == nil {
			namespace.Labels = map[string]string{}
		}

		namespace.Labels["pod-security.kubernetes.io/enforce"] = string(psaLevel)
		namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"

		// set the 'warn' labels too, except for the privileged PSA level
		if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
			namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel)
			namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest"
		}
	}
}
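
// As an illustrative sketch (not part of the upstream file): for a policy
// whose PodSecurityAdmissionLevel is "baseline", the function above leaves
// the Namespace with:
//
//	pod-security.kubernetes.io/enforce: baseline
//	pod-security.kubernetes.io/enforce-version: latest
//	pod-security.kubernetes.io/warn: baseline
//	pod-security.kubernetes.io/warn-version: latest
//
// whereas a "privileged" level sets only the enforce pair, and an unset
// (nil) level removes all four labels.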

// cleanupNamespaces deletes the resources labeled with
// "app.kubernetes.io/managed-by=k3k-policy-controller" from Namespaces. In
// Namespaces still bound to a policy through the "policy.k3k.io/policy-name"
// label, only the resources belonging to other policies are deleted.
func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("deleting resources")

	var namespaces v1.NamespaceList
	if err := c.Client.List(ctx, &namespaces); err != nil {
		return err
	}

	for _, ns := range namespaces.Items {
		deleteOpts := []client.DeleteAllOfOption{
			client.InNamespace(ns.Name),
			client.MatchingLabels{ManagedByLabelKey: VirtualPolicyControllerName},
		}

		// if the namespace is bound to a policy, only clean up resources of other policies
		if ns.Labels[PolicyNameLabelKey] != "" {
			requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]})

			// log the error but continue cleaning up the other namespaces
			if err != nil {
				log.Error(err, "error creating requirement", "policy", ns.Labels[PolicyNameLabelKey])
			} else {
				sel := labels.NewSelector().Add(*requirement)
				deleteOpts = append(deleteOpts, client.MatchingLabelsSelector{Selector: sel})
			}
		}

		if err := c.Client.DeleteAllOf(ctx, &networkingv1.NetworkPolicy{}, deleteOpts...); err != nil {
			return err
		}

		if err := c.Client.DeleteAllOf(ctx, &v1.ResourceQuota{}, deleteOpts...); err != nil {
			return err
		}

		if err := c.Client.DeleteAllOf(ctx, &v1.LimitRange{}, deleteOpts...); err != nil {
			return err
		}
	}

	return nil
}
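
// A minimal sketch (an assumption, not part of the upstream file) of how this
// reconciler could be registered with controller-runtime so that it watches
// the policy and the resource types it manages. It assumes
// VirtualClusterPolicyReconciler implements reconcile.Reconciler; the method
// name setupWithManagerSketch is hypothetical.
func (c *VirtualClusterPolicyReconciler) setupWithManagerSketch(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.VirtualClusterPolicy{}).
		Owns(&networkingv1.NetworkPolicy{}).
		Owns(&v1.ResourceQuota{}).
		Owns(&v1.LimitRange{}).
		Complete(c)
}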