Added Policy finalizer (#781)

* added policy finalizer

* check with a small timeout

* comments

* removed requeue, renamed log
This commit is contained in:
Enrico Candino
2026-04-15 11:14:22 +02:00
committed by GitHub
parent 0e8cca8147
commit 737dcf998e
4 changed files with 295 additions and 9 deletions

View File

@@ -10,6 +10,7 @@ import (
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -29,6 +30,8 @@ const (
PolicyNameLabelKey = "policy.k3k.io/policy-name"
ManagedByLabelKey = "app.kubernetes.io/managed-by"
VirtualPolicyControllerName = "k3k-policy-controller"
policyFinalizerName = "policy.k3k.io/finalizer"
)
type VirtualClusterPolicyReconciler struct {
@@ -256,6 +259,19 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
return reconcile.Result{}, client.IgnoreNotFound(err)
}
// if DeletionTimestamp is not Zero -> finalize the object
if !policy.DeletionTimestamp.IsZero() {
return c.finalizePolicy(ctx, &policy)
}
if controllerutil.AddFinalizer(&policy, policyFinalizerName) {
log.V(1).Info("Updating VirtualClusterPolicy adding finalizer")
if err := c.Client.Update(ctx, &policy); err != nil {
return reconcile.Result{}, err
}
}
orig := policy.DeepCopy()
reconcilerErr := c.reconcileVirtualClusterPolicy(ctx, &policy)

View File

@@ -0,0 +1,111 @@
package policy
import (
"context"
"errors"
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/api/meta"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
// finalizePolicy drives the deletion flow for a VirtualClusterPolicy whose
// DeletionTimestamp is set: it surfaces a Terminating condition on the status,
// runs best-effort cleanup of resources bound to the policy, and finally
// removes the finalizer so the API server can complete the delete.
func (c *VirtualClusterPolicyReconciler) finalizePolicy(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	log.V(1).Info("Deleting VirtualClusterPolicy")

	// Flip the Ready condition to reflect that the policy is going away.
	cond := metav1.Condition{
		Type:    "Ready",
		Status:  metav1.ConditionFalse,
		Reason:  "Terminating",
		Message: "Policy is being terminated",
	}
	meta.SetStatusCondition(&policy.Status.Conditions, cond)

	// A failed status update must not block finalization; log and move on.
	if err := c.Client.Status().Update(ctx, policy); err != nil {
		log.Error(err, "failed to update policy status to Terminating")
	}

	// Cleanup is best-effort: partial failures are logged, not returned,
	// so a broken namespace cannot wedge policy deletion forever.
	if err := c.cleanupPolicyResources(ctx, policy); err != nil {
		log.Error(err, "failed to cleanup policy resources, continuing with finalization")
	}

	// Dropping the finalizer is the step that actually lets the object go.
	if removed := controllerutil.RemoveFinalizer(policy, policyFinalizerName); removed {
		log.Info("Deleting VirtualClusterPolicy removing finalizer")

		if err := c.Client.Update(ctx, policy); err != nil {
			return reconcile.Result{}, err
		}
	}

	return reconcile.Result{}, nil
}
// cleanupPolicyResources detaches the policy from every namespace bound to it:
// it clears the policy fields on clusters in each bound namespace, removes the
// policy label (and, when the policy set them, the Pod Security Admission
// labels) from the namespace, and collects all per-namespace failures into a
// single joined error. Owned resources (NetworkPolicy, ResourceQuota,
// LimitRange) are left to Kubernetes garbage collection via owner references.
func (c *VirtualClusterPolicyReconciler) cleanupPolicyResources(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
	log := ctrl.LoggerFrom(ctx)
	log.V(1).Info("Cleaning up policy resources")

	// All namespaces bound to this policy carry its name label.
	listOpts := client.MatchingLabels{
		PolicyNameLabelKey: policy.Name,
	}

	var namespaces corev1.NamespaceList
	if err := c.Client.List(ctx, &namespaces, listOpts); err != nil {
		return err
	}

	var cleanupErrs []error

	for _, ns := range namespaces.Items {
		// Build a fresh per-namespace logger/context instead of reassigning
		// the outer ones: reassigning (`log = log.WithValues(...)`) made each
		// iteration inherit the previous iteration's "namespace" key, so the
		// logger accumulated duplicate key-value pairs across namespaces.
		nsLog := log.WithValues("namespace", ns.Name)
		nsCtx := ctrl.LoggerInto(ctx, nsLog)

		nsLog.V(1).Info("Cleaning up namespace")

		// Clear policy fields from all clusters in this namespace; record the
		// failure but keep going so other namespaces still get cleaned up.
		if err := c.clearPolicyFieldsForClustersInNamespace(nsCtx, ns.Name); err != nil {
			cleanupErrs = append(cleanupErrs, fmt.Errorf("failed to clear cluster policy fields in namespace %s: %w", ns.Name, err))
		}

		// Remove the policy label, and the PSA labels only if this policy was
		// the one that set them (i.e. it declares a PSA level).
		orig := ns.DeepCopy()
		delete(ns.Labels, PolicyNameLabelKey)

		if policy.Spec.PodSecurityAdmissionLevel != nil {
			delete(ns.Labels, "pod-security.kubernetes.io/enforce")
			delete(ns.Labels, "pod-security.kubernetes.io/enforce-version")
			delete(ns.Labels, "pod-security.kubernetes.io/warn")
			delete(ns.Labels, "pod-security.kubernetes.io/warn-version")
		}

		// Only issue an update when a label was actually removed.
		if !reflect.DeepEqual(orig.Labels, ns.Labels) {
			nsLog.V(1).Info("Updating namespace to remove policy labels")

			if err := c.Client.Update(nsCtx, &ns); err != nil {
				cleanupErrs = append(cleanupErrs, fmt.Errorf("failed to update namespace %s labels: %w", ns.Name, err))
			}
		}
	}

	// Owned resources (NetworkPolicy, ResourceQuota, LimitRange) will be
	// automatically deleted by Kubernetes garbage collection via owner references
	return errors.Join(cleanupErrs...)
}

View File

@@ -131,9 +131,14 @@ var _ = When("using the k3kcli", Label("cli"), func() {
Expect(stdout).To(BeEmpty())
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policyName))
stdout, stderr, err = K3kcli("policy", "list")
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stdout).To(Not(ContainSubstring(policyName)))
Eventually(func(g Gomega) {
stdout, stderr, err = K3kcli("policy", "list")
g.Expect(err).To(Not(HaveOccurred()), string(stderr))
g.Expect(stdout).To(Not(ContainSubstring(policyName)))
}).
WithTimeout(time.Second * 5).
WithPolling(time.Second).
Should(Succeed())
})
It("can bound a policy to a namespace", func() {

View File

@@ -428,6 +428,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Should(Succeed())
// update the VirtualClusterPolicy
err = k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
Expect(err).To(Not(HaveOccurred()))
policy.Spec.DefaultNodeSelector["label-2"] = "value-2"
err = k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
@@ -481,8 +484,6 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
DefaultAgentAffinity: agentAffinity,
})
bindPolicyToNamespace(namespace, policy)
err := k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
@@ -495,7 +496,8 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Agents: ptr.To[int32](0),
},
}
err = k8sClient.Create(ctx, cluster)
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// wait a bit
@@ -533,8 +535,6 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
DefaultAgentAffinity: agentAffinity,
})
bindPolicyToNamespace(namespace, policy)
err := k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
// Cluster values that will get overwritten by the policy in the cluster status
clusterAgentAffinity := agentAffinity.DeepCopy()
@@ -554,7 +554,8 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
ServerAffinity: clusterServerAffinity,
},
}
err = k8sClient.Create(ctx, cluster)
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// wait a bit
@@ -699,6 +700,159 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
WithPolling(time.Second).
Should(BeTrue())
})
// NOTE(review): end-to-end check of the finalizer path — verifies that deleting
// a policy removes the namespace binding label, the PSA labels it set, and the
// policy fields mirrored into cluster status, before the object disappears.
It("should clean up namespace labels and cluster status when policy is deleted", func() {
baseline := v1beta1.BaselinePodSecurityAdmissionLevel
// Policy that sets both a PSA level and a default priority class, so
// both cleanup branches are exercised.
vcp := newPolicy(v1beta1.VirtualClusterPolicySpec{
PodSecurityAdmissionLevel: &baseline,
DefaultPriorityClass: "test-priority",
})
bindPolicyToNamespace(namespace, vcp)
// Create a cluster in the namespace
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
}
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
var ns corev1.Namespace
// Verify namespace has policy label and PSA labels
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(ns.Labels).Should(HaveKeyWithValue(policy.PolicyNameLabelKey, vcp.Name))
g.Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "baseline"))
g.Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
// Verify cluster has policy status set
Eventually(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(cluster.Status.PolicyName).To(Equal(vcp.Name))
g.Expect(cluster.Status.Policy).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.PriorityClass).To(Not(BeNil()))
g.Expect(*cluster.Status.Policy.PriorityClass).To(Equal("test-priority"))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
// Delete the policy
err = k8sClient.Delete(ctx, vcp)
Expect(err).To(Not(HaveOccurred()))
// Verify policy label is removed from namespace
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(ns.Labels).Should(Not(HaveKey(policy.PolicyNameLabelKey)))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
// Verify PSA labels are removed from namespace (since policy set them)
// (ns was refreshed by the Eventually just above, so these reads are
// against the post-cleanup namespace state)
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/enforce")))
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/enforce-version")))
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn")))
Expect(ns.Labels).Should(Not(HaveKey("pod-security.kubernetes.io/warn-version")))
// Verify Policy cleared from cluster status
Eventually(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(cluster.Status.PolicyName).To(BeEmpty())
g.Expect(cluster.Status.Policy).To(BeNil())
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
// Verify policy is actually deleted
// (the finalizer must be removed before the object can disappear)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(vcp), vcp)
return apierrors.IsNotFound(err)
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(BeTrue())
})
// NOTE(review): negative counterpart of the cleanup test — when the policy
// declares no PodSecurityAdmissionLevel, finalization must remove only the
// binding label and leave externally-managed PSA labels untouched.
It("should NOT remove PSA labels if policy did not set them", func() {
// Create policy without PSA labels
vcp := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultPriorityClass: "test-priority",
})
bindPolicyToNamespace(namespace, vcp)
var ns corev1.Namespace
// Manually set PSA labels on namespace
// (first wait for the controller to stamp the binding label)
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(ns.Labels).Should(HaveKeyWithValue(policy.PolicyNameLabelKey, vcp.Name))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
// Add PSA labels manually
ns.Labels["pod-security.kubernetes.io/enforce"] = "restricted"
ns.Labels["pod-security.kubernetes.io/enforce-version"] = "v1.28"
err := k8sClient.Update(ctx, &ns)
Expect(err).To(Not(HaveOccurred()))
// Verify PSA labels are present
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
Expect(err).To(Not(HaveOccurred()))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "restricted"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "v1.28"))
// Delete the policy
err = k8sClient.Delete(ctx, vcp)
Expect(err).To(Not(HaveOccurred()))
// Verify policy label is removed
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(ns.Labels).Should(Not(HaveKey(policy.PolicyNameLabelKey)))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
// Verify PSA labels are NOT removed (since policy didn't set them)
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "restricted"))
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "v1.28"))
})
})
})
})