Mirror of https://github.com/rancher/k3k.git, synced 2026-05-06 01:16:52 +00:00
Change VirtualClusterPolicy scope to Cluster (#358)
* rename clusterset to policy
* fixes
* rename clusterset to policy
* wip
* go mod
* cluster scoped
* gomod
* gomod
* fix lint
* wip
* moved logic to vcp controller
* update for clusters
* small fixes
* update cli
* fix docs, updated spec
* fix cleanup
* added missing owns for limitranges
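For context, a minimal sketch (not part of the commit) of how the new model is meant to be used: the policy becomes a cluster-scoped object and a Namespace opts in by carrying the policy.k3k.io/policy-name label that the controller below reconciles. The object names, the quota value, and the example package are illustrative assumptions; the field names, the label key, and the v1alpha1 import path come from the diff that follows.

```go
package example

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createPolicyAndBindNamespace sketches the intended flow after this change:
// the VirtualClusterPolicy is created as a cluster-scoped object (no namespace),
// and a Namespace is bound to it via the "policy.k3k.io/policy-name" label.
func createPolicyAndBindNamespace(ctx context.Context, c client.Client) error {
	restricted := v1alpha1.RestrictedPodSecurityAdmissionLevel

	policy := &v1alpha1.VirtualClusterPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "team-a-policy"}, // illustrative name
		Spec: v1alpha1.VirtualClusterPolicySpec{
			DisplayName:               "Team A policy",
			PodSecurityAdmissionLevel: &restricted,
			Quota: &corev1.ResourceQuotaSpec{
				Hard: corev1.ResourceList{
					corev1.ResourceCPU: resource.MustParse("4"), // illustrative quota
				},
			},
		},
	}
	if err := c.Create(ctx, policy); err != nil {
		return err
	}

	// The controller reconciles every Namespace labelled with the policy name.
	ns := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "team-a", // illustrative name
			Labels: map[string]string{"policy.k3k.io/policy-name": policy.Name},
		},
	}

	return c.Create(ctx, ns)
}
```

Once a Namespace is bound this way, the controller creates the NetworkPolicy, ResourceQuota, and LimitRange for it, as the new files in the diff below show.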
@@ -333,12 +333,11 @@ type ClusterList struct {
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:object:root=true
// +kubebuilder:validation:XValidation:rule="self.metadata.name == \"default\"",message="Name must match 'default'"
// +kubebuilder:printcolumn:JSONPath=".spec.displayName",name=Display Name,type=string
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name=Age,type=date
// +kubebuilder:resource:scope=Cluster

// VirtualClusterPolicy allows defining common configurations and constraints
// for the clusters in a namespace activated by an annotation on that Namespace.
// for clusters within a clusterpolicy.
type VirtualClusterPolicy struct {
	metav1.ObjectMeta `json:"metadata"`
	metav1.TypeMeta `json:",inline"`
@@ -357,17 +356,12 @@ type VirtualClusterPolicy struct {

// VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.
type VirtualClusterPolicySpec struct {

	// DisplayName is the human-readable name for the policy.
	//
	// +optional
	DisplayName string `json:"displayName,omitempty"`

	// Quota defines the ResourceQuotaSpec to be applied to the target Namespace.
	// Quota specifies the resource limits for clusters within a clusterpolicy.
	//
	// +optional
	Quota *v1.ResourceQuotaSpec `json:"quota,omitempty"`

	// Limit specifies the LimitRange that will be applied to all pods in the target Namespace
	// Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy
	// to set defaults and constraints (min/max)
	//
	// +optional

85
pkg/controller/policy/namespace.go
Normal file
@@ -0,0 +1,85 @@
package policy

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling PSA labels")

	// cleanup of old labels
	delete(namespace.Labels, "pod-security.kubernetes.io/enforce")
	delete(namespace.Labels, "pod-security.kubernetes.io/enforce-version")
	delete(namespace.Labels, "pod-security.kubernetes.io/warn")
	delete(namespace.Labels, "pod-security.kubernetes.io/warn-version")

	// if a PSA level is specified add the proper labels
	if policy.Spec.PodSecurityAdmissionLevel != nil {
		psaLevel := *policy.Spec.PodSecurityAdmissionLevel

		namespace.Labels["pod-security.kubernetes.io/enforce"] = string(psaLevel)
		namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"

		// skip the 'warn' only for the privileged PSA level
		if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
			namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel)
			namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest"
		}
	}
}

// cleanupNamespaces will cleanup the Namespaces without the "policy.k3k.io/policy-name" label
// deleting the resources in them with the "app.kubernetes.io/managed-by=k3k-policy-controller" label
func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("deleting resources")

	var namespaces v1.NamespaceList
	if err := c.Client.List(ctx, &namespaces); err != nil {
		return err
	}

	for _, ns := range namespaces.Items {
		deleteOpts := []client.DeleteAllOfOption{
			client.InNamespace(ns.Name),
			client.MatchingLabels{ManagedByLabelKey: VirtualPolicyControllerName},
		}

		// if the namespace is bound to a policy -> cleanup resources of other policies
		if ns.Labels[PolicyNameLabelKey] != "" {
			requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]})

			// log the error but continue cleaning up the other namespaces
			if err != nil {
				log.Error(err, "error creating requirement", "policy", ns.Labels[PolicyNameLabelKey])
			} else {
				sel := labels.NewSelector().Add(*requirement)
				deleteOpts = append(deleteOpts, client.MatchingLabelsSelector{Selector: sel})
			}
		}

		if err := c.Client.DeleteAllOf(ctx, &networkingv1.NetworkPolicy{}, deleteOpts...); err != nil {
			return err
		}

		if err := c.Client.DeleteAllOf(ctx, &v1.ResourceQuota{}, deleteOpts...); err != nil {
			return err
		}

		if err := c.Client.DeleteAllOf(ctx, &v1.LimitRange{}, deleteOpts...); err != nil {
			return err
		}
	}

	return nil
}
115
pkg/controller/policy/networkpolicy.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package policy
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("reconciling NetworkPolicy")
|
||||
|
||||
var cidrList []string
|
||||
|
||||
if c.ClusterCIDR != "" {
|
||||
cidrList = []string{c.ClusterCIDR}
|
||||
} else {
|
||||
var nodeList v1.NodeList
|
||||
if err := c.Client.List(ctx, &nodeList); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
if len(node.Spec.PodCIDRs) > 0 {
|
||||
cidrList = append(cidrList, node.Spec.PodCIDRs...)
|
||||
} else {
|
||||
cidrList = append(cidrList, node.Spec.PodCIDR)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
networkPolicy := networkPolicy(namespace, policy, cidrList)
|
||||
|
||||
if err := ctrl.SetControllerReference(policy, networkPolicy, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if disabled then delete the existing network policy
|
||||
if policy.Spec.DisableNetworkPolicy {
|
||||
err := c.Client.Delete(ctx, networkPolicy)
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// otherwise try to create/update
|
||||
err := c.Client.Create(ctx, networkPolicy)
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return c.Client.Update(ctx, networkPolicy)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func networkPolicy(namespaceName string, policy *v1alpha1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
|
||||
return &networkingv1.NetworkPolicy{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "NetworkPolicy",
|
||||
APIVersion: "networking.k8s.io/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespaceName,
|
||||
Labels: map[string]string{
|
||||
ManagedByLabelKey: VirtualPolicyControllerName,
|
||||
PolicyNameLabelKey: policy.Name,
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PolicyTypes: []networkingv1.PolicyType{
|
||||
networkingv1.PolicyTypeIngress,
|
||||
networkingv1.PolicyTypeEgress,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{
|
||||
{},
|
||||
},
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
To: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
IPBlock: &networkingv1.IPBlock{
|
||||
CIDR: "0.0.0.0/0",
|
||||
Except: cidrList,
|
||||
},
|
||||
},
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"kubernetes.io/metadata.name": namespaceName,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"kubernetes.io/metadata.name": metav1.NamespaceSystem,
|
||||
},
|
||||
},
|
||||
PodSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"k8s-app": "kube-dns",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
package policy
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
const (
|
||||
nodeController = "k3k-node-controller"
|
||||
)
|
||||
|
||||
type NodeReconciler struct {
|
||||
Client ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
ClusterCIDR string
|
||||
}
|
||||
|
||||
// AddNodeController adds a new controller to the manager
|
||||
func AddNodeController(ctx context.Context, mgr manager.Manager) error {
|
||||
// initialize a new Reconciler
|
||||
reconciler := NodeReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&v1.Node{}).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
}).
|
||||
Named(nodeController).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (n *NodeReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("node", req.NamespacedName)
|
||||
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
|
||||
|
||||
log.Info("reconciling node")
|
||||
|
||||
var clusterPolicyList v1alpha1.VirtualClusterPolicyList
|
||||
if err := n.Client.List(ctx, &clusterPolicyList); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if len(clusterPolicyList.Items) <= 0 {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
if err := n.ensureNetworkPolicies(ctx, clusterPolicyList); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (n *NodeReconciler) ensureNetworkPolicies(ctx context.Context, clusterPolicyList v1alpha1.VirtualClusterPolicyList) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("ensuring network policies")
|
||||
|
||||
var setNetworkPolicy *networkingv1.NetworkPolicy
|
||||
|
||||
for _, cs := range clusterPolicyList.Items {
|
||||
if cs.Spec.DisableNetworkPolicy {
|
||||
continue
|
||||
}
|
||||
|
||||
log = log.WithValues("clusterpolicy", cs.Namespace+"/"+cs.Name)
|
||||
log.Info("updating NetworkPolicy for VirtualClusterPolicy")
|
||||
|
||||
var err error
|
||||
setNetworkPolicy, err = netpol(ctx, "", &cs, n.Client)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("new NetworkPolicy for clusterpolicy")
|
||||
|
||||
if err := n.Client.Update(ctx, setNetworkPolicy); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -13,21 +13,19 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
const (
|
||||
clusterPolicyController = "k3k-clusterpolicy-controller"
|
||||
allTrafficCIDR = "0.0.0.0/0"
|
||||
maxConcurrentReconciles = 1
|
||||
PolicyNameLabelKey = "policy.k3k.io/policy-name"
|
||||
ManagedByLabelKey = "app.kubernetes.io/managed-by"
|
||||
VirtualPolicyControllerName = "k3k-policy-controller"
|
||||
)
|
||||
|
||||
type VirtualClusterPolicyReconciler struct {
|
||||
@@ -36,9 +34,8 @@ type VirtualClusterPolicyReconciler struct {
|
||||
ClusterCIDR string
|
||||
}
|
||||
|
||||
// Add adds a new controller to the manager
|
||||
func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string) error {
|
||||
// initialize a new Reconciler
|
||||
// Add the controller to manage the Virtual Cluster policies
|
||||
func Add(mgr manager.Manager, clusterCIDR string) error {
|
||||
reconciler := VirtualClusterPolicyReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
@@ -47,63 +44,207 @@ func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string) error {
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&v1alpha1.VirtualClusterPolicy{}).
|
||||
Watches(&v1.Namespace{}, namespaceEventHandler()).
|
||||
Watches(&v1.Node{}, nodeEventHandler(&reconciler)).
|
||||
Watches(&v1alpha1.Cluster{}, clusterEventHandler(&reconciler)).
|
||||
Owns(&networkingv1.NetworkPolicy{}).
|
||||
Owns(&v1.ResourceQuota{}).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
}).
|
||||
Watches(
|
||||
&v1.Namespace{},
|
||||
handler.EnqueueRequestsFromMapFunc(namespaceEventHandler(reconciler)),
|
||||
builder.WithPredicates(namespaceLabelsPredicate()),
|
||||
).
|
||||
Watches(
|
||||
&v1alpha1.Cluster{},
|
||||
handler.EnqueueRequestsFromMapFunc(namespaceEventHandler(reconciler)),
|
||||
).
|
||||
Owns(&v1.LimitRange{}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
// namespaceEventHandler will enqueue a reconcile request for the VirtualClusterPolicy in the given namespace
|
||||
func namespaceEventHandler(reconciler VirtualClusterPolicyReconciler) handler.MapFunc {
|
||||
return func(ctx context.Context, obj client.Object) []reconcile.Request {
|
||||
// if the object is a Namespace, use the name as the namespace
|
||||
namespace := obj.GetName()
|
||||
// namespaceEventHandler will enqueue a reconciliation of VCP when a Namespace changes
|
||||
func namespaceEventHandler() handler.Funcs {
|
||||
return handler.Funcs{
|
||||
// When a Namespace is created, if it has the "policy.k3k.io/policy-name" label
|
||||
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
|
||||
ns, ok := e.Object.(*v1.Namespace)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// if the object is a namespaced resource, use the namespace
|
||||
if obj.GetNamespace() != "" {
|
||||
namespace = obj.GetNamespace()
|
||||
}
|
||||
if ns.Labels[PolicyNameLabelKey] != "" {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
|
||||
}
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
}
|
||||
},
|
||||
// When a Namespace is updated, if it has the "policy.k3k.io/policy-name" label
|
||||
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
|
||||
oldNs, okOld := e.ObjectOld.(*v1.Namespace)
|
||||
newNs, okNew := e.ObjectNew.(*v1.Namespace)
|
||||
|
||||
var policy v1alpha1.VirtualClusterPolicy
|
||||
if err := reconciler.Client.Get(ctx, key, &policy); err != nil {
|
||||
return nil
|
||||
}
|
||||
if !okOld || !okNew {
|
||||
return
|
||||
}
|
||||
|
||||
return []reconcile.Request{{NamespacedName: key}}
|
||||
}
|
||||
}
|
||||
oldVCPName := oldNs.Labels[PolicyNameLabelKey]
|
||||
newVCPName := newNs.Labels[PolicyNameLabelKey]
|
||||
|
||||
// namespaceLabelsPredicate returns a predicate that will allow a reconciliation if the labels of a Namespace changed
|
||||
func namespaceLabelsPredicate() predicate.Predicate {
|
||||
return predicate.Funcs{
|
||||
UpdateFunc: func(e event.UpdateEvent) bool {
|
||||
oldObj := e.ObjectOld.(*v1.Namespace)
|
||||
newObj := e.ObjectNew.(*v1.Namespace)
|
||||
// If labels haven't changed we can skip the reconciliation
|
||||
if reflect.DeepEqual(oldNs.Labels, newNs.Labels) {
|
||||
return
|
||||
}
|
||||
|
||||
return !reflect.DeepEqual(oldObj.Labels, newObj.Labels)
|
||||
// If No VCP before and after we can skip the reconciliation
|
||||
if oldVCPName == "" && newVCPName == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// The VCP has not changed, but we enqueue a reconciliation because the PSA or other labels have changed
|
||||
if oldVCPName == newVCPName {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: oldVCPName}})
|
||||
return
|
||||
}
|
||||
|
||||
// Enqueue the old VCP name for cleanup
|
||||
if oldVCPName != "" {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: oldVCPName}})
|
||||
}
|
||||
|
||||
// Enqueue the new VCP name
|
||||
if newVCPName != "" {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: newVCPName}})
|
||||
}
|
||||
},
|
||||
// When a namespace is deleted all the resources in the namespace are deleted
|
||||
// but we trigger the reconciliation to eventually perform some cluster-wide cleanup if necessary
|
||||
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
|
||||
ns, ok := e.Object.(*v1.Namespace)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if ns.Labels[PolicyNameLabelKey] != "" {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// nodeEventHandler will enqueue a reconciliation of all the VCPs when a Node changes.
|
||||
// This happens only if the ClusterCIDR is NOT specified, to handle the PodCIDRs in the NetworkPolicies.
|
||||
func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
|
||||
// enqueue all the available VirtualClusterPolicies
|
||||
enqueueAllVCPs := func(ctx context.Context, q workqueue.RateLimitingInterface) {
|
||||
vcpList := &v1alpha1.VirtualClusterPolicyList{}
|
||||
if err := r.Client.List(ctx, vcpList); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, vcp := range vcpList.Items {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: vcp.Name}})
|
||||
}
|
||||
}
|
||||
|
||||
return handler.Funcs{
|
||||
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
|
||||
if r.ClusterCIDR != "" {
|
||||
return
|
||||
}
|
||||
|
||||
enqueueAllVCPs(ctx, q)
|
||||
},
|
||||
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
|
||||
if r.ClusterCIDR != "" {
|
||||
return
|
||||
}
|
||||
|
||||
oldNode, okOld := e.ObjectOld.(*v1.Node)
|
||||
newNode, okNew := e.ObjectNew.(*v1.Node)
|
||||
|
||||
if !okOld || !okNew {
|
||||
return
|
||||
}
|
||||
|
||||
// Check if PodCIDR or PodCIDRs fields have changed.
|
||||
|
||||
var podCIDRChanged bool
|
||||
if oldNode.Spec.PodCIDR != newNode.Spec.PodCIDR {
|
||||
podCIDRChanged = true
|
||||
}
|
||||
if !reflect.DeepEqual(oldNode.Spec.PodCIDRs, newNode.Spec.PodCIDRs) {
|
||||
podCIDRChanged = true
|
||||
}
|
||||
|
||||
if podCIDRChanged {
|
||||
enqueueAllVCPs(ctx, q)
|
||||
}
|
||||
},
|
||||
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
|
||||
if r.ClusterCIDR != "" {
|
||||
return
|
||||
}
|
||||
|
||||
enqueueAllVCPs(ctx, q)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// clusterEventHandler will enqueue a reconciliation of the VCP associated to the Namespace when a Cluster changes.
|
||||
func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
|
||||
type clusterSubSpec struct {
|
||||
PriorityClass string
|
||||
NodeSelector map[string]string
|
||||
}
|
||||
|
||||
return handler.Funcs{
|
||||
// When a Cluster is created, if its Namespace has the "policy.k3k.io/policy-name" label
|
||||
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
|
||||
cluster, ok := e.Object.(*v1alpha1.Cluster)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
var ns v1.Namespace
|
||||
if err := r.Client.Get(ctx, types.NamespacedName{Name: cluster.Namespace}, &ns); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if ns.Labels[PolicyNameLabelKey] != "" {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
|
||||
}
|
||||
},
|
||||
// When a Cluster is updated, if its Namespace has the "policy.k3k.io/policy-name" label
|
||||
// and if some of its spec influenced by the policy changed
|
||||
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
|
||||
oldCluster, okOld := e.ObjectOld.(*v1alpha1.Cluster)
|
||||
newCluster, okNew := e.ObjectNew.(*v1alpha1.Cluster)
|
||||
|
||||
if !okOld || !okNew {
|
||||
return
|
||||
}
|
||||
|
||||
var ns v1.Namespace
|
||||
if err := r.Client.Get(ctx, types.NamespacedName{Name: oldCluster.Namespace}, &ns); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if ns.Labels[PolicyNameLabelKey] == "" {
|
||||
return
|
||||
}
|
||||
|
||||
clusterSubSpecOld := clusterSubSpec{
|
||||
PriorityClass: oldCluster.Spec.PriorityClass,
|
||||
NodeSelector: oldCluster.Spec.NodeSelector,
|
||||
}
|
||||
|
||||
clusterSubSpecNew := clusterSubSpec{
|
||||
PriorityClass: newCluster.Spec.PriorityClass,
|
||||
NodeSelector: newCluster.Spec.NodeSelector,
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(clusterSubSpecOld, clusterSubSpecNew) {
|
||||
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.Labels[PolicyNameLabelKey]}})
|
||||
}
|
||||
},
|
||||
// When a Cluster is deleted -> nothing to do
|
||||
DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("clusterpolicy", req.NamespacedName)
|
||||
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("reconciling VirtualClusterPolicy")
|
||||
|
||||
var policy v1alpha1.VirtualClusterPolicy
|
||||
if err := c.Client.Get(ctx, req.NamespacedName, &policy); err != nil {
|
||||
@@ -127,7 +268,7 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
|
||||
}
|
||||
|
||||
// update VirtualClusterPolicy if needed
|
||||
if !reflect.DeepEqual(orig.Spec, policy.Spec) {
|
||||
if !reflect.DeepEqual(orig, policy) {
|
||||
if err := c.Client.Update(ctx, &policy); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -137,211 +278,75 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
|
||||
}
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
if err := c.reconcileNetworkPolicy(ctx, policy); err != nil {
|
||||
if err := c.reconcileMatchingNamespaces(ctx, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.reconcileNamespacePodSecurityLabels(ctx, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.reconcileLimit(ctx, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.reconcileQuota(ctx, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.reconcileClusters(ctx, policy); err != nil {
|
||||
if err := c.cleanupNamespaces(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("reconciling NetworkPolicy")
|
||||
log.Info("reconciling matching Namespaces")
|
||||
|
||||
networkPolicy, err := netpol(ctx, c.ClusterCIDR, policy, c.Client)
|
||||
if err != nil {
|
||||
listOpts := client.MatchingLabels{
|
||||
PolicyNameLabelKey: policy.Name,
|
||||
}
|
||||
|
||||
var namespaces v1.NamespaceList
|
||||
if err := c.Client.List(ctx, &namespaces, listOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = ctrl.SetControllerReference(policy, networkPolicy, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ns := range namespaces.Items {
|
||||
ctx = ctrl.LoggerInto(ctx, log.WithValues("namespace", ns.Name))
|
||||
log.Info("reconciling Namespace")
|
||||
|
||||
// if disabled then delete the existing network policy
|
||||
if policy.Spec.DisableNetworkPolicy {
|
||||
err := c.Client.Delete(ctx, networkPolicy)
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
orig := ns.DeepCopy()
|
||||
|
||||
// otherwise try to create/update
|
||||
err = c.Client.Create(ctx, networkPolicy)
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return c.Client.Update(ctx, networkPolicy)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func netpol(ctx context.Context, clusterCIDR string, policy *v1alpha1.VirtualClusterPolicy, client client.Client) (*networkingv1.NetworkPolicy, error) {
|
||||
var cidrList []string
|
||||
|
||||
if clusterCIDR != "" {
|
||||
cidrList = []string{clusterCIDR}
|
||||
} else {
|
||||
var nodeList v1.NodeList
|
||||
if err := client.List(ctx, &nodeList); err != nil {
|
||||
return nil, err
|
||||
if err := c.reconcileNetworkPolicy(ctx, ns.Name, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
cidrList = append(cidrList, node.Spec.PodCIDRs...)
|
||||
if err := c.reconcileQuota(ctx, ns.Name, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: policy.Namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "NetworkPolicy",
|
||||
APIVersion: "networking.k8s.io/v1",
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PolicyTypes: []networkingv1.PolicyType{
|
||||
networkingv1.PolicyTypeIngress,
|
||||
networkingv1.PolicyTypeEgress,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{
|
||||
{},
|
||||
},
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
To: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
IPBlock: &networkingv1.IPBlock{
|
||||
CIDR: allTrafficCIDR,
|
||||
Except: cidrList,
|
||||
},
|
||||
},
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"kubernetes.io/metadata.name": policy.Namespace,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"kubernetes.io/metadata.name": metav1.NamespaceSystem,
|
||||
},
|
||||
},
|
||||
PodSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"k8s-app": "kube-dns",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("reconciling Namespace")
|
||||
|
||||
var ns v1.Namespace
|
||||
|
||||
key := types.NamespacedName{Name: policy.Namespace}
|
||||
if err := c.Client.Get(ctx, key, &ns); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newLabels := map[string]string{}
|
||||
for k, v := range ns.Labels {
|
||||
newLabels[k] = v
|
||||
}
|
||||
|
||||
// cleanup of old labels
|
||||
delete(newLabels, "pod-security.kubernetes.io/enforce")
|
||||
delete(newLabels, "pod-security.kubernetes.io/enforce-version")
|
||||
delete(newLabels, "pod-security.kubernetes.io/warn")
|
||||
delete(newLabels, "pod-security.kubernetes.io/warn-version")
|
||||
|
||||
// if a PSA level is specified add the proper labels
|
||||
if policy.Spec.PodSecurityAdmissionLevel != nil {
|
||||
psaLevel := *policy.Spec.PodSecurityAdmissionLevel
|
||||
|
||||
newLabels["pod-security.kubernetes.io/enforce"] = string(psaLevel)
|
||||
newLabels["pod-security.kubernetes.io/enforce-version"] = "latest"
|
||||
|
||||
// skip the 'warn' only for the privileged PSA level
|
||||
if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
|
||||
newLabels["pod-security.kubernetes.io/warn"] = string(psaLevel)
|
||||
newLabels["pod-security.kubernetes.io/warn-version"] = "latest"
|
||||
if err := c.reconcileLimit(ctx, ns.Name, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(ns.Labels, newLabels) {
|
||||
log.V(1).Info("labels changed, updating namespace")
|
||||
if err := c.reconcileClusters(ctx, &ns, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ns.Labels = newLabels
|
||||
c.reconcileNamespacePodSecurityLabels(ctx, &ns, policy)
|
||||
|
||||
return c.Client.Update(ctx, &ns)
|
||||
if !reflect.DeepEqual(orig, &ns) {
|
||||
if err := c.Client.Update(ctx, &ns); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("reconciling Clusters")
|
||||
log.Info("reconciling ResourceQuota")
|
||||
|
||||
var clusters v1alpha1.ClusterList
|
||||
if err := c.Client.List(ctx, &clusters, client.InNamespace(policy.Namespace)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
for _, cluster := range clusters.Items {
|
||||
oldClusterSpec := cluster.Spec
|
||||
|
||||
if cluster.Spec.PriorityClass != policy.Spec.DefaultPriorityClass {
|
||||
cluster.Spec.PriorityClass = policy.Spec.DefaultPriorityClass
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector) {
|
||||
cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(oldClusterSpec, cluster.Spec) {
|
||||
// continue updating also the other clusters even if an error occurred
|
||||
err = errors.Join(c.Client.Update(ctx, &cluster))
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
if policy.Spec.Quota == nil {
|
||||
// check if resourceQuota object exists and deletes it.
|
||||
var toDeleteResourceQuota v1.ResourceQuota
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: policy.Namespace,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
if err := c.Client.Get(ctx, key, &toDeleteResourceQuota); err != nil {
|
||||
@@ -352,38 +357,37 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, pol
|
||||
}
|
||||
|
||||
// create/update resource Quota
|
||||
resourceQuota := resourceQuota(policy)
|
||||
|
||||
if err := ctrl.SetControllerReference(policy, &resourceQuota, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.Client.Create(ctx, &resourceQuota); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return c.Client.Update(ctx, &resourceQuota)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceQuota(policy *v1alpha1.VirtualClusterPolicy) v1.ResourceQuota {
|
||||
return v1.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: policy.Namespace,
|
||||
},
|
||||
resourceQuota := &v1.ResourceQuota{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ResourceQuota",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Labels: map[string]string{
|
||||
ManagedByLabelKey: VirtualPolicyControllerName,
|
||||
PolicyNameLabelKey: policy.Name,
|
||||
},
|
||||
},
|
||||
Spec: *policy.Spec.Quota,
|
||||
}
|
||||
|
||||
if err := ctrl.SetControllerReference(policy, resourceQuota, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := c.Client.Create(ctx, resourceQuota)
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return c.Client.Update(ctx, resourceQuota)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("Reconciling VirtualClusterPolicy Limit")
|
||||
log.Info("reconciling LimitRange")
|
||||
|
||||
// delete limitrange if spec.limits isnt specified.
|
||||
if policy.Spec.Limit == nil {
|
||||
@@ -391,7 +395,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, pol
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: policy.Namespace,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
if err := c.Client.Get(ctx, key, &toDeleteLimitRange); err != nil {
|
||||
@@ -401,30 +405,56 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, pol
|
||||
return c.Client.Delete(ctx, &toDeleteLimitRange)
|
||||
}
|
||||
|
||||
limitRange := limitRange(policy)
|
||||
if err := ctrl.SetControllerReference(policy, &limitRange, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.Client.Create(ctx, &limitRange); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return c.Client.Update(ctx, &limitRange)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func limitRange(policy *v1alpha1.VirtualClusterPolicy) v1.LimitRange {
|
||||
return v1.LimitRange{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: policy.Namespace,
|
||||
},
|
||||
limitRange := &v1.LimitRange{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "LimitRange",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Labels: map[string]string{
|
||||
ManagedByLabelKey: VirtualPolicyControllerName,
|
||||
PolicyNameLabelKey: policy.Name,
|
||||
},
|
||||
},
|
||||
Spec: *policy.Spec.Limit,
|
||||
}
|
||||
|
||||
if err := ctrl.SetControllerReference(policy, limitRange, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := c.Client.Create(ctx, limitRange)
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return c.Client.Update(ctx, limitRange)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("reconciling Clusters")
|
||||
|
||||
var clusters v1alpha1.ClusterList
|
||||
if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace.Name)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var clusterUpdateErrs []error
|
||||
|
||||
for _, cluster := range clusters.Items {
|
||||
orig := cluster.DeepCopy()
|
||||
|
||||
cluster.Spec.PriorityClass = policy.Spec.DefaultPriorityClass
|
||||
cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector
|
||||
|
||||
if !reflect.DeepEqual(orig, cluster) {
|
||||
// continue updating also the other clusters even if an error occurred
|
||||
clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Join(clusterUpdateErrs...)
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ var _ = BeforeSuite(func() {
|
||||
ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))
|
||||
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
err = policy.Add(ctx, mgr, "")
|
||||
err = policy.Add(mgr, "")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
|
||||
@@ -2,10 +2,12 @@ package policy_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller/policy"
|
||||
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -24,72 +26,78 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
|
||||
Context("creating a VirtualClusterPolicy", func() {
|
||||
|
||||
var (
|
||||
namespace string
|
||||
)
|
||||
It("should have only the 'shared' allowedModeTypes", func() {
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
|
||||
|
||||
BeforeEach(func() {
|
||||
createdNS := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
|
||||
err := k8sClient.Create(context.Background(), createdNS)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
namespace = createdNS.Name
|
||||
allowedModeTypes := policy.Spec.AllowedModeTypes
|
||||
Expect(allowedModeTypes).To(HaveLen(1))
|
||||
Expect(allowedModeTypes).To(ContainElement(v1alpha1.SharedClusterMode))
|
||||
})
|
||||
|
||||
When("created with a default spec", func() {
|
||||
It("should have only the 'shared' allowedModeTypes", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
It("should have the 'virtual' mode if specified", func() {
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
AllowedModeTypes: []v1alpha1.ClusterMode{
|
||||
v1alpha1.VirtualClusterMode,
|
||||
},
|
||||
})
|
||||
|
||||
allowedModeTypes := policy.Spec.AllowedModeTypes
|
||||
Expect(allowedModeTypes).To(HaveLen(1))
|
||||
Expect(allowedModeTypes).To(ContainElement(v1alpha1.VirtualClusterMode))
|
||||
})
|
||||
|
||||
It("should have both modes if specified", func() {
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
AllowedModeTypes: []v1alpha1.ClusterMode{
|
||||
v1alpha1.SharedClusterMode,
|
||||
v1alpha1.VirtualClusterMode,
|
||||
},
|
||||
})
|
||||
|
||||
allowedModeTypes := policy.Spec.AllowedModeTypes
|
||||
Expect(allowedModeTypes).To(HaveLen(2))
|
||||
Expect(allowedModeTypes).To(ContainElements(
|
||||
v1alpha1.SharedClusterMode,
|
||||
v1alpha1.VirtualClusterMode,
|
||||
))
|
||||
})
|
||||
|
||||
It("should fail for a non-existing mode", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "policy-",
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
AllowedModeTypes: []v1alpha1.ClusterMode{
|
||||
v1alpha1.SharedClusterMode,
|
||||
v1alpha1.VirtualClusterMode,
|
||||
v1alpha1.ClusterMode("non-existing"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
When("bound to a namespace", func() {
|
||||
|
||||
var namespace *v1.Namespace
|
||||
|
||||
BeforeEach(func() {
|
||||
namespace = &v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
GenerateName: "ns-",
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
err := k8sClient.Create(ctx, namespace)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
allowedModeTypes := policy.Spec.AllowedModeTypes
|
||||
Expect(allowedModeTypes).To(HaveLen(1))
|
||||
Expect(allowedModeTypes).To(ContainElement(v1alpha1.SharedClusterMode))
|
||||
})
|
||||
|
||||
It("should not be able to create a cluster with a non 'default' name", func() {
|
||||
err := k8sClient.Create(ctx, &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "another-name",
|
||||
Namespace: namespace,
|
||||
},
|
||||
})
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should not be able to create two VirtualClusterPolicies in the same namespace", func() {
|
||||
err := k8sClient.Create(ctx, &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
err = k8sClient.Create(ctx, &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default-2",
|
||||
Namespace: namespace,
|
||||
},
|
||||
})
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should create a NetworkPolicy", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
// look for network policies etc
|
||||
networkPolicy := &networkingv1.NetworkPolicy{}
|
||||
@@ -97,7 +105,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Eventually(func() error {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
return k8sClient.Get(ctx, key, networkPolicy)
|
||||
}).
|
||||
@@ -123,7 +131,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
// allow networking in the same namespace
|
||||
namespaceRule := networkingv1.NetworkPolicyPeer{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"kubernetes.io/metadata.name": namespace},
|
||||
MatchLabels: map[string]string{"kubernetes.io/metadata.name": namespace.Name},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -141,92 +149,10 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
ipBlockRule, namespaceRule, kubeDNSRule,
|
||||
))
|
||||
})
|
||||
})
|
||||
|
||||
When("created with DisableNetworkPolicy", func() {
|
||||
It("should not create a NetworkPolicy if true", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
DisableNetworkPolicy: true,
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait for a bit for the network policy, but it should not be created
|
||||
Eventually(func() bool {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
}
|
||||
err := k8sClient.Get(ctx, key, &networkingv1.NetworkPolicy{})
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
MustPassRepeatedly(5).
|
||||
WithTimeout(time.Second * 10).
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("should delete the NetworkPolicy if changed to false", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// look for network policy
|
||||
networkPolicy := &networkingv1.NetworkPolicy{}
|
||||
|
||||
Eventually(func() error {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
}
|
||||
return k8sClient.Get(ctx, key, networkPolicy)
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second).
|
||||
Should(BeNil())
|
||||
|
||||
policy.Spec.DisableNetworkPolicy = true
|
||||
err = k8sClient.Update(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait for a bit for the network policy to being deleted
|
||||
Eventually(func() bool {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
}
|
||||
err := k8sClient.Get(ctx, key, networkPolicy)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
MustPassRepeatedly(5).
|
||||
WithTimeout(time.Second * 10).
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("should recreate the NetworkPolicy if deleted", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
// look for network policy
|
||||
networkPolicy := &networkingv1.NetworkPolicy{}
|
||||
@@ -234,7 +160,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Eventually(func() error {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
return k8sClient.Get(context.Background(), key, networkPolicy)
|
||||
}).
|
||||
@@ -242,12 +168,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
WithPolling(time.Second).
|
||||
Should(BeNil())
|
||||
|
||||
err = k8sClient.Delete(ctx, networkPolicy)
|
||||
err := k8sClient.Delete(ctx, networkPolicy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
err = k8sClient.Get(ctx, key, networkPolicy)
|
||||
Expect(apierrors.IsNotFound(err)).Should(BeTrue())
|
||||
@@ -256,7 +182,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Eventually(func() error {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
return k8sClient.Get(ctx, key, networkPolicy)
|
||||
}).
|
||||
@@ -265,76 +191,6 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Should(BeNil())
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
When("created specifying the mode", func() {
|
||||
It("should have the 'virtual' mode if specified", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
AllowedModeTypes: []v1alpha1.ClusterMode{
|
||||
v1alpha1.VirtualClusterMode,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
allowedModeTypes := policy.Spec.AllowedModeTypes
|
||||
Expect(allowedModeTypes).To(HaveLen(1))
|
||||
Expect(allowedModeTypes).To(ContainElement(v1alpha1.VirtualClusterMode))
|
||||
})
|
||||
|
||||
It("should have both modes if specified", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
AllowedModeTypes: []v1alpha1.ClusterMode{
|
||||
v1alpha1.SharedClusterMode,
|
||||
v1alpha1.VirtualClusterMode,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
allowedModeTypes := policy.Spec.AllowedModeTypes
|
||||
Expect(allowedModeTypes).To(HaveLen(2))
|
||||
Expect(allowedModeTypes).To(ContainElements(
|
||||
v1alpha1.SharedClusterMode,
|
||||
v1alpha1.VirtualClusterMode,
|
||||
))
|
||||
})
|
||||
|
||||
It("should fail for a non-existing mode", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
AllowedModeTypes: []v1alpha1.ClusterMode{
|
||||
v1alpha1.SharedClusterMode,
|
||||
v1alpha1.VirtualClusterMode,
|
||||
v1alpha1.ClusterMode("non-existing"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
When("created specifying the podSecurityAdmissionLevel", func() {
|
||||
It("should add and update the proper pod-security labels to the namespace", func() {
|
||||
var (
|
||||
privileged = v1alpha1.PrivilegedPodSecurityAdmissionLevel
|
||||
@@ -342,33 +198,25 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
restricted = v1alpha1.RestrictedPodSecurityAdmissionLevel
|
||||
)
|
||||
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
PodSecurityAdmissionLevel: &privileged,
|
||||
},
|
||||
}
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
PodSecurityAdmissionLevel: &privileged,
|
||||
})
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
var ns v1.Namespace
|
||||
|
||||
// Check privileged
|
||||
|
||||
// wait a bit for the namespace to be updated
|
||||
Eventually(func() bool {
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
|
||||
Eventually(func() string {
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
return enforceValue == "privileged"
|
||||
return ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
}).
|
||||
WithTimeout(time.Second * 10).
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
Should(Equal("privileged"))
|
||||
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
|
||||
@@ -378,19 +226,18 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
// Check baseline
|
||||
|
||||
policy.Spec.PodSecurityAdmissionLevel = &baseline
|
||||
err = k8sClient.Update(ctx, policy)
|
||||
err := k8sClient.Update(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait a bit for the namespace to be updated
|
||||
Eventually(func() bool {
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
|
||||
Eventually(func() string {
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
return enforceValue == "baseline"
|
||||
return ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
}).
|
||||
WithTimeout(time.Second * 10).
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
Should(Equal("baseline"))
|
||||
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "baseline"))
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
|
||||
@@ -404,15 +251,14 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait a bit for the namespace to be updated
|
||||
Eventually(func() bool {
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
|
||||
Eventually(func() string {
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
return enforceValue == "restricted"
|
||||
return ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
}).
|
||||
WithTimeout(time.Second * 10).
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
Should(Equal("restricted"))
|
||||
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "restricted"))
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
|
||||
@@ -427,7 +273,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
|
||||
// wait a bit for the namespace to be updated
|
||||
Eventually(func() bool {
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
_, found := ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
return found
|
||||
@@ -445,24 +291,17 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
It("should restore the labels if Namespace is updated", func() {
|
||||
privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel
|
||||
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
PodSecurityAdmissionLevel: &privileged,
|
||||
},
|
||||
}
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
PodSecurityAdmissionLevel: &privileged,
|
||||
})
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
var ns v1.Namespace
|
||||
|
||||
// wait a bit for the namespace to be updated
|
||||
Eventually(func() bool {
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
return enforceValue == "privileged"
|
||||
@@ -475,12 +314,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
|
||||
|
||||
ns.Labels["pod-security.kubernetes.io/enforce"] = "baseline"
|
||||
err = k8sClient.Update(ctx, &ns)
|
||||
err := k8sClient.Update(ctx, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait a bit for the namespace to be restored
|
||||
Eventually(func() bool {
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]
|
||||
return enforceValue == "privileged"
|
||||
@@ -492,27 +331,18 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce", "privileged"))
|
||||
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
|
||||
})
|
||||
})
|
||||
|
||||
When("a cluster in the same namespace is present", func() {
|
||||
It("should update it if needed", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
DefaultPriorityClass: "foobar",
|
||||
},
|
||||
}
|
||||
It("should update Cluster's PriorityClass", func() {
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
DefaultPriorityClass: "foobar",
|
||||
})
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Mode: v1alpha1.SharedClusterMode,
|
||||
@@ -521,7 +351,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, cluster)
|
||||
err := k8sClient.Create(ctx, cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait a bit
|
||||
@@ -536,24 +366,19 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("should update the nodeSelector", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
|
||||
},
|
||||
}
|
||||
It("should update Cluster's NodeSelector", func() {
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
|
||||
})
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
err := k8sClient.Update(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Mode: v1alpha1.SharedClusterMode,
|
||||
@@ -578,23 +403,15 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
})
|
||||
|
||||
It("should update the nodeSelector if changed", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
|
||||
})
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Mode: v1alpha1.SharedClusterMode,
|
||||
@@ -604,7 +421,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, cluster)
|
||||
err := k8sClient.Create(ctx, cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(cluster.Spec.NodeSelector).To(Equal(policy.Spec.DefaultNodeSelector))
|
||||
@@ -645,81 +462,24 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
When("a cluster in a different namespace is present", func() {
|
||||
It("should not be update", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
DefaultPriorityClass: "foobar",
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
namespace2 := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
|
||||
err = k8sClient.Create(ctx, namespace2)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace2.Name,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Mode: v1alpha1.SharedClusterMode,
|
||||
Servers: ptr.To[int32](1),
|
||||
Agents: ptr.To[int32](0),
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// it should not change!
|
||||
Eventually(func() bool {
|
||||
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
|
||||
err = k8sClient.Get(ctx, key, cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
return cluster.Spec.PriorityClass != policy.Spec.DefaultPriorityClass
|
||||
}).
|
||||
MustPassRepeatedly(5).
|
||||
WithTimeout(time.Second * 10).
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
When("created with ResourceQuota", func() {
|
||||
It("should create resourceQuota if Quota is enabled", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
Quota: &v1.ResourceQuotaSpec{
|
||||
Hard: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("800m"),
|
||||
v1.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
It("should create a ResourceQuota if Quota is enabled", func() {
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
Quota: &v1.ResourceQuotaSpec{
|
||||
Hard: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("800m"),
|
||||
v1.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
var resourceQuota v1.ResourceQuota
|
||||
Eventually(func() error {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
|
||||
return k8sClient.Get(ctx, key, &resourceQuota)
|
||||
@@ -732,30 +492,23 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
})
|
||||
|
||||
It("should delete the ResourceQuota if Quota is deleted", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
Quota: &v1.ResourceQuotaSpec{
|
||||
Hard: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("800m"),
|
||||
v1.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
Quota: &v1.ResourceQuotaSpec{
|
||||
Hard: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("800m"),
|
||||
v1.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
bindPolicyToNamespace(namespace, policy)
|
||||
|
||||
var resourceQuota v1.ResourceQuota
|
||||
|
||||
Eventually(func() error {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
return k8sClient.Get(ctx, key, &resourceQuota)
|
||||
}).
|
||||
@@ -764,14 +517,14 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Should(BeNil())
|
||||
|
||||
policy.Spec.Quota = nil
|
||||
err = k8sClient.Update(ctx, policy)
|
||||
err := k8sClient.Update(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait for a bit for the resourceQuota to be deleted
|
||||
Eventually(func() bool {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
err := k8sClient.Get(ctx, key, &resourceQuota)
|
||||
return apierrors.IsNotFound(err)
|
||||
@@ -781,47 +534,79 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("should create resourceQuota if Quota is enabled", func() {
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
Limit: &v1.LimitRangeSpec{
|
||||
Limits: []v1.LimitRangeItem{
|
||||
{
|
||||
Type: v1.LimitTypeContainer,
|
||||
DefaultRequest: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("500m"),
|
||||
},
|
||||
},
|
||||
},
|
||||
It("should delete the ResourceQuota if unbound", func() {
|
||||
clusterPolicy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
|
||||
Quota: &v1.ResourceQuotaSpec{
|
||||
Hard: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("800m"),
|
||||
v1.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
err := k8sClient.Create(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
bindPolicyToNamespace(namespace, clusterPolicy)
|
||||
|
||||
var limitRange v1.LimitRange
|
||||
var resourceQuota v1.ResourceQuota
|
||||
|
||||
Eventually(func() error {
|
||||
key := types.NamespacedName{
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
|
||||
Namespace: namespace,
|
||||
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterPolicy.Name),
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
return k8sClient.Get(ctx, key, &limitRange)
|
||||
return k8sClient.Get(ctx, key, &resourceQuota)
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second).
|
||||
Should(BeNil())
|
||||
|
||||
// make sure that the default limit range has the default request values.
Expect(limitRange.Spec.Limits).ShouldNot(BeEmpty())
cpu := limitRange.Spec.Limits[0].DefaultRequest.Cpu().String()
Expect(cpu).To(BeEquivalentTo("500m"))
fmt.Printf("%+v\n", resourceQuota)

delete(namespace.Labels, policy.PolicyNameLabelKey)
err := k8sClient.Update(ctx, namespace)
Expect(err).To(Not(HaveOccurred()))

// wait for a bit for the resourceQuota to be deleted
Eventually(func() bool {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterPolicy.Name),
Namespace: namespace.Name,
}
err := k8sClient.Get(ctx, key, &resourceQuota)
return apierrors.IsNotFound(err)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
})
})
})

func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterPolicy {
GinkgoHelper()

policy := &v1alpha1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
Spec: spec,
}

err := k8sClient.Create(ctx, policy)
Expect(err).To(Not(HaveOccurred()))

return policy
}

func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1alpha1.VirtualClusterPolicy) {
GinkgoHelper()

if len(namespace.Labels) == 0 {
namespace.Labels = map[string]string{}
}

namespace.Labels[policy.PolicyNameLabelKey] = pol.Name

err := k8sClient.Update(ctx, namespace)
Expect(err).To(Not(HaveOccurred()))
}