Mirror of https://github.com/rancher/k3k.git (synced 2026-02-17 19:40:17 +00:00)

Compare commits: chart-1.0. ... main (3 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 9836f8376d |  |
|  | dba054786e |  |
|  | c94f7c7a30 |  |
```diff
@@ -935,6 +935,29 @@ spec:
               - Terminating
               - Unknown
               type: string
+            policy:
+              description: |-
+                policy represents the status of the policy applied to this cluster.
+                This field is set by the VirtualClusterPolicy controller.
+              properties:
+                name:
+                  description: name is the name of the VirtualClusterPolicy currently
+                    applied to this cluster.
+                  minLength: 1
+                  type: string
+                nodeSelector:
+                  additionalProperties:
+                    type: string
+                  description: nodeSelector is a node selector enforced by the active
+                    VirtualClusterPolicy.
+                  type: object
+                priorityClass:
+                  description: priorityClass is the priority class enforced by the
+                    active VirtualClusterPolicy.
+                  type: string
+              required:
+              - name
+              type: object
             policyName:
               description: PolicyName specifies the virtual cluster policy name
                 bound to the virtual cluster.
```
```diff
@@ -41,6 +41,29 @@ _Appears In:_
 |===


+[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy"]
+=== AppliedPolicy
+
+
+
+AppliedPolicy defines the observed state of an applied policy.
+
+
+
+_Appears In:_
+
+* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]
+
+[cols="25a,55a,10a,10a", options="header"]
+|===
+| Field | Description | Default | Validation
+| *`name`* __string__ | name is the name of the VirtualClusterPolicy currently applied to this cluster. + | | MinLength: 1 +
+
+| *`priorityClass`* __string__ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. + | |
+| *`nodeSelector`* __object (keys:string, values:string)__ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. + | |
+|===
+
+
 [id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster"]
 === Cluster

```
```diff
@@ -32,6 +32,24 @@ _Appears in:_
 | `secretRef` _string_ | SecretRef is the name of the Secret. | | |


+#### AppliedPolicy
+
+
+
+AppliedPolicy defines the observed state of an applied policy.
+
+
+
+_Appears in:_
+- [ClusterStatus](#clusterstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | name is the name of the VirtualClusterPolicy currently applied to this cluster. | | MinLength: 1 <br /> |
+| `priorityClass` _string_ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. | | |
+| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. | | |
+
+
 #### Cluster

```
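The three hunks above (CRD schema, AsciiDoc and Markdown API reference) all document the new `status.policy` block on a Cluster. Below is a minimal consumer-side sketch, not part of these commits, showing how the block is read defensively: `name` is the only required field, `priorityClass` is a pointer, and `nodeSelector` may be empty. The policy and priority-class names in `main` are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// describeAppliedPolicy summarizes the policy status currently reported on a Cluster.
func describeAppliedPolicy(c *v1beta1.Cluster) string {
	// status.policy is optional: it is only set once the VirtualClusterPolicy
	// controller has bound a policy to the cluster's namespace.
	p := c.Status.Policy
	if p == nil {
		return "no policy applied"
	}

	// name is required; priorityClass is a *string and nodeSelector may be
	// empty, so both need a guard before use.
	out := fmt.Sprintf("policy %q", p.Name)
	if p.PriorityClass != nil && *p.PriorityClass != "" {
		out += fmt.Sprintf(", priorityClass %q", *p.PriorityClass)
	}
	if len(p.NodeSelector) > 0 {
		out += fmt.Sprintf(", nodeSelector %v", p.NodeSelector)
	}

	return out
}

func main() {
	pc := "k3k-low-priority" // hypothetical PriorityClass name
	c := &v1beta1.Cluster{}
	c.Status.Policy = &v1beta1.AppliedPolicy{
		Name:          "shared-policy", // hypothetical policy name
		PriorityClass: &pc,
		NodeSelector:  map[string]string{"kubernetes.io/os": "linux"},
	}

	fmt.Println(describeAppliedPolicy(c))
}
```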
```diff
@@ -100,7 +100,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)

 	syncedConfigMap := c.translateConfigMap(&virtualConfigMap)

-	if err := controllerutil.SetControllerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
+	if err := controllerutil.SetOwnerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
 		return reconcile.Result{}, err
 	}

```
```diff
@@ -98,7 +98,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request

 	syncedIngress := r.ingress(&virtIngress)

-	if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
+	if err := controllerutil.SetOwnerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
 		return reconcile.Result{}, err
 	}

```
```diff
@@ -98,7 +98,7 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
 	}

 	syncedPVC := r.pvc(&virtPVC)
-	if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
+	if err := controllerutil.SetOwnerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
 		return reconcile.Result{}, err
 	}

```
```diff
@@ -117,7 +117,7 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Reque

 	hostPriorityClass := r.translatePriorityClass(priorityClass)

-	if err := controllerutil.SetControllerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
+	if err := controllerutil.SetOwnerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
 		return reconcile.Result{}, err
 	}

```
```diff
@@ -100,7 +100,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re

 	syncedSecret := s.translateSecret(&virtualSecret)

-	if err := controllerutil.SetControllerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
+	if err := controllerutil.SetOwnerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
 		return reconcile.Result{}, err
 	}

```
```diff
@@ -76,7 +76,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request

 	syncedService := r.service(&virtService)

-	if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
+	if err := controllerutil.SetOwnerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
 		return reconcile.Result{}, err
 	}

```
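The six syncer hunks above all make the same swap: `controllerutil.SetControllerReference` becomes `controllerutil.SetOwnerReference`. The sketch below is not project code; it only shows the mechanical difference between the two real controller-runtime helpers, using arbitrary ConfigMaps as owner and child. `SetControllerReference` marks the owner as the managing controller (`controller: true`, `blockOwnerDeletion: true`) and refuses a second controller owner, while `SetOwnerReference` just appends a plain, non-controlling owner reference.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
	// Both helpers need a scheme to resolve the owner's GroupVersionKind.
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	owner := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "owner", Namespace: "default", UID: "uid-1"}}

	// Non-controlling owner reference: the controller field stays unset, so
	// several owners can coexist on the same object.
	child := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "child", Namespace: "default"}}
	if err := controllerutil.SetOwnerReference(owner, child, scheme); err != nil {
		panic(err)
	}

	// Controlling owner reference: sets controller: true and blockOwnerDeletion,
	// and errors out if another controller owner is already present.
	controlled := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "controlled", Namespace: "default"}}
	if err := controllerutil.SetControllerReference(owner, controlled, scheme); err != nil {
		panic(err)
	}

	fmt.Printf("non-controlling: %+v\n", child.OwnerReferences[0])
	fmt.Printf("controlling:     %+v\n", controlled.OwnerReferences[0])
}
```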
```diff
@@ -401,28 +401,45 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
 	// Schedule the host pod in the same host node of the virtual kubelet
 	hostPod.Spec.NodeName = p.agentHostname

+	// The pod's own nodeSelector is ignored.
+	// The final selector is determined by the cluster spec, but overridden by a policy if present.
+	hostPod.Spec.NodeSelector = cluster.Spec.NodeSelector
+	if cluster.Status.Policy != nil && len(cluster.Status.Policy.NodeSelector) > 0 {
+		hostPod.Spec.NodeSelector = cluster.Status.Policy.NodeSelector
+	}
+
 	// setting the hostname for the pod if its not set
 	if virtualPod.Spec.Hostname == "" {
 		hostPod.Spec.Hostname = k3kcontroller.SafeConcatName(virtualPod.Name)
 	}

-	// if the priorityClass for the virtual cluster is set then override the provided value
+	// When a PriorityClass is set we will use the translated one in the HostCluster.
+	// If the Cluster or a Policy defines a PriorityClass of the host we are going to use that one.
+	// Note: the core-dns and local-path-provisioner pod are scheduled by k3s with the
+	// 'system-cluster-critical' and 'system-node-critical' default priority classes.
-	if !strings.HasPrefix(hostPod.Spec.PriorityClassName, "system-") {
-		if hostPod.Spec.PriorityClassName != "" {
-			tPriorityClassName := p.Translator.TranslateName("", hostPod.Spec.PriorityClassName)
-			hostPod.Spec.PriorityClassName = tPriorityClassName
+	//
+	// TODO: we probably need to define a custom "intermediate" k3k-system-* priority
+	if strings.HasPrefix(virtualPod.Spec.PriorityClassName, "system-") {
+		hostPod.Spec.PriorityClassName = virtualPod.Spec.PriorityClassName
+	} else {
+		enforcedPriorityClassName := cluster.Spec.PriorityClass
+		if cluster.Status.Policy != nil && cluster.Status.Policy.PriorityClass != nil {
+			enforcedPriorityClassName = *cluster.Status.Policy.PriorityClass
 		}

-		if cluster.Spec.PriorityClass != "" {
-			hostPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
+		if enforcedPriorityClassName != "" {
+			hostPod.Spec.PriorityClassName = enforcedPriorityClassName
+		} else if virtualPod.Spec.PriorityClassName != "" {
+			hostPod.Spec.PriorityClassName = p.Translator.TranslateName("", virtualPod.Spec.PriorityClassName)
+			hostPod.Spec.Priority = nil
 		}
 	}

 	// if the priority class is set we need to remove the priority
 	if hostPod.Spec.PriorityClassName != "" {
 		hostPod.Spec.Priority = nil
 	}

 	p.configurePodEnvs(hostPod, &virtualPod)

 	// fieldpath annotations
```
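As a standalone sketch of the resolution order the `createPod` hunk implements (not the provider code itself; `resolvePriorityClass`, `resolveNodeSelector` and `translate` are made-up names): a `system-` class on the virtual pod passes through untouched, an applied policy in `cluster.Status.Policy` beats `cluster.Spec`, and the virtual pod's own class is only translated as a last resort.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// resolvePriorityClass picks the priority class for the host pod.
func resolvePriorityClass(cluster *v1beta1.Cluster, virtualPodPC string, translate func(string) string) string {
	// k3s system workloads keep their built-in classes.
	if strings.HasPrefix(virtualPodPC, "system-") {
		return virtualPodPC
	}

	// The cluster spec is the default; a policy recorded in the status wins.
	enforced := cluster.Spec.PriorityClass
	if cluster.Status.Policy != nil && cluster.Status.Policy.PriorityClass != nil {
		enforced = *cluster.Status.Policy.PriorityClass
	}

	if enforced != "" {
		return enforced
	}
	if virtualPodPC != "" {
		// Fall back to the virtual pod's own class, translated into the host cluster.
		return translate(virtualPodPC)
	}

	return ""
}

// resolveNodeSelector mirrors the nodeSelector part of the hunk: the pod's own
// selector is ignored, the cluster spec is the default, and a policy wins.
func resolveNodeSelector(cluster *v1beta1.Cluster) map[string]string {
	if cluster.Status.Policy != nil && len(cluster.Status.Policy.NodeSelector) > 0 {
		return cluster.Status.Policy.NodeSelector
	}

	return cluster.Spec.NodeSelector
}

func main() {
	pc := "policy-priority" // hypothetical PriorityClass names throughout
	c := &v1beta1.Cluster{}
	c.Spec.PriorityClass = "cluster-priority"
	c.Status.Policy = &v1beta1.AppliedPolicy{Name: "example", PriorityClass: &pc}

	translate := func(name string) string { return "translated-" + name }

	fmt.Println(resolvePriorityClass(c, "workload-priority", translate)) // policy-priority
	fmt.Println(resolveNodeSelector(c))                                  // nil spec selector -> prints map[]
}
```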
```diff
@@ -538,6 +538,12 @@ type ClusterStatus struct {
 	// +optional
 	PolicyName string `json:"policyName,omitempty"`

+	// policy represents the status of the policy applied to this cluster.
+	// This field is set by the VirtualClusterPolicy controller.
+	//
+	// +optional
+	Policy *AppliedPolicy `json:"policy,omitempty"`
+
 	// KubeletPort specefies the port used by k3k-kubelet in shared mode.
 	//
 	// +optional
```
```diff
@@ -561,6 +567,25 @@ type ClusterStatus struct {
 	Phase ClusterPhase `json:"phase,omitempty"`
 }

+// AppliedPolicy defines the observed state of an applied policy.
+type AppliedPolicy struct {
+	// name is the name of the VirtualClusterPolicy currently applied to this cluster.
+	//
+	// +kubebuilder:validation:MinLength:=1
+	// +required
+	Name string `json:"name,omitempty"`
+
+	// priorityClass is the priority class enforced by the active VirtualClusterPolicy.
+	//
+	// +optional
+	PriorityClass *string `json:"priorityClass,omitempty"`
+
+	// nodeSelector is a node selector enforced by the active VirtualClusterPolicy.
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+}
+
 // ClusterPhase is a high-level summary of the cluster's current lifecycle state.
 type ClusterPhase string

```
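One plausible reason `AppliedPolicy.PriorityClass` is a `*string` rather than a plain `string`: combined with `omitempty`, a pointer lets "not enforced" (nil, field omitted) and "enforced to the empty value" (pointer to "") serialize differently, which a plain string cannot express. A self-contained sketch with local stand-in types (not the generated API structs):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// appliedPolicy mirrors the shape of the new status type for illustration only.
type appliedPolicy struct {
	Name          string            `json:"name,omitempty"`
	PriorityClass *string           `json:"priorityClass,omitempty"`
	NodeSelector  map[string]string `json:"nodeSelector,omitempty"`
}

func main() {
	empty := ""

	unset := appliedPolicy{Name: "p1"}                          // no priority class recorded
	enforcedEmpty := appliedPolicy{Name: "p1", PriorityClass: &empty} // recorded, but empty

	a, _ := json.Marshal(unset)
	b, _ := json.Marshal(enforcedEmpty)

	fmt.Println(string(a)) // {"name":"p1"}                     field omitted entirely
	fmt.Println(string(b)) // {"name":"p1","priorityClass":""}  present but empty
}
```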
```diff
@@ -25,6 +25,33 @@ func (in *Addon) DeepCopy() *Addon {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppliedPolicy) DeepCopyInto(out *AppliedPolicy) {
+	*out = *in
+	if in.PriorityClass != nil {
+		in, out := &in.PriorityClass, &out.PriorityClass
+		*out = new(string)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedPolicy.
+func (in *AppliedPolicy) DeepCopy() *AppliedPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(AppliedPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Cluster) DeepCopyInto(out *Cluster) {
 	*out = *in
```
```diff
@@ -200,6 +227,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.Policy != nil {
+		in, out := &in.Policy, &out.Policy
+		*out = new(AppliedPolicy)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
 		*out = make([]metav1.Condition, len(*in))
```
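The generated deepcopy above re-allocates the `PriorityClass` pointer and the `NodeSelector` map instead of copying the struct verbatim. That matters because the reconcile code later takes `origStatus := cluster.Status.DeepCopy()` and compares it against the mutated status, which only works if the copy does not alias the original. A small hand-written sketch with local stand-in types:

```go
package main

import "fmt"

type appliedPolicy struct {
	Name         string
	NodeSelector map[string]string
}

// deepCopy re-allocates the map so the copy does not share storage with the original.
func (in *appliedPolicy) deepCopy() *appliedPolicy {
	out := &appliedPolicy{Name: in.Name}
	if in.NodeSelector != nil {
		out.NodeSelector = make(map[string]string, len(in.NodeSelector))
		for k, v := range in.NodeSelector {
			out.NodeSelector[k] = v
		}
	}
	return out
}

func main() {
	orig := &appliedPolicy{Name: "p", NodeSelector: map[string]string{"zone": "a"}}

	shallow := *orig        // copies the map header, not the entries
	deep := orig.deepCopy() // fresh map

	shallow.NodeSelector["zone"] = "b" // leaks into orig through the shared map
	fmt.Println(orig.NodeSelector["zone"], deep.NodeSelector["zone"]) // "b" "a"
}
```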
```diff
@@ -2,13 +2,17 @@ package policy

 import (
 	"context"
+	"errors"
+	"fmt"

 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"

 	v1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	ctrl "sigs.k8s.io/controller-runtime"

 	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
```
```diff
@@ -52,15 +56,36 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)
 	}

 	for _, ns := range namespaces.Items {
-		selector := labels.NewSelector()
+		currentPolicyName := ns.Labels[PolicyNameLabelKey]

-		if req, err := labels.NewRequirement(ManagedByLabelKey, selection.Equals, []string{VirtualPolicyControllerName}); err == nil {
-			selector = selector.Add(*req)
-		}
+		// This will match all the resources managed by the K3k Policy controller
+		// that have the app.kubernetes.io/managed-by=k3k-policy-controller label
+		selector := labels.SelectorFromSet(labels.Set{
+			ManagedByLabelKey: VirtualPolicyControllerName,
+		})

-		// if the namespace is bound to a policy -> cleanup resources of other policies
-		if ns.Labels[PolicyNameLabelKey] != "" {
-			requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]})
+		// If the namespace is not bound to any policy, or if the policy it was bound to no longer exists,
+		// we need to clear policy-related fields on its Cluster objects.
+		if currentPolicyName == "" {
+			if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
+				log.Error(err, "error clearing policy fields for clusters in unbound namespace", "namespace", ns.Name)
+			}
+		} else {
+			var policy v1beta1.VirtualClusterPolicy
+			if err := c.Client.Get(ctx, types.NamespacedName{Name: currentPolicyName}, &policy); err != nil {
+				if apierrors.IsNotFound(err) {
+					if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
+						log.Error(err, "error clearing policy fields for clusters in namespace with non-existent policy", "namespace", ns.Name, "policy", currentPolicyName)
+					}
+				} else {
+					log.Error(err, "error getting policy for namespace", "namespace", ns.Name, "policy", currentPolicyName)
+				}
+			}
+
+			// if the namespace is bound to a policy -> cleanup resources of other policies
+			requirement, err := labels.NewRequirement(
+				PolicyNameLabelKey, selection.NotEquals, []string{currentPolicyName},
+			)

 			// log the error but continue cleaning up the other namespaces
 			if err != nil {
```
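A sketch of how the rewritten cleanup selector is meant to behave: match everything carrying the controller's `app.kubernetes.io/managed-by=k3k-policy-controller` label whose policy label differs from the namespace's current policy. The `policy.k3k.io/policy-name` key below is a placeholder for the real `PolicyNameLabelKey`, whose value is not shown in this diff.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	const (
		managedByKey  = "app.kubernetes.io/managed-by"
		policyNameKey = "policy.k3k.io/policy-name" // hypothetical label key, for illustration
		controller    = "k3k-policy-controller"
	)

	// Base selector: everything this controller manages.
	selector := labels.SelectorFromSet(labels.Set{managedByKey: controller})

	// Extra requirement: exclude resources belonging to the namespace's current policy.
	req, err := labels.NewRequirement(policyNameKey, selection.NotEquals, []string{"current-policy"})
	if err != nil {
		panic(err)
	}
	selector = selector.Add(*req)

	stale := labels.Set{managedByKey: controller, policyNameKey: "old-policy"}
	current := labels.Set{managedByKey: controller, policyNameKey: "current-policy"}

	fmt.Println(selector.Matches(stale))   // true  -> candidate for cleanup
	fmt.Println(selector.Matches(current)) // false -> keep
}
```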
```diff
@@ -90,3 +115,30 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)

 	return nil
 }
+
+// clearPolicyFieldsForClustersInNamespace sets the policy status on Cluster objects in the given namespace to nil.
+func (c *VirtualClusterPolicyReconciler) clearPolicyFieldsForClustersInNamespace(ctx context.Context, namespace string) error {
+	log := ctrl.LoggerFrom(ctx)
+
+	var clusters v1beta1.ClusterList
+	if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace)); err != nil {
+		return fmt.Errorf("failed listing clusters in namespace %s: %w", namespace, err)
+	}
+
+	var updateErrs []error
+
+	for i := range clusters.Items {
+		cluster := clusters.Items[i]
+		if cluster.Status.Policy != nil {
+			log.V(1).Info("Clearing policy status for Cluster", "cluster", cluster.Name, "namespace", namespace)
+			cluster.Status.Policy = nil
+
+			if updateErr := c.Client.Status().Update(ctx, &cluster); updateErr != nil {
+				updateErr = fmt.Errorf("failed updating Status for Cluster %s: %w", cluster.Name, updateErr)
+				updateErrs = append(updateErrs, updateErr)
+			}
+		}
+	}
+
+	return errors.Join(updateErrs...)
+}
```
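`clearPolicyFieldsForClustersInNamespace` keeps updating the remaining clusters after a failure and folds all failures into one error with `errors.Join`. A minimal, generic sketch of that pattern (names here are made up):

```go
package main

import (
	"errors"
	"fmt"
)

// updateAll applies update to every item, collects the failures, and returns
// them as one joined error. errors.Join returns nil when nothing failed.
func updateAll(items []string, update func(string) error) error {
	var errs []error

	for _, item := range items {
		if err := update(item); err != nil {
			// Wrap with the item name so the joined error stays readable.
			errs = append(errs, fmt.Errorf("failed updating %s: %w", item, err))
		}
	}

	return errors.Join(errs...)
}

func main() {
	flaky := func(name string) error {
		if name == "cluster-b" {
			return errors.New("conflict")
		}
		return nil
	}

	err := updateAll([]string{"cluster-a", "cluster-b", "cluster-c"}, flaky)
	fmt.Println(err) // failed updating cluster-b: conflict
}
```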
```diff
@@ -470,16 +470,21 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
 	var clusterUpdateErrs []error

 	for _, cluster := range clusters.Items {
-		orig := cluster.DeepCopy()
+		origStatus := cluster.Status.DeepCopy()

-		cluster.Spec.PriorityClass = policy.Spec.DefaultPriorityClass
-		cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector
+		cluster.Status.Policy = &v1beta1.AppliedPolicy{
+			Name:          policy.Name,
+			PriorityClass: &policy.Spec.DefaultPriorityClass,
+			NodeSelector:  policy.Spec.DefaultNodeSelector,
+		}

-		if !reflect.DeepEqual(orig, cluster) {
+		if !reflect.DeepEqual(origStatus, &cluster.Status) {
 			log.V(1).Info("Updating Cluster", "cluster", cluster.Name, "namespace", namespace.Name)

 			// continue updating also the other clusters even if an error occurred
-			clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
+			if err := c.Client.Status().Update(ctx, &cluster); err != nil {
+				clusterUpdateErrs = append(clusterUpdateErrs, err)
+			}
 		}
 	}

```
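In `reconcileClusters` the policy defaults now land in `status.policy` through the status subresource instead of being written into the spec. Below is a compile-only sketch of the same pattern (the helper name and shape are assumptions, not project code): diff only the status, then call `Status().Update`, since a plain `Update` does not persist status changes once the subresource is enabled.

```go
package policysketch

import (
	"context"
	"reflect"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// applyPolicyStatus stamps the applied policy onto the cluster status and
// persists it only when something actually changed.
func applyPolicyStatus(ctx context.Context, c client.Client, cluster *v1beta1.Cluster, policy *v1beta1.VirtualClusterPolicy) error {
	origStatus := cluster.Status.DeepCopy()

	cluster.Status.Policy = &v1beta1.AppliedPolicy{
		Name:          policy.Name,
		PriorityClass: &policy.Spec.DefaultPriorityClass,
		NodeSelector:  policy.Spec.DefaultNodeSelector,
	}

	if reflect.DeepEqual(origStatus, &cluster.Status) {
		return nil // nothing to persist
	}

	// Spec and status are written through different endpoints; only the
	// status subresource client will persist this change.
	return c.Status().Update(ctx, cluster)
}
```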
```diff
@@ -2,7 +2,6 @@ package policy_test

 import (
 	"context"
-	"reflect"
 	"time"

 	"k8s.io/apimachinery/pkg/api/resource"
```
```diff
@@ -307,7 +306,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
 			Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
 		})

-		It("should update Cluster's PriorityClass", func() {
+		It("updates the Cluster's policy status with the DefaultPriorityClass", func() {
 			policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
 				DefaultPriorityClass: "foobar",
 			})
```
```diff
@@ -329,19 +328,22 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
 			err := k8sClient.Create(ctx, cluster)
 			Expect(err).To(Not(HaveOccurred()))

-			// wait a bit
-			Eventually(func() bool {
+			Eventually(func(g Gomega) {
 				key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
 				err = k8sClient.Get(ctx, key, cluster)
-				Expect(err).To(Not(HaveOccurred()))
-				return cluster.Spec.PriorityClass == policy.Spec.DefaultPriorityClass
+				g.Expect(err).To(Not(HaveOccurred()))
+
+				g.Expect(cluster.Spec.PriorityClass).To(BeEmpty())
+				g.Expect(cluster.Status.Policy).To(Not(BeNil()))
+				g.Expect(cluster.Status.Policy.PriorityClass).To(Not(BeNil()))
+				g.Expect(*cluster.Status.Policy.PriorityClass).To(Equal(policy.Spec.DefaultPriorityClass))
 			}).
 				WithTimeout(time.Second * 10).
 				WithPolling(time.Second).
-				Should(BeTrue())
+				Should(Succeed())
 		})

-		It("should update Cluster's NodeSelector", func() {
+		It("updates the Cluster's policy status with the DefaultNodeSelector", func() {
 			policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
 				DefaultNodeSelector: map[string]string{"label-1": "value-1"},
 			})
```
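The test hunks above and below switch from `Eventually(func() bool { ... }).Should(BeTrue())` to `Eventually(func(g Gomega) { ... }).Should(Succeed())`. A standalone test sketch of that Gomega pattern (not from the PR): each `g.Expect` inside the polled function is retried until the timeout and reports which expectation failed, instead of collapsing everything into a single bool.

```go
package sketch

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestEventuallyWithGomegaArg(t *testing.T) {
	g := NewWithT(t)

	start := time.Now()

	// The inner Gomega retries the whole assertion block on every poll.
	g.Eventually(func(g Gomega) {
		g.Expect(time.Since(start)).To(BeNumerically(">", 50*time.Millisecond))
	}).
		WithTimeout(2 * time.Second).
		WithPolling(10 * time.Millisecond).
		Should(Succeed())
}
```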
```diff
@@ -366,18 +368,21 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
 			Expect(err).To(Not(HaveOccurred()))

 			// wait a bit
-			Eventually(func() bool {
+			Eventually(func(g Gomega) {
 				key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
 				err = k8sClient.Get(ctx, key, cluster)
 				Expect(err).To(Not(HaveOccurred()))
-				return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
+
+				g.Expect(cluster.Spec.NodeSelector).To(BeEmpty())
+				g.Expect(cluster.Status.Policy).To(Not(BeNil()))
+				g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
 			}).
 				WithTimeout(time.Second * 10).
 				WithPolling(time.Second).
-				Should(BeTrue())
+				Should(Succeed())
 		})

-		It("should update the nodeSelector if changed", func() {
+		It("updates the Cluster's policy status when the VCP nodeSelector changes", func() {
 			policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
 				DefaultNodeSelector: map[string]string{"label-1": "value-1"},
 			})
```
```diff
@@ -399,43 +404,56 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
 			err := k8sClient.Create(ctx, cluster)
 			Expect(err).To(Not(HaveOccurred()))

-			Expect(cluster.Spec.NodeSelector).To(Equal(policy.Spec.DefaultNodeSelector))
+			// Cluster Spec should not change, VCP NodeSelector should be present in the Status
+			Eventually(func(g Gomega) {
+				key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
+				err = k8sClient.Get(ctx, key, cluster)
+				Expect(err).To(Not(HaveOccurred()))
+
+				g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
+				g.Expect(cluster.Status.Policy).To(Not(BeNil()))
+				g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
+			}).
+				WithTimeout(time.Second * 10).
+				WithPolling(time.Second).
+				Should(Succeed())

 			// update the VirtualClusterPolicy
 			policy.Spec.DefaultNodeSelector["label-2"] = "value-2"
 			err = k8sClient.Update(ctx, policy)
 			Expect(err).To(Not(HaveOccurred()))
-			Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))

 			// wait a bit
-			Eventually(func() bool {
+			Eventually(func(g Gomega) {
 				key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
 				err = k8sClient.Get(ctx, key, cluster)
 				Expect(err).To(Not(HaveOccurred()))
-				return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
+
+				g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
+				g.Expect(cluster.Status.Policy).To(Not(BeNil()))
+				g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1", "label-2": "value-2"}))
 			}).
 				WithTimeout(time.Second * 10).
 				WithPolling(time.Second).
-				Should(BeTrue())
+				Should(Succeed())

 			// Update the Cluster
+			err = k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
+			Expect(err).To(Not(HaveOccurred()))
 			cluster.Spec.NodeSelector["label-3"] = "value-3"
 			err = k8sClient.Update(ctx, cluster)
 			Expect(err).To(Not(HaveOccurred()))
-			Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))

-			// wait a bit and check it's restored
-			Eventually(func() bool {
-				var updatedCluster v1beta1.Cluster
-
+			Consistently(func(g Gomega) {
 				key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
-				err = k8sClient.Get(ctx, key, &updatedCluster)
-				Expect(err).To(Not(HaveOccurred()))
-				return reflect.DeepEqual(updatedCluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
+				err = k8sClient.Get(ctx, key, cluster)
+				g.Expect(err).To(Not(HaveOccurred()))
+				g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1", "label-3": "value-3"}))
 			}).
 				WithTimeout(time.Second * 10).
 				WithPolling(time.Second).
-				Should(BeTrue())
+				Should(Succeed())
 		})

 		It("should create a ResourceQuota if Quota is enabled", func() {
```
```diff
@@ -8,6 +8,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	corev1 "k8s.io/api/core/v1"
+	schedv1 "k8s.io/api/scheduling/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
```
```diff
@@ -99,6 +100,100 @@ var _ = When("a cluster's status is tracked", Label(e2eTestLabel), Label(statusT
 			WithPolling(time.Second * 5).
 			Should(Succeed())
 	})
+
+	It("created with field controlled from a policy", func() {
+		ctx := context.Background()
+
+		priorityClass := &schedv1.PriorityClass{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName: "pc-",
+			},
+			Value: 100,
+		}
+		Expect(k8sClient.Create(ctx, priorityClass)).To(Succeed())
+
+		clusterObj := &v1beta1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName: "status-cluster-",
+				Namespace:    namespace.Name,
+			},
+			Spec: v1beta1.ClusterSpec{
+				PriorityClass: priorityClass.Name,
+			},
+		}
+		Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())
+
+		DeferCleanup(func() {
+			Expect(k8sClient.Delete(ctx, priorityClass)).To(Succeed())
+		})
+
+		clusterKey := client.ObjectKeyFromObject(clusterObj)
+
+		// Check for the initial status to be set
+		Eventually(func(g Gomega) {
+			err := k8sClient.Get(ctx, clusterKey, clusterObj)
+			g.Expect(err).NotTo(HaveOccurred())
+
+			g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning))
+
+			cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
+			g.Expect(cond).NotTo(BeNil())
+			g.Expect(cond.Status).To(Equal(metav1.ConditionFalse))
+			g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioning))
+		}).
+			WithPolling(time.Second * 2).
+			WithTimeout(time.Second * 20).
+			Should(Succeed())
+
+		// Check for the status to be updated to Ready
+		Eventually(func(g Gomega) {
+			err := k8sClient.Get(ctx, clusterKey, clusterObj)
+			g.Expect(err).NotTo(HaveOccurred())
+
+			g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady))
+			g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
+			g.Expect(clusterObj.Status.Policy.Name).To(Equal(vcp.Name))
+
+			cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
+			g.Expect(cond).NotTo(BeNil())
+			g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
+			g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioned))
+		}).
+			WithTimeout(time.Minute * 3).
+			WithPolling(time.Second * 5).
+			Should(Succeed())
+
+		// update policy
+
+		priorityClassVCP := &schedv1.PriorityClass{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName: "pc-",
+			},
+			Value: 100,
+		}
+		Expect(k8sClient.Create(ctx, priorityClassVCP)).To(Succeed())
+
+		DeferCleanup(func() {
+			Expect(k8sClient.Delete(ctx, priorityClassVCP)).To(Succeed())
+		})
+
+		vcp.Spec.DefaultPriorityClass = priorityClassVCP.Name
+		Expect(k8sClient.Update(ctx, vcp)).To(Succeed())
+
+		// Check for the status to be updated to Ready
+		Eventually(func(g Gomega) {
+			err := k8sClient.Get(ctx, clusterKey, clusterObj)
+			g.Expect(err).NotTo(HaveOccurred())
+
+			g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
+			g.Expect(clusterObj.Status.Policy.PriorityClass).To(Not(BeNil()))
+			g.Expect(*clusterObj.Status.Policy.PriorityClass).To(Equal(priorityClassVCP.Name))
+			g.Expect(clusterObj.Spec.PriorityClass).To(Equal(priorityClass.Name))
+		}).
+			WithTimeout(time.Minute * 3).
+			WithPolling(time.Second * 5).
+			Should(Succeed())
+	})
 })

 Context("and the cluster has validation errors", func() {
```
```diff
@@ -37,6 +37,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

 	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

```
```diff
@@ -138,11 +139,7 @@ func initKubernetesClient(ctx context.Context) {
 func buildScheme() *runtime.Scheme {
 	scheme := runtime.NewScheme()

-	err := v1.AddToScheme(scheme)
-	Expect(err).NotTo(HaveOccurred())
-	err = appsv1.AddToScheme(scheme)
-	Expect(err).NotTo(HaveOccurred())
-	err = networkingv1.AddToScheme(scheme)
+	err := clientgoscheme.AddToScheme(scheme)
 	Expect(err).NotTo(HaveOccurred())
 	err = v1beta1.AddToScheme(scheme)
 	Expect(err).NotTo(HaveOccurred())
```
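The `buildScheme` hunk above leans on `clientgoscheme.AddToScheme`, which registers all built-in client-go API groups (core, apps, networking, and the rest) in one call, so the per-group registrations can be dropped and only the k3k CRD group needs adding explicitly. A minimal sketch of the resulting setup (assumed function shape, not the test helper itself):

```go
package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// buildScheme registers the built-in Kubernetes types plus the k3k CRDs.
func buildScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()

	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		return nil, err
	}
	if err := v1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}

	return scheme, nil
}
```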