mirror of
https://github.com/open-cluster-management-io/ocm.git
synced 2026-05-06 01:07:03 +00:00
only set work-agent replica 0 when hub-kubeconfig-secret is missing (#213)
Signed-off-by: Zhiwei Yin <zyin@redhat.com>
This commit is contained in:
@@ -44,6 +44,7 @@ const (
|
||||
klusterletApplied = "Applied"
|
||||
klusterletReadyToApply = "ReadyToApply"
|
||||
hubConnectionDegraded = "HubConnectionDegraded"
|
||||
hubKubeConfigSecretMissing = "HubKubeConfigSecretMissing"
|
||||
appliedManifestWorkFinalizer = "cluster.open-cluster-management.io/applied-manifest-work-cleanup"
|
||||
)
|
||||
|
||||
@@ -338,13 +339,20 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Deploy work agent
|
||||
// scale up the work agent deployment when the hubConnectionDegraded condition is False.
|
||||
// because the work agent deployment has a dependency of hub-kubeconfig-secret secret.
|
||||
// Deploy work agent.
|
||||
// * work agent is scaled to 0 only when the degraded condition is true with the reason HubKubeConfigSecretMissing.
|
||||
// It is to ensure a fast startup of work agent when the klusterlet is bootstrapped at the first time.
|
||||
// * The work agent should not be scaled to 0 in degraded condition with other reasons,
|
||||
// because we still need work agent running even though the hub kubeconfig is missing certain permissions.
|
||||
// It can ensure work agent to clean up the resources defined in manifestworks when cluster is detaching from the hub.
|
||||
workConfig := config
|
||||
if !meta.IsStatusConditionFalse(klusterlet.Status.Conditions, hubConnectionDegraded) {
|
||||
hubConnectionDegradedCondition := meta.FindStatusCondition(klusterlet.Status.Conditions, hubConnectionDegraded)
|
||||
if hubConnectionDegradedCondition == nil {
|
||||
workConfig.Replica = 0
|
||||
} else if hubConnectionDegradedCondition.Status == metav1.ConditionTrue && strings.Contains(hubConnectionDegradedCondition.Reason, hubKubeConfigSecretMissing) {
|
||||
workConfig.Replica = 0
|
||||
}
|
||||
|
||||
statuses, workGeneration, err := n.applyDeployment(ctx, klusterlet, &workConfig, "klusterlet/management/klusterlet-work-deployment.yaml", controllerContext.Recorder())
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -433,7 +433,9 @@ var _ = ginkgo.Describe("Klusterlet", func() {
|
||||
return false
|
||||
}
|
||||
gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1))
|
||||
gomega.Expect(len(actual.Spec.Template.Spec.Containers[0].Args)).Should(gomega.Equal(4))
|
||||
// klusterlet has no condition, replica is 0
|
||||
gomega.Expect(actual.Status.Replicas).Should(gomega.Equal(int32(0)))
|
||||
gomega.Expect(len(actual.Spec.Template.Spec.Containers[0].Args)).Should(gomega.Equal(5))
|
||||
if actual.Spec.Template.Spec.Containers[0].Args[2] != "--spoke-cluster-name=cluster2" {
|
||||
return false
|
||||
}
|
||||
@@ -597,7 +599,7 @@ var _ = ginkgo.Describe("Klusterlet", func() {
|
||||
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "HubConnectionDegraded", "BootstrapSecretMissing,HubKubeConfigMissing", metav1.ConditionTrue)
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "RegistrationDesiredDegraded", "UnavailablePods", metav1.ConditionTrue)
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "WorkDesiredDegraded", "DeploymentsFunctional", metav1.ConditionFalse)
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "WorkDesiredDegraded", "UnavailablePods", metav1.ConditionTrue)
|
||||
|
||||
// Create a bootstrap secret and make sure the kubeconfig can work
|
||||
bootStrapSecret := &corev1.Secret{
|
||||
@@ -614,7 +616,7 @@ var _ = ginkgo.Describe("Klusterlet", func() {
|
||||
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigMissing", metav1.ConditionTrue)
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "RegistrationDesiredDegraded", "UnavailablePods", metav1.ConditionTrue)
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "WorkDesiredDegraded", "DeploymentsFunctional", metav1.ConditionFalse)
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "WorkDesiredDegraded", "UnavailablePods", metav1.ConditionTrue)
|
||||
|
||||
hubSecret, err := kubeClient.CoreV1().Secrets(klusterletNamespace).Get(context.Background(), helpers.HubKubeConfig, metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
Reference in New Issue
Block a user