diff --git a/pkg/operators/clustermanager/controller.go b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go
similarity index 99%
rename from pkg/operators/clustermanager/controller.go
rename to pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go
index 9ada8a208..291af685e 100644
--- a/pkg/operators/clustermanager/controller.go
+++ b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go
@@ -1,4 +1,4 @@
-package clustermanager
+package clustermanagercontroller
 
 import (
 	"context"
@@ -224,7 +224,6 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
 		})
 	}
 
-	//TODO Check if all the pods are running.
 	// Update status
 	_, _, updatedErr := helpers.UpdateClusterManagerStatus(
 		ctx, n.clusterManagerClient, clusterManager.Name,
diff --git a/pkg/operators/clustermanager/controller_test.go b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go
similarity index 99%
rename from pkg/operators/clustermanager/controller_test.go
rename to pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go
index 2944a935d..aa4c6a07c 100644
--- a/pkg/operators/clustermanager/controller_test.go
+++ b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go
@@ -1,4 +1,4 @@
-package clustermanager
+package clustermanagercontroller
 
 import (
 	"context"
diff --git a/pkg/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go b/pkg/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go
new file mode 100644
index 000000000..7d1af60c7
--- /dev/null
+++ b/pkg/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go
@@ -0,0 +1,109 @@
+package statuscontroller
+
+import (
+	"context"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	appsinformer "k8s.io/client-go/informers/apps/v1"
+	appslister "k8s.io/client-go/listers/apps/v1"
+	"k8s.io/klog"
+
+	operatorv1client "github.com/open-cluster-management/api/client/operator/clientset/versioned/typed/operator/v1"
+	operatorinformer "github.com/open-cluster-management/api/client/operator/informers/externalversions/operator/v1"
+	operatorlister "github.com/open-cluster-management/api/client/operator/listers/operator/v1"
+	operatorapiv1 "github.com/open-cluster-management/api/operator/v1"
+	"github.com/open-cluster-management/registration-operator/pkg/helpers"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+const registrationDegraded = "HubRegistrationDegraded"
+
+type clusterManagerStatusController struct {
+	deploymentLister     appslister.DeploymentLister
+	clusterManagerClient operatorv1client.ClusterManagerInterface
+	clusterManagerLister operatorlister.ClusterManagerLister
+}
+
+// NewClusterManagerStatusController creates a hub cluster manager status controller
+func NewClusterManagerStatusController(
+	clusterManagerClient operatorv1client.ClusterManagerInterface,
+	clusterManagerInformer operatorinformer.ClusterManagerInformer,
+	deploymentInformer appsinformer.DeploymentInformer,
+	recorder events.Recorder) factory.Controller {
+	controller := &clusterManagerStatusController{
+		deploymentLister:     deploymentInformer.Lister(),
+		clusterManagerClient: clusterManagerClient,
+		clusterManagerLister: clusterManagerInformer.Lister(),
+	}
+
+	return factory.New().WithSync(controller.sync).
+		WithInformersQueueKeyFunc(
+			helpers.ClusterManagerDeploymentQueueKeyFunc(controller.clusterManagerLister), deploymentInformer.Informer()).
+		WithInformersQueueKeyFunc(func(obj runtime.Object) string {
+			accessor, _ := meta.Accessor(obj)
+			return accessor.GetName()
+		}, clusterManagerInformer.Informer()).
+		ToController("ClusterManagerStatusController", recorder)
+}
+
+func (s *clusterManagerStatusController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
+	clusterManagerName := controllerContext.QueueKey()
+	if clusterManagerName == "" {
+		return nil
+	}
+
+	klog.Infof("Reconciling ClusterManager %q", clusterManagerName)
+
+	clusterManager, err := s.clusterManagerLister.Get(clusterManagerName)
+	// ClusterManager not found, could have been deleted, do nothing.
+	if errors.IsNotFound(err) {
+		return nil
+	}
+
+	if err != nil {
+		return err
+	}
+
+	// Check registration deployment status
+	registrationDeploymentName := fmt.Sprintf("%s-registration-controller", clusterManager.Name)
+	registrationDeployment, err := s.deploymentLister.Deployments(helpers.ClusterManagerNamespace).Get(registrationDeploymentName)
+	if err != nil {
+		_, _, err := helpers.UpdateClusterManagerStatus(ctx, s.clusterManagerClient, clusterManager.Name,
+			helpers.UpdateClusterManagerConditionFn(operatorapiv1.StatusCondition{
+				Type:    registrationDegraded,
+				Status:  metav1.ConditionTrue,
+				Reason:  "GetRegistrationDeploymentFailed",
+				Message: fmt.Sprintf("Failed to get registration deployment %q %q: %v", helpers.ClusterManagerNamespace, registrationDeploymentName, err),
+			}),
+		)
+		return err
+	}
+
+	if unavailablePod := helpers.NumOfUnavailablePod(registrationDeployment); unavailablePod > 0 {
+		_, _, err := helpers.UpdateClusterManagerStatus(ctx, s.clusterManagerClient, clusterManager.Name,
+			helpers.UpdateClusterManagerConditionFn(operatorapiv1.StatusCondition{
+				Type:    registrationDegraded,
+				Status:  metav1.ConditionTrue,
+				Reason:  "UnavailableRegistrationPod",
+				Message: fmt.Sprintf("%v of requested instances are unavailable for registration deployment %q %q", unavailablePod, helpers.ClusterManagerNamespace, registrationDeploymentName),
+			}),
+		)
+		return err
+	}
+
+	_, _, err = helpers.UpdateClusterManagerStatus(ctx, s.clusterManagerClient, clusterManager.Name,
+		helpers.UpdateClusterManagerConditionFn(operatorapiv1.StatusCondition{
+			Type:    registrationDegraded,
+			Status:  metav1.ConditionFalse,
+			Reason:  "RegistrationFunctional",
+			Message: "Registration is managing credentials",
+		}),
+	)
+	return err
+}
diff --git a/pkg/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go b/pkg/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go
new file mode 100644
index 000000000..7201c2ef2
--- /dev/null
+++ b/pkg/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go
@@ -0,0 +1,148 @@
+package statuscontroller
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	kubeinformers "k8s.io/client-go/informers"
+	fakekube "k8s.io/client-go/kubernetes/fake"
+	clienttesting "k8s.io/client-go/testing"
+
"github.com/open-cluster-management/api/client/operator/clientset/versioned/fake" + operatorinformers "github.com/open-cluster-management/api/client/operator/informers/externalversions" + operatorapiv1 "github.com/open-cluster-management/api/operator/v1" + testinghelper "github.com/open-cluster-management/registration-operator/pkg/helpers/testing" +) + +const testClusterManagerName = "testclustermanager" + +func newClusterManager() *operatorapiv1.ClusterManager { + return &operatorapiv1.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: testClusterManagerName, + }, + Spec: operatorapiv1.ClusterManagerSpec{ + RegistrationImagePullSpec: "testregistration", + }, + } +} + +func newDeployment(desiredReplica, availableReplica int32) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-registration-controller", testClusterManagerName), + Namespace: "open-cluster-management-hub", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &desiredReplica, + }, + Status: appsv1.DeploymentStatus{ + AvailableReplicas: availableReplica, + }, + } +} + +func TestSyncStatus(t *testing.T) { + cases := []struct { + name string + queueKey string + clusterManagers []runtime.Object + deployments []runtime.Object + validateActions func(t *testing.T, actions []clienttesting.Action) + }{ + { + name: "empty queue key", + queueKey: "", + clusterManagers: []runtime.Object{}, + deployments: []runtime.Object{}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + testinghelper.AssertEqualNumber(t, len(actions), 0) + }, + }, + { + name: "no cluster manager", + queueKey: testClusterManagerName, + clusterManagers: []runtime.Object{}, + deployments: []runtime.Object{}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + testinghelper.AssertEqualNumber(t, len(actions), 0) + }, + }, + { + name: "failed to get registration deployment", + queueKey: testClusterManagerName, + clusterManagers: []runtime.Object{newClusterManager()}, + deployments: []runtime.Object{}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + testinghelper.AssertEqualNumber(t, len(actions), 2) + testinghelper.AssertGet(t, actions[0], "operator.open-cluster-management.io", "v1", "clustermanagers") + testinghelper.AssertAction(t, actions[1], "update") + expectedCondition := testinghelper.NamedCondition(registrationDegraded, "GetRegistrationDeploymentFailed", metav1.ConditionTrue) + testinghelper.AssertOnlyConditions(t, actions[1].(clienttesting.UpdateActionImpl).Object, expectedCondition) + }, + }, + { + name: "unavailable registration pods", + queueKey: testClusterManagerName, + clusterManagers: []runtime.Object{newClusterManager()}, + deployments: []runtime.Object{newDeployment(3, 0)}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + testinghelper.AssertEqualNumber(t, len(actions), 2) + testinghelper.AssertGet(t, actions[0], "operator.open-cluster-management.io", "v1", "clustermanagers") + testinghelper.AssertAction(t, actions[1], "update") + expectedCondition := testinghelper.NamedCondition(registrationDegraded, "UnavailableRegistrationPod", metav1.ConditionTrue) + testinghelper.AssertOnlyConditions(t, actions[1].(clienttesting.UpdateActionImpl).Object, expectedCondition) + }, + }, + { + name: "registration functional", + queueKey: testClusterManagerName, + clusterManagers: []runtime.Object{newClusterManager()}, + deployments: []runtime.Object{newDeployment(3, 3)}, + validateActions: func(t *testing.T, actions 
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				testinghelper.AssertEqualNumber(t, len(actions), 2)
+				testinghelper.AssertGet(t, actions[0], "operator.open-cluster-management.io", "v1", "clustermanagers")
+				testinghelper.AssertAction(t, actions[1], "update")
+				expectedCondition := testinghelper.NamedCondition(registrationDegraded, "RegistrationFunctional", metav1.ConditionFalse)
+				testinghelper.AssertOnlyConditions(t, actions[1].(clienttesting.UpdateActionImpl).Object, expectedCondition)
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			fakeKubeClient := fakekube.NewSimpleClientset(c.deployments...)
+			kubeInformers := kubeinformers.NewSharedInformerFactory(fakeKubeClient, 5*time.Minute)
+			deployStore := kubeInformers.Apps().V1().Deployments().Informer().GetStore()
+			for _, deployment := range c.deployments {
+				deployStore.Add(deployment)
+			}
+
+			fakeOperatorClient := fakeoperatorclient.NewSimpleClientset(c.clusterManagers...)
+			operatorInformers := operatorinformers.NewSharedInformerFactory(fakeOperatorClient, 5*time.Minute)
+			clusterManagerStore := operatorInformers.Operator().V1().ClusterManagers().Informer().GetStore()
+			for _, clusterManager := range c.clusterManagers {
+				clusterManagerStore.Add(clusterManager)
+			}
+
+			controller := &clusterManagerStatusController{
+				deploymentLister:     kubeInformers.Apps().V1().Deployments().Lister(),
+				clusterManagerClient: fakeOperatorClient.OperatorV1().ClusterManagers(),
+				clusterManagerLister: operatorInformers.Operator().V1().ClusterManagers().Lister(),
+			}
+
+			syncContext := testinghelper.NewFakeSyncContext(t, c.queueKey)
+			err := controller.sync(context.TODO(), syncContext)
+			if err != nil {
+				t.Errorf("Expected no error when updating status: %v", err)
+			}
+			c.validateActions(t, fakeOperatorClient.Actions())
+		})
+	}
+}
diff --git a/pkg/operators/manager.go b/pkg/operators/manager.go
index 836c6ac8e..8e6f99f11 100644
--- a/pkg/operators/manager.go
+++ b/pkg/operators/manager.go
@@ -14,7 +14,8 @@ import (
 	operatorclient "github.com/open-cluster-management/api/client/operator/clientset/versioned"
 	operatorinformer "github.com/open-cluster-management/api/client/operator/informers/externalversions"
 	"github.com/open-cluster-management/registration-operator/pkg/helpers"
-	"github.com/open-cluster-management/registration-operator/pkg/operators/clustermanager"
+	"github.com/open-cluster-management/registration-operator/pkg/operators/clustermanager/controllers/clustermanagercontroller"
+	clustermanagerstatuscontroller "github.com/open-cluster-management/registration-operator/pkg/operators/clustermanager/controllers/statuscontroller"
 	"github.com/open-cluster-management/registration-operator/pkg/operators/klusterlet/controllers/klusterletcontroller"
 	"github.com/open-cluster-management/registration-operator/pkg/operators/klusterlet/controllers/statuscontroller"
 )
@@ -44,7 +45,7 @@ func RunClusterManagerOperator(ctx context.Context, controllerContext *controlle
 	}
 
 	operatorInformer := operatorinformer.NewSharedInformerFactory(operatorClient, 5*time.Minute)
-	clusterManagerController := clustermanager.NewClusterManagerController(
+	clusterManagerController := clustermanagercontroller.NewClusterManagerController(
 		kubeClient,
 		apiExtensionClient,
 		apiRegistrationClient.ApiregistrationV1(),
@@ -53,9 +54,16 @@ func RunClusterManagerOperator(ctx context.Context, controllerContext *controlle
 		kubeInformer.Apps().V1().Deployments(),
 		controllerContext.EventRecorder)
 
+	statusController := clustermanagerstatuscontroller.NewClusterManagerStatusController(
+		operatorClient.OperatorV1().ClusterManagers(),
+		operatorInformer.Operator().V1().ClusterManagers(),
+		kubeInformer.Apps().V1().Deployments(),
+		controllerContext.EventRecorder)
+
 	go operatorInformer.Start(ctx.Done())
 	go kubeInformer.Start(ctx.Done())
 	go clusterManagerController.Run(ctx, 1)
+	go statusController.Run(ctx, 1)
 	<-ctx.Done()
 	return nil
 }
diff --git a/test/integration/clustermanager_test.go b/test/integration/clustermanager_test.go
index 3b4b0777f..7d1d02c01 100644
--- a/test/integration/clustermanager_test.go
+++ b/test/integration/clustermanager_test.go
@@ -12,7 +12,6 @@ import (
 
 	"github.com/openshift/library-go/pkg/controller/controllercmd"
 
-	operatorapiv1 "github.com/open-cluster-management/api/operator/v1"
 	"github.com/open-cluster-management/registration-operator/pkg/helpers"
 	"github.com/open-cluster-management/registration-operator/pkg/operators"
 	"github.com/open-cluster-management/registration-operator/test/integration/util"
@@ -28,16 +27,7 @@ func startHubOperator(ctx context.Context) {
 
 var _ = ginkgo.Describe("ClusterManager", func() {
 	var cancel context.CancelFunc
-	var err error
-	var clusterManagerName string
-	var hubRegistrationClusterRole string
-	var hubWebhookClusterRole string
-	var hubRegistrationSA string
-	var hubWebhookSA string
-	var hubRegistrationDeployment string
-	var hubWebhookDeployment string
-	var webhookSecret string
-	var validtingWebhook string
+	var hubRegistrationDeployment = fmt.Sprintf("%s-registration-controller", clusterManagerName)
 
 	ginkgo.BeforeEach(func() {
 		var ctx context.Context
@@ -52,29 +42,7 @@ var _ = ginkgo.Describe("ClusterManager", func() {
 	})
 
 	ginkgo.Context("Deploy and clean hub component", func() {
-		ginkgo.BeforeEach(func() {
-			clusterManagerName = "hub"
-			hubRegistrationClusterRole = fmt.Sprintf("system:open-cluster-management:%s-registration-controller", clusterManagerName)
-			hubWebhookClusterRole = fmt.Sprintf("system:open-cluster-management:%s-registration-webhook", clusterManagerName)
-			hubRegistrationSA = fmt.Sprintf("%s-registration-controller-sa", clusterManagerName)
-			hubWebhookSA = fmt.Sprintf("%s-registration-webhook-sa", clusterManagerName)
-			hubRegistrationDeployment = fmt.Sprintf("%s-registration-controller", clusterManagerName)
-			hubWebhookDeployment = fmt.Sprintf("%s-registration-webhook", clusterManagerName)
-			webhookSecret = "webhook-serving-cert"
-			validtingWebhook = "managedclustervalidators.admission.cluster.open-cluster-management.io"
-		})
-
 		ginkgo.It("should have expected resource created successfully", func() {
-			clusterManager := &operatorapiv1.ClusterManager{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: clusterManagerName,
-				},
-				Spec: operatorapiv1.ClusterManagerSpec{
-					RegistrationImagePullSpec: "quay.io/open-cluster-management/registration",
-				},
-			}
-			_, err = operatorClient.OperatorV1().ClusterManagers().Create(context.Background(), clusterManager, metav1.CreateOptions{})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
 			// Check namespace
 			gomega.Eventually(func() bool {
 				if _, err := kubeClient.CoreV1().Namespaces().Get(context.Background(), hubNamespace, metav1.GetOptions{}); err != nil {
 					return false
@@ -84,6 +52,8 @@ var _ = ginkgo.Describe("ClusterManager", func() {
 			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
 			// Check clusterrole/clusterrolebinding
+			hubRegistrationClusterRole := fmt.Sprintf("system:open-cluster-management:%s-registration-controller", clusterManagerName)
+			hubWebhookClusterRole := fmt.Sprintf("system:open-cluster-management:%s-registration-webhook", clusterManagerName)
 			gomega.Eventually(func() bool {
 				if _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}); err != nil {
 					return false
@@ -110,6 +80,8 @@ var _ = ginkgo.Describe("ClusterManager", func() {
 			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
 			// Check service account
+			hubRegistrationSA := fmt.Sprintf("%s-registration-controller-sa", clusterManagerName)
+			hubWebhookSA := fmt.Sprintf("%s-registration-webhook-sa", clusterManagerName)
 			gomega.Eventually(func() bool {
 				if _, err := kubeClient.CoreV1().ServiceAccounts(hubNamespace).Get(context.Background(), hubRegistrationSA, metav1.GetOptions{}); err != nil {
 					return false
@@ -130,6 +102,8 @@ var _ = ginkgo.Describe("ClusterManager", func() {
 				}
 				return true
 			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+
+			hubWebhookDeployment := fmt.Sprintf("%s-registration-webhook", clusterManagerName)
 			gomega.Eventually(func() bool {
 				if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubWebhookDeployment, metav1.GetOptions{}); err != nil {
 					return false
@@ -146,6 +120,7 @@ var _ = ginkgo.Describe("ClusterManager", func() {
 			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
 			// Check webhook secret
+			webhookSecret := "webhook-serving-cert"
 			gomega.Eventually(func() bool {
 				s, err := kubeClient.CoreV1().Secrets(hubNamespace).Get(context.Background(), webhookSecret, metav1.GetOptions{})
 				if err != nil {
 					return false
@@ -158,6 +133,7 @@ var _ = ginkgo.Describe("ClusterManager", func() {
 			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
 			// Check validating webhook
+			validtingWebhook := "managedclustervalidators.admission.cluster.open-cluster-management.io"
 			gomega.Eventually(func() bool {
 				if _, err := kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(context.Background(), validtingWebhook, metav1.GetOptions{}); err != nil {
 					return false
@@ -167,104 +143,130 @@ var _ = ginkgo.Describe("ClusterManager", func() {
 			util.AssertClusterManagerCondition(clusterManagerName, operatorClient, "Applied", "ClusterManagerApplied", metav1.ConditionTrue)
 		})
-	})
 
-	ginkgo.It("Deployment should be updated when clustermanager is changed", func() {
-		gomega.Eventually(func() bool {
-			if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
-				return false
-			}
-			return true
-		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+		ginkgo.It("Deployment should be updated when clustermanager is changed", func() {
+			gomega.Eventually(func() bool {
+				if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
+					return false
+				}
+				return true
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
-		// Check if generations are correct
-		gomega.Eventually(func() bool {
-			actual, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
-			if err != nil {
-				return false
-			}
+			// Check if generations are correct
+			gomega.Eventually(func() bool {
+				actual, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
 
-			if actual.Generation != actual.Status.ObservedGeneration {
-				return false
-			}
+				if actual.Generation != actual.Status.ObservedGeneration {
+					return false
+				}
 
-			return true
-		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+				return true
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
-		clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		clusterManager.Spec.RegistrationImagePullSpec = "testimage:latest"
-		_, err = operatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			clusterManager.Spec.RegistrationImagePullSpec = "testimage:latest"
+			_, err = operatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		gomega.Eventually(func() bool {
-			actual, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
-			if err != nil {
-				return false
-			}
-			gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1))
-			if actual.Spec.Template.Spec.Containers[0].Image != "testimage:latest" {
-				return false
-			}
-			return true
-		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+			gomega.Eventually(func() bool {
+				actual, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
+				gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1))
+				if actual.Spec.Template.Spec.Containers[0].Image != "testimage:latest" {
+					return false
+				}
+				return true
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
-		// Check if generations are correct
-		gomega.Eventually(func() bool {
-			actual, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
-			if err != nil {
-				return false
-			}
+			// Check if generations are correct
+			gomega.Eventually(func() bool {
+				actual, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
 
-			if actual.Generation != actual.Status.ObservedGeneration {
-				return false
-			}
+				if actual.Generation != actual.Status.ObservedGeneration {
+					return false
+				}
 
-			return true
-		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
-	})
+				return true
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+		})
 
-	ginkgo.It("Deployment should be reconciled when manually updated", func() {
-		gomega.Eventually(func() bool {
-			if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
-				return false
-			}
-			return true
-		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
-		registrationoDeployment, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		registrationoDeployment.Spec.Template.Spec.Containers[0].Image = "testimage2:latest"
-		_, err = kubeClient.AppsV1().Deployments(hubNamespace).Update(context.Background(), registrationoDeployment, metav1.UpdateOptions{})
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		gomega.Eventually(func() bool {
+		ginkgo.It("Deployment should be reconciled when manually updated", func() {
+			gomega.Eventually(func() bool {
+				if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
+					return false
+				}
+				return true
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+			registrationoDeployment, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
-			if err != nil {
-				return false
-			}
-			if registrationoDeployment.Spec.Template.Spec.Containers[0].Image != "testimage:latest" {
-				return false
-			}
-			return true
-		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			registrationoDeployment.Spec.Template.Spec.Containers[0].Image = "testimage2:latest"
+			_, err = kubeClient.AppsV1().Deployments(hubNamespace).Update(context.Background(), registrationoDeployment, metav1.UpdateOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			gomega.Eventually(func() bool {
+				registrationoDeployment, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
+				if registrationoDeployment.Spec.Template.Spec.Containers[0].Image != "testimage:latest" {
+					return false
+				}
+				return true
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
-		// Check if generations are correct
-		gomega.Eventually(func() bool {
-			actual, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
-			if err != nil {
-				return false
-			}
+			// Check if generations are correct
+			gomega.Eventually(func() bool {
+				actual, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
+				registrationDeployment, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
+				if err != nil {
+					return false
+				}
+
+				deploymentGeneration := helpers.NewGenerationStatus(appsv1.SchemeGroupVersion.WithResource("deployments"), registrationDeployment)
+				actualGeneration := helpers.FindGenerationStatus(actual.Status.Generations, deploymentGeneration)
+				if deploymentGeneration.LastGeneration != actualGeneration.LastGeneration {
+					return false
+				}
+				return true
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+		})
+	})
+
+	ginkgo.Context("Cluster manager statuses", func() {
+		ginkgo.It("should have correct degraded conditions", func() {
+			gomega.Eventually(func() bool {
+				if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
+					return false
+				}
+				return true
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+
+			// The cluster manager should be unavailable at first
+			util.AssertClusterManagerCondition(clusterManagerName, operatorClient, "HubRegistrationDegraded", "UnavailableRegistrationPod", metav1.ConditionTrue)
+
+			// Update replica of deployment
 			registrationDeployment, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
-			if err != nil {
-				return false
-			}
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			registrationDeployment.Status.AvailableReplicas = 3
+			registrationDeployment.Status.Replicas = 3
+			registrationDeployment.Status.ReadyReplicas = 3
+			_, err = kubeClient.AppsV1().Deployments(hubNamespace).UpdateStatus(context.Background(), registrationDeployment, metav1.UpdateOptions{})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
-			deploymentGeneration := helpers.NewGenerationStatus(appsv1.SchemeGroupVersion.WithResource("deployments"), registrationDeployment)
-			actualGeneration := helpers.FindGenerationStatus(actual.Status.Generations, deploymentGeneration)
-			if deploymentGeneration.LastGeneration != actualGeneration.LastGeneration {
-				return false
-			}
-			return true
-		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+			// The cluster manager should be functional at last
+			util.AssertClusterManagerCondition(clusterManagerName, operatorClient, "HubRegistrationDegraded", "RegistrationFunctional", metav1.ConditionFalse)
+		})
 	})
 })
diff --git a/test/integration/doc.go b/test/integration/doc.go
index 8bc209f04..c44c8a07b 100644
--- a/test/integration/doc.go
+++ b/test/integration/doc.go
@@ -1,4 +1,4 @@
 // Package integration provides integration tests for open-cluster-management registration-operator, the test cases include
-// - TODO deploy/update/remove the cluster manager
-// - TODO deploy/update/remove the klusterlet
+// - deploy/update/remove the cluster manager
+// - deploy/update/remove the klusterlet
 package integration
diff --git a/test/integration/integration_suite_test.go b/test/integration/integration_suite_test.go
index 5c6d42a75..2dc3c3165 100644
--- a/test/integration/integration_suite_test.go
+++ b/test/integration/integration_suite_test.go
@@ -1,11 +1,13 @@
 package integration
 
 import (
+	"context"
 	"path/filepath"
 	"testing"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 
@@ -16,6 +18,7 @@ import (
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	operatorclient "github.com/open-cluster-management/api/client/operator/clientset/versioned"
+	operatorapiv1 "github.com/open-cluster-management/api/operator/v1"
 )
 
 func TestIntegration(t *testing.T) {
@@ -28,6 +31,7 @@ const (
 	eventuallyInterval = 1 // seconds
 	hubNamespace       = "open-cluster-management-hub"
 	spokeNamespace     = "open-cluster-management-agent"
+	clusterManagerName = "hub"
 )
 
 var testEnv *envtest.Environment
@@ -65,6 +69,18 @@ var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) {
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())
 	gomega.Expect(kubeClient).ToNot(gomega.BeNil())
 
+	// prepare a ClusterManager
+	clusterManager := &operatorapiv1.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: clusterManagerName,
+		},
+		Spec: operatorapiv1.ClusterManagerSpec{
+			RegistrationImagePullSpec: "quay.io/open-cluster-management/registration",
+		},
+	}
+	_, err = operatorClient.OperatorV1().ClusterManagers().Create(context.Background(), clusterManager, metav1.CreateOptions{})
+	gomega.Expect(err).ToNot(gomega.HaveOccurred())
+
 	restConfig = cfg
 
 	close(done)
diff --git a/test/integration/util/assertion.go b/test/integration/util/assertion.go
index 2de1ed85f..918ead78a 100644
--- a/test/integration/util/assertion.go
+++ b/test/integration/util/assertion.go
@@ -31,12 +31,12 @@ func AssertKlusterletCondition(
 
 func AssertClusterManagerCondition(
 	name string, operatorClient operatorclientset.Interface, expectedType, expectedReason string, expectedWorkStatus metav1.ConditionStatus) {
 	gomega.Eventually(func() bool {
-		klusterlet, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), name, metav1.GetOptions{})
+		clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
 			return false
 		}
 		// check work status condition
-		return HasCondition(klusterlet.Status.Conditions, expectedType, expectedReason, expectedWorkStatus)
+		return HasCondition(clusterManager.Status.Conditions, expectedType, expectedReason, expectedWorkStatus)
 	}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 }