From b56812384944fffa910cf9b797f67b24eb2ae77e Mon Sep 17 00:00:00 2001 From: Jian Qiu Date: Fri, 20 Dec 2024 09:12:10 +0800 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20Start=20import=20in=20registration?= =?UTF-8?q?=20by=20feature=20gate.=20(#775)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update clustermanager operator to support capi Signed-off-by: Jian Qiu * Update operator to start importer Signed-off-by: Jian Qiu --------- Signed-off-by: Jian Qiu --- .../templates/cluster_role.yaml | 3 + .../cluster-manager/templates/operator.yaml | 2 + .../config/operator/operator.yaml | 2 + .../config/rbac/cluster_role.yaml | 3 + ...cluster-manager.clusterserviceversion.yaml | 12 +- .../klusterlet.clusterserviceversion.yaml | 2 +- ...ster-manager-registration-clusterrole.yaml | 17 ++ ...uster-manager-registration-deployment.yaml | 3 + manifests/config.go | 2 + pkg/operator/helpers/chart/config.go | 16 ++ .../clustermanager_controller.go | 7 + .../clustermanager_hub_reconcile.go | 18 -- .../hub/importer/options/options.go | 4 + pkg/registration/hub/importer/renderers.go | 35 ++++ .../hub/importer/renderers_test.go | 88 ++++++++ pkg/registration/hub/manager.go | 2 + .../operator/clustermanager_hosted_test.go | 137 ------------- .../operator/clustermanager_test.go | 190 +++++++++++++----- 18 files changed, 334 insertions(+), 209 deletions(-) diff --git a/deploy/cluster-manager/chart/cluster-manager/templates/cluster_role.yaml b/deploy/cluster-manager/chart/cluster-manager/templates/cluster_role.yaml index 7271c81bd..e1a2fc149 100644 --- a/deploy/cluster-manager/chart/cluster-manager/templates/cluster_role.yaml +++ b/deploy/cluster-manager/chart/cluster-manager/templates/cluster_role.yaml @@ -150,3 +150,6 @@ rules: - apiGroups: ["multicluster.x-k8s.io"] resources: ["clusterprofiles/status"] verbs: ["update", "patch"] +- apiGroups: [ "cluster.x-k8s.io" ] + resources: [ "clusters" ] + verbs: ["get", "list", "watch"] diff --git a/deploy/cluster-manager/chart/cluster-manager/templates/operator.yaml b/deploy/cluster-manager/chart/cluster-manager/templates/operator.yaml index 5d6e55de9..bee6aa7b1 100644 --- a/deploy/cluster-manager/chart/cluster-manager/templates/operator.yaml +++ b/deploy/cluster-manager/chart/cluster-manager/templates/operator.yaml @@ -42,6 +42,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: AGENT_IMAGE + value: {{ template "operatorImage" . 
}} securityContext: {{- toYaml .Values.securityContext | nindent 10 }} livenessProbe: diff --git a/deploy/cluster-manager/config/operator/operator.yaml b/deploy/cluster-manager/config/operator/operator.yaml index 911d2d853..6bc53929b 100644 --- a/deploy/cluster-manager/config/operator/operator.yaml +++ b/deploy/cluster-manager/config/operator/operator.yaml @@ -53,6 +53,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: AGENT_IMAGE + value: quay.io/open-cluster-management/registration-operator:latest securityContext: allowPrivilegeEscalation: false capabilities: diff --git a/deploy/cluster-manager/config/rbac/cluster_role.yaml b/deploy/cluster-manager/config/rbac/cluster_role.yaml index a902a7cb2..e1ba32d76 100644 --- a/deploy/cluster-manager/config/rbac/cluster_role.yaml +++ b/deploy/cluster-manager/config/rbac/cluster_role.yaml @@ -152,3 +152,6 @@ rules: - apiGroups: ["multicluster.x-k8s.io"] resources: ["clusterprofiles/status"] verbs: ["update", "patch"] +- apiGroups: [ "cluster.x-k8s.io" ] + resources: [ "clusters" ] + verbs: ["get", "list", "watch"] diff --git a/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml b/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml index 2a2392dd5..4c02b6172 100644 --- a/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml +++ b/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml @@ -59,7 +59,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2024-12-02T08:08:47Z" + createdAt: "2024-12-18T07:51:41Z" description: Manages the installation and upgrade of the ClusterManager. operators.operatorframework.io/builder: operator-sdk-v1.32.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 @@ -525,6 +525,14 @@ spec: verbs: - update - patch + - apiGroups: + - cluster.x-k8s.io + resources: + - clusters + verbs: + - get + - list + - watch serviceAccountName: cluster-manager deployments: - label: @@ -571,6 +579,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: AGENT_IMAGE + value: quay.io/open-cluster-management/registration-operator:latest image: quay.io/open-cluster-management/registration-operator:latest imagePullPolicy: IfNotPresent livenessProbe: diff --git a/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml b/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml index 529cbbea1..e92b9f1cc 100644 --- a/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml +++ b/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml @@ -31,7 +31,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2024-12-02T08:08:47Z" + createdAt: "2024-12-18T07:51:42Z" description: Manages the installation and upgrade of the Klusterlet. 
operators.operatorframework.io/builder: operator-sdk-v1.32.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 diff --git a/manifests/cluster-manager/hub/cluster-manager-registration-clusterrole.yaml b/manifests/cluster-manager/hub/cluster-manager-registration-clusterrole.yaml index 283beab69..5c3e930fb 100644 --- a/manifests/cluster-manager/hub/cluster-manager-registration-clusterrole.yaml +++ b/manifests/cluster-manager/hub/cluster-manager-registration-clusterrole.yaml @@ -100,3 +100,20 @@ rules: - apiGroups: ["register.open-cluster-management.io"] resources: ["managedclusters/accept"] verbs: ["update"] +{{if .ClusterImporterEnabled}} +- apiGroups: ["cluster.x-k8s.io"] + resources: ["clusters"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] +{{end}} +{{if .ClusterProfileEnabled}} +# Allow hub to manage clusterprofile +- apiGroups: ["multicluster.x-k8s.io"] + resources: ["clusterprofiles"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: ["multicluster.x-k8s.io"] + resources: ["clusterprofiles/status"] + verbs: ["update", "patch"] +{{end}} diff --git a/manifests/cluster-manager/management/cluster-manager-registration-deployment.yaml b/manifests/cluster-manager/management/cluster-manager-registration-deployment.yaml index 6c64d99cb..0dd8b8d0e 100644 --- a/manifests/cluster-manager/management/cluster-manager-registration-deployment.yaml +++ b/manifests/cluster-manager/management/cluster-manager-registration-deployment.yaml @@ -57,6 +57,9 @@ spec: {{if .AutoApproveUsers}} - "--cluster-auto-approval-users={{ .AutoApproveUsers }}" {{end}} + {{if .ClusterImporterEnabled}} + - "--agent-image={{ .AgentImage }}" + {{end}} {{ if .HostedMode }} - "--kubeconfig=/var/run/secrets/hub/kubeconfig" {{ end }} diff --git a/manifests/config.go b/manifests/config.go index 023e24e79..d62aca81b 100644 --- a/manifests/config.go +++ b/manifests/config.go @@ -20,7 +20,9 @@ type HubConfig struct { AddOnManagerEnabled bool MWReplicaSetEnabled bool ClusterProfileEnabled bool + AgentImage string CloudEventsDriverEnabled bool + ClusterImporterEnabled bool WorkDriver string AutoApproveUsers string ImagePullSecret string diff --git a/pkg/operator/helpers/chart/config.go b/pkg/operator/helpers/chart/config.go index 96faf1233..fd7721ad7 100644 --- a/pkg/operator/helpers/chart/config.go +++ b/pkg/operator/helpers/chart/config.go @@ -81,6 +81,9 @@ type ImagesConfig struct { // The image pull secret name is open-cluster-management-image-pull-credentials. // Please set the userName and password if you use a private image registry. ImageCredentials ImageCredentials `json:"imageCredentials,omitempty"` + // Overrides is to override the image of the component, if this is specified, + // the registry and tag will be ignored. + Overrides Overrides `json:"overrides,omitempty"` } type ImageCredentials struct { @@ -90,6 +93,19 @@ type ImageCredentials struct { DockerConfigJson string `json:"dockerConfigJson,omitempty"` } +type Overrides struct { + // RegistrationImage is the image of the registration component. + RegistrationImage string `json:"registrationImage,omitempty"` + // WorkImage is the image of the work component. + WorkImage string `json:"workImage,omitempty"` + // OperatorImage is the image of the operator component. 
+ OperatorImage string `json:"operatorImage,omitempty"` + // PlacementImage is the image of the placement component + PlacementImage string `json:"placementImage,omitempty"` + // AddOnManagerImage is the image of the addOnManager component + AddOnManagerImage string `json:"addOnManagerImage,omitempty"` +} + type ClusterManagerConfig struct { // Create determines if create the clusterManager CR, default is true. Create bool `json:"create,omitempty"` diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go index 80a9e32a7..32af09449 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" errorhelpers "errors" + "os" "strings" "time" @@ -184,6 +185,12 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration", registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates) config.ClusterProfileEnabled = helpers.FeatureGateEnabled(registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates, ocmfeature.ClusterProfile) + // setting for cluster importer. + // TODO(qiujian16) since this is disabled by feature gate, the image is obtained from cluster manager's env var. Need a more elegant approach. + config.ClusterImporterEnabled = helpers.FeatureGateEnabled(registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates, ocmfeature.ClusterImporter) + if config.ClusterImporterEnabled { + config.AgentImage = os.Getenv("AGENT_IMAGE") + } var workFeatureGates []operatorapiv1.FeatureGate if clusterManager.Spec.WorkConfiguration != nil { diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go index 3da93dd09..b29d853a3 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go @@ -62,12 +62,6 @@ var ( "cluster-manager/hub/cluster-manager-addon-manager-serviceaccount.yaml", } - clusterProfileResourceFiles = []string{ - // clusterprofile - "cluster-manager/hub/cluster-manager-clusterprofiles-clusterrole.yaml", - "cluster-manager/hub/cluster-manager-clusterprofiles-clusterrolebinding.yaml", - } - // The hubHostedWebhookServiceFiles should only be deployed on the hub cluster when the deploy mode is hosted. hubDefaultWebhookServiceFiles = []string{ "cluster-manager/hub/cluster-manager-registration-webhook-service.yaml", @@ -107,14 +101,6 @@ func (c *hubReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterM } } - // If ClusterProfile is not enabled, remove related resources - if !config.ClusterProfileEnabled { - _, _, err := cleanResources(ctx, c.hubKubeClient, cm, config, clusterProfileResourceFiles...) 
- if err != nil { - return cm, reconcileStop, err - } - } - hubResources := getHubResources(cm.Spec.DeployOption.Mode, config) var appliedErrs []error @@ -171,10 +157,6 @@ func getHubResources(mode operatorapiv1.InstallMode, config manifests.HubConfig) hubResources = append(hubResources, mwReplicaSetResourceFiles...) } - if config.ClusterProfileEnabled { - hubResources = append(hubResources, clusterProfileResourceFiles...) - } - // the hubHostedWebhookServiceFiles are only used in hosted mode if helpers.IsHosted(mode) { hubResources = append(hubResources, hubHostedWebhookServiceFiles...) diff --git a/pkg/registration/hub/importer/options/options.go b/pkg/registration/hub/importer/options/options.go index 3d480fec5..e9f46cbd8 100644 --- a/pkg/registration/hub/importer/options/options.go +++ b/pkg/registration/hub/importer/options/options.go @@ -4,6 +4,7 @@ import "github.com/spf13/pflag" type Options struct { APIServerURL string + AgentImage string } func New() *Options { @@ -14,4 +15,7 @@ func New() *Options { func (m *Options) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&m.APIServerURL, "hub-apiserver-url", m.APIServerURL, "APIServer URL of the hub cluster that the spoke cluster can access, Only used for spoke cluster import") + fs.StringVar(&m.AgentImage, "agent-image", m.AgentImage, + "Image of the agent to import, only singleton mode is used for importer and only registration-operator "+ + "image is needed.") } diff --git a/pkg/registration/hub/importer/renderers.go b/pkg/registration/hub/importer/renderers.go index dbbbedb88..45a1f94a7 100644 --- a/pkg/registration/hub/importer/renderers.go +++ b/pkg/registration/hub/importer/renderers.go @@ -6,6 +6,8 @@ import ( "github.com/ghodss/yaml" authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1" @@ -16,6 +18,8 @@ import ( "open-cluster-management.io/ocm/pkg/operator/helpers/chart" ) +const imagePullSecretName = "open-cluster-management-image-pull-credentials" + func RenderBootstrapHubKubeConfig( kubeClient kubernetes.Interface, apiServerURL string) KlusterletConfigRenderer { return func(ctx context.Context, config *chart.KlusterletChartConfig) (*chart.KlusterletChartConfig, error) { @@ -91,3 +95,34 @@ func RenderBootstrapHubKubeConfig( return config, nil } } + +func RenderImage(image string) KlusterletConfigRenderer { + return func(ctx context.Context, config *chart.KlusterletChartConfig) (*chart.KlusterletChartConfig, error) { + if len(image) == 0 { + return config, nil + } + config.Images.Overrides = chart.Overrides{ + OperatorImage: image, + } + return config, nil + } +} + +func RenderImagePullSecret(kubeClient kubernetes.Interface, namespace string) KlusterletConfigRenderer { + return func(ctx context.Context, config *chart.KlusterletChartConfig) (*chart.KlusterletChartConfig, error) { + secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, imagePullSecretName, metav1.GetOptions{}) + switch { + case errors.IsNotFound(err): + return config, nil + case err != nil: + return config, err + } + + if len(secret.Data[corev1.DockerConfigJsonKey]) == 0 { + return config, nil + } + + config.Images.ImageCredentials.DockerConfigJson = string(secret.Data[corev1.DockerConfigJsonKey]) + return config, nil + } +} diff --git a/pkg/registration/hub/importer/renderers_test.go b/pkg/registration/hub/importer/renderers_test.go index 23ae084a5..7eb82e581 100644 --- 
a/pkg/registration/hub/importer/renderers_test.go
+++ b/pkg/registration/hub/importer/renderers_test.go
@@ -97,3 +97,91 @@ func TestRenderBootstrapHubKubeConfig(t *testing.T) {
 		})
 	}
 }
+
+func TestRenderImage(t *testing.T) {
+	cases := []struct {
+		name          string
+		image         string
+		expectedImage string
+	}{
+		{
+			name: "image is not set",
+		},
+		{
+			name:          "image is set",
+			image:         "test:latest",
+			expectedImage: "test:latest",
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			config := &chart.KlusterletChartConfig{}
+			render := RenderImage(c.image)
+			config, err := render(context.TODO(), config)
+			if err != nil {
+				t.Fatalf("failed to render image: %v", err)
+			}
+			if config.Images.Overrides.OperatorImage != c.expectedImage {
+				t.Errorf("expected: %s, got: %s", c.expectedImage, config.Images.Overrides.OperatorImage)
+			}
+		})
+	}
+}
+
+func TestRenderImagePullSecret(t *testing.T) {
+	cases := []struct {
+		name     string
+		secrets  []runtime.Object
+		expected string
+	}{
+		{
+			name:    "no image pull secret",
+			secrets: []runtime.Object{},
+		},
+		{
+			name: "secret does not have the correct key",
+			secrets: []runtime.Object{
+				&corev1.Secret{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      imagePullSecretName,
+						Namespace: "test",
+					},
+					Data: map[string][]byte{
+						"docker": []byte("test"),
+					},
+				},
+			},
+		},
+		{
+			name: "secret has the correct key",
+			secrets: []runtime.Object{
+				&corev1.Secret{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      imagePullSecretName,
+						Namespace: "test",
+					},
+					Data: map[string][]byte{
+						corev1.DockerConfigJsonKey: []byte("test"),
+					},
+				},
+			},
+			expected: "test",
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			client := kubefake.NewClientset(c.secrets...)
+			config := &chart.KlusterletChartConfig{}
+			render := RenderImagePullSecret(client, "test")
+			config, err := render(context.TODO(), config)
+			if err != nil {
+				t.Fatalf("failed to render image pull secret: %v", err)
+			}
+			if config.Images.ImageCredentials.DockerConfigJson != c.expected {
+				t.Errorf("expected: %s, got: %s", c.expected, config.Images.ImageCredentials.DockerConfigJson)
+			}
+		})
+	}
+}
diff --git a/pkg/registration/hub/manager.go b/pkg/registration/hub/manager.go
index 8bc303dac..56eb65782 100644
--- a/pkg/registration/hub/manager.go
+++ b/pkg/registration/hub/manager.go
@@ -260,6 +260,8 @@ func (m *HubManagerOptions) RunControllerManagerWithInformers(
 		clusterImporter = importer.NewImporter(
 			[]importer.KlusterletConfigRenderer{
 				importer.RenderBootstrapHubKubeConfig(kubeClient, m.ImportOption.APIServerURL),
+				importer.RenderImage(m.ImportOption.AgentImage),
+				importer.RenderImagePullSecret(kubeClient, controllerContext.OperatorNamespace),
 			},
 			clusterClient,
 			clusterInformers.Cluster().V1().ManagedClusters(),
diff --git a/test/integration/operator/clustermanager_hosted_test.go b/test/integration/operator/clustermanager_hosted_test.go
index 46135f9dd..6a6b92f3c 100644
--- a/test/integration/operator/clustermanager_hosted_test.go
+++ b/test/integration/operator/clustermanager_hosted_test.go
@@ -74,7 +74,6 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() {
 	var hubAddonManagerDeployment = fmt.Sprintf("%s-addon-manager-controller", clusterManagerName)
 	var hubRegistrationClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName)
 	var hubRegistrationWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName)
-	var hubRegistrationClusterProfileRole =
fmt.Sprintf("open-cluster-management:%s-clusterprofile:controller", clusterManagerName) var hubWorkWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) var hubWorkControllerClusterRole = fmt.Sprintf("open-cluster-management:%s-work:controller", clusterManagerName) var hubAddOnManagerClusterRole = fmt.Sprintf("open-cluster-management:%s-addon-manager:controller", clusterManagerName) @@ -763,130 +762,6 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) - ginkgo.It("should have expected resource created/deleted when feature gates ClusterProfile enabled/disabled", func() { - ginkgo.By("Enable ClusterProfile feature gate") - gomega.Eventually(func() error { - clusterManager, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get( - context.Background(), clusterManagerName, metav1.GetOptions{}) - if err != nil { - return err - } - featureGate := []operatorapiv1.FeatureGate{ - { - Feature: string(feature.ClusterProfile), - Mode: operatorapiv1.FeatureGateModeTypeEnable, - }, - } - if clusterManager.Spec.RegistrationConfiguration != nil { - for _, fg := range clusterManager.Spec.RegistrationConfiguration.FeatureGates { - if fg.Feature != string(feature.ClusterProfile) { - featureGate = append(featureGate, fg) - } - } - } - clusterManager.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{ - FeatureGates: featureGate, - } - _, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update( - context.Background(), clusterManager, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - - // Check clusterrole/clusterrolebinding - gomega.Eventually(func() error { - if _, err := hostedKubeClient.RbacV1().ClusterRoles().Get( - context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}); err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - gomega.Eventually(func() error { - if _, err := hostedKubeClient.RbacV1().ClusterRoleBindings().Get( - context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}); err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - - // Check if relatedResources are correct - gomega.Eventually(func() error { - actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get( - context.Background(), clusterManagerName, metav1.GetOptions{}) - if err != nil { - return err - } - // increase 3 resources clusterrole/clusterrolebinding and crd - if len(actual.Status.RelatedResources) != 48 { - return fmt.Errorf("should get 48 relatedResources, actual got %v, %v", - len(actual.Status.RelatedResources), actual.Status.RelatedResources) - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Revert ClusterProfile to disable mode") - // Check ClusterProfile disable - gomega.Eventually(func() error { - clusterManager, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get( - context.Background(), clusterManagerName, metav1.GetOptions{}) - if err != nil { - return err - } - featureGate := []operatorapiv1.FeatureGate{ - { - Feature: string(feature.ClusterProfile), - Mode: operatorapiv1.FeatureGateModeTypeDisable, - }, - } - if clusterManager.Spec.RegistrationConfiguration != nil { - for _, fg := range 
clusterManager.Spec.RegistrationConfiguration.FeatureGates { - if fg.Feature != string(feature.ClusterProfile) { - featureGate = append(featureGate, fg) - } - } - } - clusterManager.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{ - FeatureGates: featureGate, - } - _, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update( - context.Background(), clusterManager, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - - // Check clusterrole/clusterrolebinding - gomega.Eventually(func() bool { - _, err := hostedKubeClient.RbacV1().ClusterRoles().Get( - context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}) - if err == nil { - return false - } - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - gomega.Eventually(func() bool { - _, err := hostedKubeClient.RbacV1().ClusterRoleBindings().Get( - context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}) - if err == nil { - return false - } - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // Check if relatedResources are correct - gomega.Eventually(func() error { - actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get( - context.Background(), clusterManagerName, metav1.GetOptions{}) - if err != nil { - return err - } - // reduce 2 resources clusterrole/clusterrolebinding - if len(actual.Status.RelatedResources) != 46 { - return fmt.Errorf("should get 46 relatedResources, actual got %v, %v", - len(actual.Status.RelatedResources), actual.Status.RelatedResources) - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - }) - ginkgo.It("Deployment should be updated when clustermanager is changed", func() { gomega.Eventually(func() error { if _, err := hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get(hostedCtx, hubRegistrationDeployment, metav1.GetOptions{}); err != nil { @@ -948,18 +823,6 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { return nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - - // Check if relatedResources are correct - gomega.Eventually(func() error { - actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get(hostedCtx, clusterManagerName, metav1.GetOptions{}) - if err != nil { - return err - } - if len(actual.Status.RelatedResources) != 46 { - return fmt.Errorf("should get 46 relatedResources, actual got %v", len(actual.Status.RelatedResources)) - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) ginkgo.It("Deployment should be added nodeSelector and toleration when add nodePlacement into clustermanager", func() { diff --git a/test/integration/operator/clustermanager_test.go b/test/integration/operator/clustermanager_test.go index 3cc3367bb..088a5cd9a 100644 --- a/test/integration/operator/clustermanager_test.go +++ b/test/integration/operator/clustermanager_test.go @@ -3,6 +3,7 @@ package operator import ( "context" "fmt" + "os" "time" "github.com/onsi/ginkgo/v2" @@ -12,6 +13,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/rest" "k8s.io/client-go/util/cert" @@ -63,7 +65,6 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { var hubAddonManagerDeployment = 
fmt.Sprintf("%s-addon-manager-controller", clusterManagerName) var hubRegistrationClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName) var hubRegistrationWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) - var hubRegistrationClusterProfileRole = fmt.Sprintf("open-cluster-management:%s-clusterprofile:controller", clusterManagerName) var hubWorkWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-work:webhook", clusterManagerName) var hubWorkControllerClusterRole = fmt.Sprintf("open-cluster-management:%s-work:controller", clusterManagerName) var hubAddOnManagerClusterRole = fmt.Sprintf("open-cluster-management:%s-addon-manager:controller", clusterManagerName) @@ -760,36 +761,21 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { return err }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - // Check clusterrole/clusterrolebinding + // Check clusterrole gomega.Eventually(func() error { - if _, err := kubeClient.RbacV1().ClusterRoles().Get( - context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}); err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - gomega.Eventually(func() error { - if _, err := kubeClient.RbacV1().ClusterRoleBindings().Get( - context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}); err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - - // Check if relatedResources are correct - gomega.Eventually(func() error { - actual, err := operatorClient.OperatorV1().ClusterManagers().Get( - context.Background(), clusterManagerName, metav1.GetOptions{}) + cr, err := kubeClient.RbacV1().ClusterRoles().Get( + context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}) if err != nil { return err } - // increase 3 resources clusterrole/clusterrolebinding and crd - if len(actual.Status.RelatedResources) != 48 { - return fmt.Errorf("should get 48 relatedResources, actual got %v, %v", - len(actual.Status.RelatedResources), actual.Status.RelatedResources) + for _, rule := range cr.Rules { + apiGrouSet := sets.New[string](rule.APIGroups...) 
+ if apiGrouSet.Has("multicluster.x-k8s.io") { + return nil + } } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + return fmt.Errorf("expected multicluster.x-k8s.io rules to exist") + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) ginkgo.By("Revert ClusterProfile to disable mode") // Check ClusterProfile disable @@ -820,41 +806,141 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { return err }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) - // Check clusterrole/clusterrolebinding - gomega.Eventually(func() bool { - _, err := kubeClient.RbacV1().ClusterRoles().Get( - context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}) - if err == nil { - return false - } - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - gomega.Eventually(func() bool { - _, err := kubeClient.RbacV1().ClusterRoleBindings().Get( - context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}) - if err == nil { - return false - } - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // Check if relatedResources are correct gomega.Eventually(func() error { - actual, err := operatorClient.OperatorV1().ClusterManagers().Get( + cr, err := kubeClient.RbacV1().ClusterRoles().Get( + context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}) + if err != nil { + return err + } + for _, rule := range cr.Rules { + apiGrouSet := sets.New[string](rule.APIGroups...) + if apiGrouSet.Has("multicluster.x-k8s.io") { + return fmt.Errorf("expected multicluster.x-k8s.io rules to not exist") + } + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + }) + + ginkgo.It("should have expected resource created/deleted when feature gates ClusterImporter enabled/disabled", func() { + ginkgo.By("Enable ClusterImporter feature gate") + os.Setenv("AGENT_IMAGE", "test-agent:latest") + gomega.Eventually(func() error { + clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get( context.Background(), clusterManagerName, metav1.GetOptions{}) if err != nil { return err } - // reduce 2 resources clusterrole/clusterrolebinding - if len(actual.Status.RelatedResources) != 46 { - return fmt.Errorf("should get 46 relatedResources, actual got %v, %v", - len(actual.Status.RelatedResources), actual.Status.RelatedResources) + featureGate := []operatorapiv1.FeatureGate{ + { + Feature: string(feature.ClusterImporter), + Mode: operatorapiv1.FeatureGateModeTypeEnable, + }, + } + if clusterManager.Spec.RegistrationConfiguration != nil { + for _, fg := range clusterManager.Spec.RegistrationConfiguration.FeatureGates { + if fg.Feature != string(feature.ClusterImporter) { + featureGate = append(featureGate, fg) + } + } + } + clusterManager.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{ + FeatureGates: featureGate, + } + _, err = operatorClient.OperatorV1().ClusterManagers().Update( + context.Background(), clusterManager, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // Check clusterrole + gomega.Eventually(func() error { + cr, err := kubeClient.RbacV1().ClusterRoles().Get( + context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}) + if err != nil { + return err + } + for _, rule := range cr.Rules { + apiGrouSet := sets.New[string](rule.APIGroups...) 
+ if apiGrouSet.Has("cluster.x-k8s.io") { + return nil + } + } + return fmt.Errorf("expected cluster.x-k8s.io rules to exist") + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // check deployment + gomega.Eventually(func() error { + deploy, err := kubeClient.AppsV1().Deployments(hubNamespace).Get( + context.Background(), hubRegistrationDeployment, metav1.GetOptions{}) + if err != nil { + return err + } + argSet := sets.New[string](deploy.Spec.Template.Spec.Containers[0].Args...) + if argSet.Has("--agent-image=test-agent:latest") { + return nil + } + return fmt.Errorf("expected agent-image flag to be set") + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + ginkgo.By("Revert ClusterImporter to disable mode") + // Check ClusterProfile disable + gomega.Eventually(func() error { + clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get( + context.Background(), clusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + featureGate := []operatorapiv1.FeatureGate{ + { + Feature: string(feature.ClusterImporter), + Mode: operatorapiv1.FeatureGateModeTypeDisable, + }, + } + if clusterManager.Spec.RegistrationConfiguration != nil { + for _, fg := range clusterManager.Spec.RegistrationConfiguration.FeatureGates { + if fg.Feature != string(feature.ClusterImporter) { + featureGate = append(featureGate, fg) + } + } + } + clusterManager.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{ + FeatureGates: featureGate, + } + _, err = operatorClient.OperatorV1().ClusterManagers().Update( + context.Background(), clusterManager, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // Check deployment + gomega.Eventually(func() error { + deploy, err := kubeClient.AppsV1().Deployments(hubNamespace).Get( + context.Background(), hubRegistrationDeployment, metav1.GetOptions{}) + if err != nil { + return err + } + argSet := sets.New[string](deploy.Spec.Template.Spec.Containers[0].Args...) + if argSet.Has("--agent-image=test-agent:latest") { + return fmt.Errorf("expected agent-image flag not to be set") } return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + // check cluster role + gomega.Eventually(func() error { + cr, err := kubeClient.RbacV1().ClusterRoles().Get( + context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}) + if err != nil { + return err + } + for _, rule := range cr.Rules { + apiGrouSet := sets.New[string](rule.APIGroups...) + if apiGrouSet.Has("cluster.x-k8s.io") { + return fmt.Errorf("expected cluster.x-k8s.io rules to not exist") + } + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) }) - ginkgo.It("Deployment should be updated when clustermanager is changed", func() { gomega.Eventually(func() error { if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {