From ee0a60f013bb575f89227b1db05d238fce8c8077 Mon Sep 17 00:00:00 2001 From: Qing Hao Date: Thu, 2 Mar 2023 16:29:14 +0800 Subject: [PATCH] addon manager feature gates (#325) Signed-off-by: haoqing0110 --- Makefile | 8 +- ...cluster-management_clustermanagers.cr.yaml | 3 +- ...-management_clustermanagers_hosted.cr.yaml | 1 + ...cluster-manager.clusterserviceversion.yaml | 2 + ...cluster-management.io_clustermanagers.yaml | 35 +++ ...ter-manager-addon-manager-clusterrole.yaml | 35 +++ ...ager-addon-manager-clusterrolebinding.yaml | 12 + ...-manager-addon-manager-serviceaccount.yaml | 5 + ...ster-manager-addon-manager-deployment.yaml | 84 +++++++ manifests/config.go | 2 + pkg/helpers/helpers.go | 38 +++ pkg/helpers/helpers_test.go | 226 ++++++++++++++++++ .../clustermanager_controller.go | 30 +++ .../clustermanager_controller_test.go | 35 ++- .../clustermanager_hub_reconcile.go | 41 ++-- .../clustermanager_runtime_reconcile.go | 38 ++- .../clustermanager_webhook_reconcile.go | 20 +- .../integration/clustermanager_hosted_test.go | 146 ++++++++++- test/integration/clustermanager_test.go | 148 +++++++++++- test/integration/integration_suite_test.go | 2 + 20 files changed, 831 insertions(+), 80 deletions(-) create mode 100644 manifests/cluster-manager/hub/cluster-manager-addon-manager-clusterrole.yaml create mode 100644 manifests/cluster-manager/hub/cluster-manager-addon-manager-clusterrolebinding.yaml create mode 100644 manifests/cluster-manager/hub/cluster-manager-addon-manager-serviceaccount.yaml create mode 100644 manifests/cluster-manager/management/cluster-manager-addon-manager-deployment.yaml diff --git a/Makefile b/Makefile index 6c2047fe1..380ba5a84 100644 --- a/Makefile +++ b/Makefile @@ -32,6 +32,10 @@ REGISTRATION_IMAGE?=$(IMAGE_REGISTRY)/registration:$(REGISTRATION_TAG) PLACEMENT_TAG?=latest PLACEMENT_IMAGE?=$(IMAGE_REGISTRY)/placement:$(PLACEMENT_TAG) +# ADDON_MANAGER_IMAGE can be set in the env to override calculated value +ADDON_MANAGER_TAG?=latest 
+ADDON_MANAGER_IMAGE?=$(IMAGE_REGISTRY)/addon-manager:$(ADDON_MANAGER_TAG) + OPERATOR_SDK?=$(PERMANENT_TMP_GOPATH)/bin/operator-sdk OPERATOR_SDK_VERSION?=v1.1.0 operatorsdk_gen_dir:=$(dir $(OPERATOR_SDK)) @@ -119,10 +123,10 @@ deploy-hub-operator: ensure-kustomize mv deploy/cluster-manager/config/kustomization.yaml.tmp deploy/cluster-manager/config/kustomization.yaml apply-hub-cr: - $(SED_CMD) -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,quay.io/open-cluster-management/placement,$(PLACEMENT_IMAGE)," deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers.cr.yaml | $(KUBECTL) apply -f - + $(SED_CMD) -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,quay.io/open-cluster-management/placement,$(PLACEMENT_IMAGE)," -e "s,quay.io/open-cluster-management/addon-manager,$(ADDON_MANAGER_IMAGE)," deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers.cr.yaml | $(KUBECTL) apply -f - apply-hub-cr-hosted: external-hub-secret - $(SED_CMD) -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,quay.io/open-cluster-management/placement,$(PLACEMENT_IMAGE)," deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers_hosted.cr.yaml | $(KUBECTL) apply -f - + $(SED_CMD) -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,quay.io/open-cluster-management/placement,$(PLACEMENT_IMAGE)," -e "s,quay.io/open-cluster-management/addon-manager,$(ADDON_MANAGER_IMAGE)," deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers_hosted.cr.yaml | $(KUBECTL) apply -f - clean-hub: clean-hub-cr clean-hub-operator diff --git 
a/deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers.cr.yaml b/deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers.cr.yaml index d76da8658..6009f3cce 100644 --- a/deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers.cr.yaml +++ b/deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers.cr.yaml @@ -6,9 +6,10 @@ spec: registrationImagePullSpec: quay.io/open-cluster-management/registration workImagePullSpec: quay.io/open-cluster-management/work placementImagePullSpec: quay.io/open-cluster-management/placement + addOnManagerImagePullSpec: quay.io/open-cluster-management/addon-manager deployOption: mode: Default registrationConfiguration: featureGates: - feature: DefaultClusterSet - mode: Enable \ No newline at end of file + mode: Enable diff --git a/deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers_hosted.cr.yaml b/deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers_hosted.cr.yaml index ba4076c6c..f94ac2805 100644 --- a/deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers_hosted.cr.yaml +++ b/deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers_hosted.cr.yaml @@ -6,6 +6,7 @@ spec: registrationImagePullSpec: quay.io/open-cluster-management/registration workImagePullSpec: quay.io/open-cluster-management/work placementImagePullSpec: quay.io/open-cluster-management/placement + addOnManagerImagePullSpec: quay.io/open-cluster-management/addon-manager deployOption: mode: Hosted hosted: diff --git a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml index b3a95f737..dada1590a 100644 --- 
a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml +++ b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml @@ -11,6 +11,7 @@ metadata: "name": "cluster-manager" }, "spec": { + "addOnManagerImagePullSpec": "quay.io/open-cluster-management/addon-manager", "deployOption": { "mode": "Default" }, @@ -34,6 +35,7 @@ metadata: "name": "cluster-manager" }, "spec": { + "addOnManagerImagePullSpec": "quay.io/open-cluster-management/addon-manager", "deployOption": { "hosted": { "registrationWebhookConfiguration": { diff --git a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/operator.open-cluster-management.io_clustermanagers.yaml b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/operator.open-cluster-management.io_clustermanagers.yaml index 60c080d9c..cb625d1f7 100644 --- a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/operator.open-cluster-management.io_clustermanagers.yaml +++ b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/operator.open-cluster-management.io_clustermanagers.yaml @@ -31,6 +31,41 @@ spec: mode: Default description: Spec represents a desired deployment configuration of controllers that govern registration and work distribution for attached Klusterlets. properties: + addOnManagerConfiguration: + default: + mode: Disable + description: AddOnManagerConfiguration contains the configuration of addon manager + properties: + featureGates: + description: "FeatureGates represents the list of feature gates for addon manager If it is set empty, default feature gates will be used. If it is set, featuregate/Foo is an example of one item in FeatureGates: 1. If featuregate/Foo does not exist, registration-operator will discard it 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true] 3. If featuregate/Foo exists and is true by default. 
If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false, \the can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false." + items: + properties: + feature: + description: Feature is the key of feature gate. e.g. featuregate/Foo. + type: string + mode: + default: Disable + description: Mode is either Enable, Disable, "" where "" is Disable by default. In Enable mode, a valid feature gate `featuregate/Foo` will be set to "--featuregate/Foo=true". In Disable mode, a valid feature gate `featuregate/Foo` will be set to "--featuregate/Foo=false". + enum: + - Enable + - Disable + type: string + required: + - feature + type: object + type: array + mode: + default: Disable + description: Mode is either Enable, Disable, "" where "" is Disable by default. In Enable mode, the component will be installed. In Disable mode, the component will not be installed. + enum: + - Enable + - Disable + type: string + type: object + addOnManagerImagePullSpec: + default: quay.io/open-cluster-management/addon-manager + description: AddOnManagerImagePullSpec represents the desired image configuration of addon manager controller/webhook installed on hub. 
+ type: string deployOption: default: mode: Default diff --git a/manifests/cluster-manager/hub/cluster-manager-addon-manager-clusterrole.yaml b/manifests/cluster-manager/hub/cluster-manager-addon-manager-clusterrole.yaml new file mode 100644 index 000000000..29eb399b2 --- /dev/null +++ b/manifests/cluster-manager/hub/cluster-manager-addon-manager-clusterrole.yaml @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:{{ .ClusterManagerName }}-addon-manager:controller +rules: +# Allow controller to get/list/watch/create/delete configmaps/events +- apiGroups: [""] + resources: ["configmaps", "events"] + verbs: ["get", "list", "watch", "create", "update", "delete", "deletecollection", "patch"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +- apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["get", "create"] +# Allow controller to manage managedclusters/placements/placementdecisions +- apiGroups: ["cluster.open-cluster-management.io"] + resources: ["managedclusters", "placements", "placementdecisions"] + verbs: ["get", "list", "watch"] +# Allow controller to manage managedclusteraddons/clustermanagementaddons +- apiGroups: ["addon.open-cluster-management.io"] + resources: ["managedclusteraddons/finalizers"] + verbs: ["update"] +- apiGroups: [ "addon.open-cluster-management.io" ] + resources: [ "clustermanagementaddons/finalizers" ] + verbs: [ "update" ] +- apiGroups: ["addon.open-cluster-management.io"] + resources: ["clustermanagementaddons"] + verbs: ["get", "list", "watch"] +- apiGroups: ["addon.open-cluster-management.io"] + resources: ["managedclusteraddons"] + verbs: ["get", "list", "watch", "create", "update", "delete"] +- apiGroups: ["addon.open-cluster-management.io"] + resources: ["managedclusteraddons/status"] + verbs: ["update", "patch"] diff --git 
a/manifests/cluster-manager/hub/cluster-manager-addon-manager-clusterrolebinding.yaml b/manifests/cluster-manager/hub/cluster-manager-addon-manager-clusterrolebinding.yaml new file mode 100644 index 000000000..c8c0a2ae6 --- /dev/null +++ b/manifests/cluster-manager/hub/cluster-manager-addon-manager-clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:{{ .ClusterManagerName }}-addon-manager:controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: open-cluster-management:{{ .ClusterManagerName }}-addon-manager:controller +subjects: +- kind: ServiceAccount + namespace: {{ .ClusterManagerNamespace }} + name: {{ .ClusterManagerName }}-addon-manager-controller-sa diff --git a/manifests/cluster-manager/hub/cluster-manager-addon-manager-serviceaccount.yaml b/manifests/cluster-manager/hub/cluster-manager-addon-manager-serviceaccount.yaml new file mode 100644 index 000000000..b79d6cb81 --- /dev/null +++ b/manifests/cluster-manager/hub/cluster-manager-addon-manager-serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .ClusterManagerName }}-addon-manager-controller-sa + namespace: {{ .ClusterManagerNamespace }} diff --git a/manifests/cluster-manager/management/cluster-manager-addon-manager-deployment.yaml b/manifests/cluster-manager/management/cluster-manager-addon-manager-deployment.yaml new file mode 100644 index 000000000..00b5cf650 --- /dev/null +++ b/manifests/cluster-manager/management/cluster-manager-addon-manager-deployment.yaml @@ -0,0 +1,84 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .ClusterManagerName }}-addon-manager-controller + namespace: {{ .ClusterManagerNamespace }} + labels: + app: clustermanager-controller +spec: + replicas: {{ .Replica }} + selector: + matchLabels: + app: clustermanager-addon-manager-controller + template: + metadata: + labels: + app: 
clustermanager-addon-manager-controller + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 70 + podAffinityTerm: + topologyKey: failure-domain.beta.kubernetes.io/zone + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - clustermanager-addon-manager-controller + - weight: 30 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - clustermanager-addon-manager-controller + {{ if not .HostedMode }} + serviceAccountName: {{ .ClusterManagerName }}-addon-manager-controller-sa + {{ end }} + containers: + - name: addon-manager-controller + image: {{ .AddOnManagerImage }} + args: + - "/addon-manager" + - "manager" + {{ if .HostedMode }} + - "--kubeconfig=/var/run/secrets/hub/kubeconfig" + {{ end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + livenessProbe: + httpGet: + path: /healthz + scheme: HTTPS + port: 8443 + initialDelaySeconds: 2 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /healthz + scheme: HTTPS + port: 8443 + initialDelaySeconds: 2 + resources: + requests: + cpu: 2m + memory: 16Mi + {{ if .HostedMode }} + volumeMounts: + - mountPath: /var/run/secrets/hub + name: kubeconfig + readOnly: true + volumes: + - name: kubeconfig + secret: + secretName: {{ .ClusterManagerName }}-addon-manager-controller-sa-kubeconfig + {{ end }} diff --git a/manifests/config.go b/manifests/config.go index 779202277..de3ad7763 100644 --- a/manifests/config.go +++ b/manifests/config.go @@ -14,6 +14,8 @@ type HubConfig struct { WorkWebhook Webhook RegistrationFeatureGates []string WorkFeatureGates []string + AddOnManagerImage string + AddOnManagerComponentMode string } type Webhook struct { diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go index 79e0f37ec..cbd7ca75e 100644 --- a/pkg/helpers/helpers.go +++ b/pkg/helpers/helpers.go @@ 
-250,6 +250,8 @@ func CleanUpStaticObject( switch t := object.(type) { case *corev1.Namespace: err = client.CoreV1().Namespaces().Delete(ctx, t.Name, metav1.DeleteOptions{}) + case *appsv1.Deployment: + err = client.AppsV1().Deployments(t.Namespace).Delete(ctx, t.Name, metav1.DeleteOptions{}) case *corev1.Endpoints: err = client.CoreV1().Endpoints(t.Namespace).Delete(ctx, t.Name, metav1.DeleteOptions{}) case *corev1.Service: @@ -706,6 +708,20 @@ func SetRelatedResourcesStatuses( } } +func RemoveRelatedResourcesStatuses( + relatedResourcesStatuses *[]operatorapiv1.RelatedResourceMeta, + rmRelatedResourcesStatus operatorapiv1.RelatedResourceMeta) { + if relatedResourcesStatuses == nil { + return + } + + existingRelatedResource := FindRelatedResourcesStatus(*relatedResourcesStatuses, rmRelatedResourcesStatus) + if existingRelatedResource != nil { + RemoveRelatedResourcesStatus(relatedResourcesStatuses, rmRelatedResourcesStatus) + return + } +} + func FindRelatedResourcesStatus( relatedResourcesStatuses []operatorapiv1.RelatedResourceMeta, relatedResource operatorapiv1.RelatedResourceMeta) *operatorapiv1.RelatedResourceMeta { @@ -717,6 +733,18 @@ func FindRelatedResourcesStatus( return nil } +func RemoveRelatedResourcesStatus( + relatedResourcesStatuses *[]operatorapiv1.RelatedResourceMeta, + relatedResource operatorapiv1.RelatedResourceMeta) { + var result []operatorapiv1.RelatedResourceMeta + for _, v := range *relatedResourcesStatuses { + if v != relatedResource { + result = append(result, v) + } + } + *relatedResourcesStatuses = result +} + func SetRelatedResourcesStatusesWithObj( relatedResourcesStatuses *[]operatorapiv1.RelatedResourceMeta, objData []byte) { res, err := GenerateRelatedResource(objData) @@ -727,6 +755,16 @@ func SetRelatedResourcesStatusesWithObj( SetRelatedResourcesStatuses(relatedResourcesStatuses, res) } +func RemoveRelatedResourcesStatusesWithObj( + relatedResourcesStatuses *[]operatorapiv1.RelatedResourceMeta, objData []byte) { + res, err := 
GenerateRelatedResource(objData) + if err != nil { + klog.Errorf("failed to generate relatedResource %v, and skip to set into status. %v", objData, err) + return + } + RemoveRelatedResourcesStatuses(relatedResourcesStatuses, res) +} + func UpdateClusterManagerRelatedResourcesFn(relatedResources ...operatorapiv1.RelatedResourceMeta) UpdateClusterManagerStatusFunc { return func(oldStatus *operatorapiv1.ClusterManagerStatus) error { if !reflect.DeepEqual(oldStatus.RelatedResources, relatedResources) { diff --git a/pkg/helpers/helpers_test.go b/pkg/helpers/helpers_test.go index 9c96145cb..78add8594 100644 --- a/pkg/helpers/helpers_test.go +++ b/pkg/helpers/helpers_test.go @@ -1198,6 +1198,232 @@ func TestGetRelatedResource(t *testing.T) { } } +func TestSetRelatedResourcesStatusesWithObj(t *testing.T) { + cases := []struct { + name string + manifestFile string + config manifests.HubConfig + relatedResources []operatorapiv1.RelatedResourceMeta + expectedRelatedResource []operatorapiv1.RelatedResourceMeta + }{ + { + name: "append obj to nil relatedResources", + manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", + config: manifests.HubConfig{ + ClusterManagerName: "test", + Replica: 1, + }, + relatedResources: nil, + expectedRelatedResource: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + Namespace: "", + Name: "clustermanagementaddons.addon.open-cluster-management.io", + }, + }, + }, + { + name: "append obj to empty relatedResources", + manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", + config: manifests.HubConfig{ + ClusterManagerName: "test", + Replica: 1, + }, + relatedResources: []operatorapiv1.RelatedResourceMeta{}, + expectedRelatedResource: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: 
"customresourcedefinitions", + Namespace: "", + Name: "clustermanagementaddons.addon.open-cluster-management.io", + }, + }, + }, + { + name: "append obj to relatedResources", + manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", + config: manifests.HubConfig{ + ClusterManagerName: "test", + Replica: 1, + }, + relatedResources: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apps", + Version: "v1", + Resource: "deployments", + Namespace: "test-namespace", + Name: "test-registration-controller", + }, + }, + expectedRelatedResource: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apps", + Version: "v1", + Resource: "deployments", + Namespace: "test-namespace", + Name: "test-registration-controller", + }, + { + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + Namespace: "", + Name: "clustermanagementaddons.addon.open-cluster-management.io", + }, + }, + }, + { + name: "append duplicate obj to relatedResources", + manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", + config: manifests.HubConfig{ + ClusterManagerName: "test", + Replica: 1, + }, + relatedResources: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + Namespace: "", + Name: "clustermanagementaddons.addon.open-cluster-management.io", + }, + }, + expectedRelatedResource: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + Namespace: "", + Name: "clustermanagementaddons.addon.open-cluster-management.io", + }, + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + template, err := manifests.ClusterManagerManifestFiles.ReadFile(c.manifestFile) + if err != nil { + t.Errorf("failed to read file %v", err) + } + objData := 
assets.MustCreateAssetFromTemplate(c.manifestFile, template, c.config).Data + + SetRelatedResourcesStatusesWithObj(&c.relatedResources, objData) + if !reflect.DeepEqual(c.relatedResources, c.expectedRelatedResource) { + t.Errorf("Expect to get %v, but got %v", c.expectedRelatedResource, c.relatedResources) + } + }) + + } +} + +func TestRemoveRelatedResourcesStatusesWithObj(t *testing.T) { + cases := []struct { + name string + manifestFile string + config manifests.HubConfig + relatedResources []operatorapiv1.RelatedResourceMeta + expectedRelatedResource []operatorapiv1.RelatedResourceMeta + }{ + { + name: "remove obj from nil relatedResources", + manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", + config: manifests.HubConfig{ + ClusterManagerName: "test", + Replica: 1, + }, + relatedResources: nil, + expectedRelatedResource: nil, + }, + { + name: "remove obj from empty relatedResources", + manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", + config: manifests.HubConfig{ + ClusterManagerName: "test", + Replica: 1, + }, + relatedResources: []operatorapiv1.RelatedResourceMeta{}, + expectedRelatedResource: []operatorapiv1.RelatedResourceMeta{}, + }, + { + name: "remove obj from relatedResources", + manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", + config: manifests.HubConfig{ + ClusterManagerName: "test", + Replica: 1, + }, + relatedResources: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apps", + Version: "v1", + Resource: "deployments", + Namespace: "test-namespace", + Name: "test-registration-controller", + }, + { + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + Namespace: "", + Name: "clustermanagementaddons.addon.open-cluster-management.io", + }, + }, + expectedRelatedResource: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apps", 
+ Version: "v1", + Resource: "deployments", + Namespace: "test-namespace", + Name: "test-registration-controller", + }, + }, + }, + { + name: "remove not exist obj from relatedResources", + manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml", + config: manifests.HubConfig{ + ClusterManagerName: "test", + Replica: 1, + }, + relatedResources: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apps", + Version: "v1", + Resource: "deployments", + Namespace: "test-namespace", + Name: "test-registration-controller", + }, + }, + expectedRelatedResource: []operatorapiv1.RelatedResourceMeta{ + { + Group: "apps", + Version: "v1", + Resource: "deployments", + Namespace: "test-namespace", + Name: "test-registration-controller", + }, + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + template, err := manifests.ClusterManagerManifestFiles.ReadFile(c.manifestFile) + if err != nil { + t.Errorf("failed to read file %v", err) + } + objData := assets.MustCreateAssetFromTemplate(c.manifestFile, template, c.config).Data + + RemoveRelatedResourcesStatusesWithObj(&c.relatedResources, objData) + if !reflect.DeepEqual(c.relatedResources, c.expectedRelatedResource) { + t.Errorf("Expect to get %v, but got %v", c.expectedRelatedResource, c.relatedResources) + } + }) + + } +} + func TestUpdateRelatedResources(t *testing.T) { gvrDeployment := appsv1.SchemeGroupVersion.WithResource("deployments") gvrSecret := corev1.SchemeGroupVersion.WithResource("secrets") diff --git a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go index 48ebfeba8..37f422f47 100644 --- a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go +++ b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go @@ -16,6 +16,7 @@ 
import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" + "github.com/openshift/library-go/pkg/assets" "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" @@ -138,6 +139,7 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f RegistrationImage: clusterManager.Spec.RegistrationImagePullSpec, WorkImage: clusterManager.Spec.WorkImagePullSpec, PlacementImage: clusterManager.Spec.PlacementImagePullSpec, + AddOnManagerImage: clusterManager.Spec.AddOnManagerImagePullSpec, Replica: helpers.DetermineReplica(ctx, n.operatorKubeClient, clusterManager.Spec.DeployOption.Mode, nil), HostedMode: clusterManager.Spec.DeployOption.Mode == operatorapiv1.InstallModeHosted, RegistrationWebhook: manifests.Webhook{ @@ -148,6 +150,10 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f }, } + if clusterManager.Spec.AddOnManagerConfiguration != nil { + config.AddOnManagerComponentMode = string(clusterManager.Spec.AddOnManagerConfiguration.Mode) + } + var featureGateCondition metav1.Condition // If there are some invalid feature gates of registration or work, will output // condition `ValidFeatureGates` False in ClusterManager. 
@@ -369,3 +375,27 @@ func convertWebhookConfiguration(webhookConfiguration operatorapiv1.WebhookConfi IsIPFormat: isIPFormat(webhookConfiguration.Address), } } + +// clean specified resources +func cleanResources(ctx context.Context, kubeClient kubernetes.Interface, cm *operatorapiv1.ClusterManager, config manifests.HubConfig, resources ...string) (*operatorapiv1.ClusterManager, reconcileState, error) { + for _, file := range resources { + err := helpers.CleanUpStaticObject( + ctx, + kubeClient, nil, nil, + func(name string) ([]byte, error) { + template, err := manifests.ClusterManagerManifestFiles.ReadFile(name) + if err != nil { + return nil, err + } + objData := assets.MustCreateAssetFromTemplate(name, template, config).Data + helpers.RemoveRelatedResourcesStatusesWithObj(&cm.Status.RelatedResources, objData) + return objData, nil + }, + file, + ) + if err != nil { + return cm, reconcileContinue, err + } + } + return cm, reconcileContinue, nil +} diff --git a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go index 691aca676..128c3f38f 100644 --- a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go +++ b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go @@ -56,6 +56,9 @@ func newClusterManager(name string) *operatorapiv1.ClusterManager { DeployOption: operatorapiv1.ClusterManagerDeployOption{ Mode: operatorapiv1.InstallModeDefault, }, + AddOnManagerConfiguration: &operatorapiv1.AddOnManagerConfiguration{ + Mode: operatorapiv1.ComponentModeTypeEnable, + }, }, } } @@ -180,6 +183,29 @@ func setDeployment(clusterManagerName, clusterManagerNamespace string) []runtime ObservedGeneration: 1, }, }, + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterManagerName + "-addon-manager-controller", + Namespace: 
clusterManagerNamespace, + Generation: 1, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "addon-manager-controller", + }, + }, + }, + }, + Replicas: &replicas, + }, + Status: appsv1.DeploymentStatus{ + ReadyReplicas: replicas, + ObservedGeneration: 1, + }, + }, } } @@ -221,6 +247,9 @@ func ensureObject(t *testing.T, object runtime.Object, hubCore *operatorapiv1.Cl if strings.Contains(o.Name, "placement") && hubCore.Spec.PlacementImagePullSpec != o.Spec.Template.Spec.Containers[0].Image { t.Errorf("Placement image does not match to the expected.") } + if strings.Contains(o.Name, "addon-manager") && hubCore.Spec.AddOnManagerImagePullSpec != o.Spec.Template.Spec.Containers[0].Image { + t.Errorf("AddOnManager image does not match to the expected.") + } } } @@ -250,7 +279,7 @@ func TestSyncDeploy(t *testing.T) { // Check if resources are created as expected // We expect creat the namespace twice respectively in the management cluster and the hub cluster. - testinghelper.AssertEqualNumber(t, len(createKubeObjects), 21) + testinghelper.AssertEqualNumber(t, len(createKubeObjects), 24) for _, object := range createKubeObjects { ensureObject(t, object, clusterManager) } @@ -290,7 +319,7 @@ func TestSyncDeployNoWebhook(t *testing.T) { // Check if resources are created as expected // We expect creat the namespace twice respectively in the management cluster and the hub cluster. 
- testinghelper.AssertEqualNumber(t, len(createKubeObjects), 20) + testinghelper.AssertEqualNumber(t, len(createKubeObjects), 24) for _, object := range createKubeObjects { ensureObject(t, object, clusterManager) } @@ -332,7 +361,7 @@ func TestSyncDelete(t *testing.T) { deleteKubeActions = append(deleteKubeActions, deleteKubeAction) } } - testinghelper.AssertEqualNumber(t, len(deleteKubeActions), 21) // delete namespace both from the hub cluster and the mangement cluster + testinghelper.AssertEqualNumber(t, len(deleteKubeActions), 24) // delete namespace both from the hub cluster and the management cluster deleteCRDActions := []clienttesting.DeleteActionImpl{} crdActions := tc.apiExtensionClient.Actions() diff --git a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go index 82e1bfba4..59541c569 100644 --- a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go +++ b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go @@ -43,6 +43,13 @@ var ( "cluster-manager/hub/cluster-manager-placement-serviceaccount.yaml", } + hubAddOnManagerRbacResourceFiles = []string{ + // addon-manager + "cluster-manager/hub/cluster-manager-addon-manager-clusterrole.yaml", + "cluster-manager/hub/cluster-manager-addon-manager-clusterrolebinding.yaml", + "cluster-manager/hub/cluster-manager-addon-manager-serviceaccount.yaml", + } + // The hubHostedWebhookServiceFiles should only be deployed on the hub cluster when the deploy mode is hosted. 
hubDefaultWebhookServiceFiles = []string{ "cluster-manager/hub/cluster-manager-registration-webhook-service.yaml", @@ -65,6 +72,14 @@ type hubReoncile struct { } func (c *hubReoncile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { + // If AddOnManager is not enabled, remove related resources + if operatorapiv1.ComponentModeType(config.AddOnManagerComponentMode) != operatorapiv1.ComponentModeTypeEnable { + _, _, err := cleanResources(ctx, c.hubKubeClient, cm, config, hubAddOnManagerRbacResourceFiles...) + if err != nil { + return cm, reconcileStop, err + } + } + hubResources := getHubResources(cm.Spec.DeployOption.Mode, config) var appliedErrs []error @@ -106,33 +121,15 @@ func (c *hubReoncile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterMa func (c *hubReoncile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { hubResources := getHubResources(cm.Spec.DeployOption.Mode, config) - for _, file := range hubResources { - err := helpers.CleanUpStaticObject( - ctx, - c.hubKubeClient, - nil, - nil, - func(name string) ([]byte, error) { - template, err := manifests.ClusterManagerManifestFiles.ReadFile(name) - if err != nil { - return nil, err - } - return assets.MustCreateAssetFromTemplate(name, template, config).Data, nil - }, - file, - ) - if err != nil { - // TODO update condition - return cm, reconcileContinue, err - } - } - - return cm, reconcileContinue, nil + return cleanResources(ctx, c.hubKubeClient, cm, config, hubResources...) } func getHubResources(mode operatorapiv1.InstallMode, config manifests.HubConfig) []string { hubResources := []string{namespaceResource} hubResources = append(hubResources, hubRbacResourceFiles...) 
+ if operatorapiv1.ComponentModeType(config.AddOnManagerComponentMode) == operatorapiv1.ComponentModeTypeEnable { + hubResources = append(hubResources, hubAddOnManagerRbacResourceFiles...) + } // the hubHostedWebhookServiceFiles are only used in hosted mode if mode == operatorapiv1.InstallModeHosted { hubResources = append(hubResources, hubHostedWebhookServiceFiles...) diff --git a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go index 00513807f..c64f98397 100644 --- a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go +++ b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go @@ -30,6 +30,10 @@ var ( "cluster-manager/management/cluster-manager-work-webhook-deployment.yaml", "cluster-manager/management/cluster-manager-placement-deployment.yaml", } + + addOnManagerDeploymentFiles = []string{ + "cluster-manager/management/cluster-manager-addon-manager-deployment.yaml", + } ) type runtimeReconcile struct { @@ -44,6 +48,14 @@ type runtimeReconcile struct { } func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { + // If AddOnManager is not enabled, remove related resources + if operatorapiv1.ComponentModeType(config.AddOnManagerComponentMode) != operatorapiv1.ComponentModeTypeEnable { + _, _, err := cleanResources(ctx, c.kubeClient, cm, config, addOnManagerDeploymentFiles...) + if err != nil { + return cm, reconcileStop, err + } + } + // In the Hosted mode, ensure the rbac kubeconfig secrets is existed for deployments to mount. // In this step, we get serviceaccount token from the hub cluster to form a kubeconfig and set it as a secret on the management cluster. 
// Before this step, the serviceaccounts in the hub cluster and the namespace in the management cluster should be applied first. @@ -90,7 +102,11 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus } var progressingDeployments []string - for _, file := range deploymentFiles { + deployResources := deploymentFiles + if operatorapiv1.ComponentModeType(config.AddOnManagerComponentMode) == operatorapiv1.ComponentModeTypeEnable { + deployResources = append(deployResources, addOnManagerDeploymentFiles...) + } + for _, file := range deployResources { updatedDeployment, currentGeneration, err := helpers.ApplyDeployment( ctx, c.kubeClient, @@ -150,25 +166,7 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus func (c *runtimeReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { // Remove All Static files managementResources := []string{namespaceResource} // because namespace is removed, we don't need to remove deployments explicitly - for _, file := range managementResources { - err := helpers.CleanUpStaticObject( - ctx, - c.kubeClient, nil, nil, - func(name string) ([]byte, error) { - template, err := manifests.ClusterManagerManifestFiles.ReadFile(name) - if err != nil { - return nil, err - } - return assets.MustCreateAssetFromTemplate(name, template, config).Data, nil - }, - file, - ) - if err != nil { - return cm, reconcileContinue, err - } - } - - return cm, reconcileContinue, nil + return cleanResources(ctx, c.kubeClient, cm, config, managementResources...) 
} // getSAs return serviceaccount names of all hub components diff --git a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go index 9be4e4430..a4fcd2b53 100644 --- a/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go +++ b/pkg/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go @@ -93,23 +93,5 @@ func (c *webhookReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterM // Remove All webhook files webhookResources := hubRegistrationWebhookResourceFiles webhookResources = append(webhookResources, hubWorkWebhookResourceFiles...) - for _, file := range webhookResources { - err := helpers.CleanUpStaticObject( - ctx, - c.hubKubeClient, nil, nil, - func(name string) ([]byte, error) { - template, err := manifests.ClusterManagerManifestFiles.ReadFile(name) - if err != nil { - return nil, err - } - return assets.MustCreateAssetFromTemplate(name, template, config).Data, nil - }, - file, - ) - if err != nil { - return cm, reconcileContinue, err - } - } - - return cm, reconcileContinue, nil + return cleanResources(ctx, c.hubKubeClient, cm, config, webhookResources...)
} diff --git a/test/integration/clustermanager_hosted_test.go b/test/integration/clustermanager_hosted_test.go index e390b4a77..bb094704b 100644 --- a/test/integration/clustermanager_hosted_test.go +++ b/test/integration/clustermanager_hosted_test.go @@ -8,12 +8,14 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + operatorapiv1 "open-cluster-management.io/api/operator/v1" v1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/registration-operator/pkg/helpers" "open-cluster-management.io/registration-operator/test/integration/util" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/cert" @@ -41,6 +43,15 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { var hubPlacementDeployment = fmt.Sprintf("%s-placement-controller", clusterManagerName) var hubRegistrationWebhookDeployment = fmt.Sprintf("%s-registration-webhook", clusterManagerName) var hubWorkWebhookDeployment = fmt.Sprintf("%s-work-webhook", clusterManagerName) + var hubAddOnManagerDeployment = fmt.Sprintf("%s-addon-manager-controller", clusterManagerName) + var hubRegistrationClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName) + var hubRegistrationWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) + var hubWorkWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) + var hubAddOnManagerClusterRole = fmt.Sprintf("open-cluster-management:%s-addon-manager:controller", clusterManagerName) + var hubRegistrationSA = fmt.Sprintf("%s-registration-controller-sa", clusterManagerName) + var hubRegistrationWebhookSA = fmt.Sprintf("%s-registration-webhook-sa", clusterManagerName) + var hubWorkWebhookSA = 
fmt.Sprintf("%s-work-webhook-sa", clusterManagerName) + var hubAddOnManagerSA = fmt.Sprintf("%s-addon-manager-controller-sa", clusterManagerName) ginkgo.BeforeEach(func() { hostedCtx, hostedCancel = context.WithCancel(context.Background()) @@ -92,9 +103,6 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) // Check clusterrole/clusterrolebinding - hubRegistrationClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName) - hubRegistrationWebhookClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) - hubWorkWebhookClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) gomega.Eventually(func() error { if _, err := hostedKubeClient.RbacV1().ClusterRoles().Get(hostedCtx, hubRegistrationClusterRole, metav1.GetOptions{}); err != nil { return err @@ -133,9 +141,6 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) // Check service account - hubRegistrationSA := fmt.Sprintf("%s-registration-controller-sa", clusterManagerName) - hubRegistrationWebhookSA := fmt.Sprintf("%s-registration-webhook-sa", clusterManagerName) - hubWorkWebhookSA := fmt.Sprintf("%s-work-webhook-sa", clusterManagerName) gomega.Eventually(func() error { if _, err := hostedKubeClient.CoreV1().ServiceAccounts(hubNamespaceHosted).Get(hostedCtx, hubRegistrationSA, metav1.GetOptions{}); err != nil { return err @@ -261,6 +266,135 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { util.AssertClusterManagerCondition(clusterManagerName, hostedOperatorClient, "Applied", "ClusterManagerApplied", metav1.ConditionTrue) }) + ginkgo.It("should have expected resource created/deleted successfully when feature gates AddOnManager enabled/disabled", func() { + // Check addon manager default mode + clusterManager, err := 
hostedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() bool { + if clusterManager.Spec.AddOnManagerConfiguration.Mode == operatorapiv1.ComponentModeTypeDisable { + return true + } + return false + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check addon manager enabled mode + clusterManager.Spec.AddOnManagerConfiguration.Mode = operatorapiv1.ComponentModeTypeEnable + _, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() bool { + if clusterManager.Spec.AddOnManagerConfiguration.Mode == operatorapiv1.ComponentModeTypeEnable { + return true + } + return false + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check clusterrole/clusterrolebinding + gomega.Eventually(func() error { + if _, err := hostedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubAddOnManagerClusterRole, metav1.GetOptions{}); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + gomega.Eventually(func() error { + if _, err := hostedKubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubAddOnManagerClusterRole, metav1.GetOptions{}); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // Check service account + gomega.Eventually(func() error { + if _, err := hostedKubeClient.CoreV1().ServiceAccounts(hubNamespaceHosted).Get(context.Background(), hubAddOnManagerSA, metav1.GetOptions{}); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // Check deployment + gomega.Eventually(func() error { + if _, err := 
hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get(context.Background(), hubAddOnManagerDeployment, metav1.GetOptions{}); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // Check if relatedResources are correct + gomega.Eventually(func() error { + actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + if len(actual.Status.RelatedResources) != 38 { + return fmt.Errorf("should get 38 relatedResources, actual got %v", len(actual.Status.RelatedResources)) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Check addon manager disable mode + clusterManager, err = hostedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + clusterManager.Spec.AddOnManagerConfiguration.Mode = operatorapiv1.ComponentModeTypeDisable + clusterManager, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() bool { + if clusterManager.Spec.AddOnManagerConfiguration.Mode == operatorapiv1.ComponentModeTypeDisable { + return true + } + return false + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check clusterrole/clusterrolebinding + gomega.Eventually(func() bool { + _, err := hostedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubAddOnManagerClusterRole, metav1.GetOptions{}) + if err == nil { + return false + } + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + gomega.Eventually(func() bool { + _, err := hostedKubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubAddOnManagerClusterRole, 
metav1.GetOptions{}) + if err == nil { + return false + } + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check service account + gomega.Eventually(func() bool { + _, err := hostedKubeClient.CoreV1().ServiceAccounts(hubNamespaceHosted).Get(context.Background(), hubAddOnManagerSA, metav1.GetOptions{}) + if err == nil { + return false + } + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check deployment + gomega.Eventually(func() bool { + _, err := hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get(context.Background(), hubAddOnManagerDeployment, metav1.GetOptions{}) + if err == nil { + return false + } + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check if relatedResources are correct + gomega.Eventually(func() error { + actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + if len(actual.Status.RelatedResources) != 34 { + return fmt.Errorf("should get 34 relatedResources, actual got %v", len(actual.Status.RelatedResources)) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + }) + ginkgo.It("Deployment should be updated when clustermanager is changed", func() { gomega.Eventually(func() error { if _, err := hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get(hostedCtx, hubRegistrationDeployment, metav1.GetOptions{}); err != nil { diff --git a/test/integration/clustermanager_test.go b/test/integration/clustermanager_test.go index 137901577..e65bd4de6 100644 --- a/test/integration/clustermanager_test.go +++ b/test/integration/clustermanager_test.go @@ -11,12 +11,14 @@ import ( v1 "open-cluster-management.io/api/operator/v1" appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" "k8s.io/client-go/util/cert" "github.com/openshift/library-go/pkg/controller/controllercmd" + operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/registration-operator/pkg/helpers" "open-cluster-management.io/registration-operator/pkg/operators/clustermanager" certrotation "open-cluster-management.io/registration-operator/pkg/operators/clustermanager/controllers/certrotationcontroller" @@ -50,6 +52,15 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { var hubPlacementDeployment = fmt.Sprintf("%s-placement-controller", clusterManagerName) var hubRegistrationWebhookDeployment = fmt.Sprintf("%s-registration-webhook", clusterManagerName) var hubWorkWebhookDeployment = fmt.Sprintf("%s-work-webhook", clusterManagerName) + var hubAddOnManagerDeployment = fmt.Sprintf("%s-addon-manager-controller", clusterManagerName) + var hubRegistrationClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName) + var hubRegistrationWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) + var hubWorkWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) + var hubAddOnManagerClusterRole = fmt.Sprintf("open-cluster-management:%s-addon-manager:controller", clusterManagerName) + var hubRegistrationSA = fmt.Sprintf("%s-registration-controller-sa", clusterManagerName) + var hubRegistrationWebhookSA = fmt.Sprintf("%s-registration-webhook-sa", clusterManagerName) + var hubWorkWebhookSA = fmt.Sprintf("%s-work-webhook-sa", clusterManagerName) + var hubAddOnManagerSA = fmt.Sprintf("%s-addon-manager-controller-sa", clusterManagerName) ginkgo.BeforeEach(func() { var ctx context.Context @@ -76,9 +87,6 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { return nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) // Check 
clusterrole/clusterrolebinding - hubRegistrationClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName) - hubRegistrationWebhookClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) - hubWorkWebhookClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName) gomega.Eventually(func() error { if _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}); err != nil { return err @@ -119,9 +127,6 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) // Check service account - hubRegistrationSA := fmt.Sprintf("%s-registration-controller-sa", clusterManagerName) - hubRegistrationWebhookSA := fmt.Sprintf("%s-registration-webhook-sa", clusterManagerName) - hubWorkWebhookSA := fmt.Sprintf("%s-work-webhook-sa", clusterManagerName) gomega.Eventually(func() error { if _, err := kubeClient.CoreV1().ServiceAccounts(hubNamespace).Get(context.Background(), hubRegistrationSA, metav1.GetOptions{}); err != nil { return err @@ -246,6 +251,135 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { util.AssertClusterManagerCondition(clusterManagerName, operatorClient, "Applied", "ClusterManagerApplied", metav1.ConditionTrue) }) + ginkgo.It("should have expected resource created/deleted successfully when feature gates AddOnManager enabled/disabled", func() { + // Check addon manager default mode + clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() bool { + if clusterManager.Spec.AddOnManagerConfiguration.Mode == operatorapiv1.ComponentModeTypeDisable { + return true + } + return false + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + 
// Check addon manager enabled mode + clusterManager.Spec.AddOnManagerConfiguration.Mode = operatorapiv1.ComponentModeTypeEnable + _, err = operatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() bool { + if clusterManager.Spec.AddOnManagerConfiguration.Mode == operatorapiv1.ComponentModeTypeEnable { + return true + } + return false + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check clusterrole/clusterrolebinding + gomega.Eventually(func() error { + if _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubAddOnManagerClusterRole, metav1.GetOptions{}); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + gomega.Eventually(func() error { + if _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubAddOnManagerClusterRole, metav1.GetOptions{}); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // Check service account + gomega.Eventually(func() error { + if _, err := kubeClient.CoreV1().ServiceAccounts(hubNamespace).Get(context.Background(), hubAddOnManagerSA, metav1.GetOptions{}); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // Check deployment + gomega.Eventually(func() error { + if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubAddOnManagerDeployment, metav1.GetOptions{}); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // Check if relatedResources are correct + gomega.Eventually(func() error { + actual, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + if 
len(actual.Status.RelatedResources) != 38 { + return fmt.Errorf("should get 38 relatedResources, actual got %v", len(actual.Status.RelatedResources)) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Check addon manager disable mode + clusterManager, err = operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + clusterManager.Spec.AddOnManagerConfiguration.Mode = operatorapiv1.ComponentModeTypeDisable + clusterManager, err = operatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() bool { + if clusterManager.Spec.AddOnManagerConfiguration.Mode == operatorapiv1.ComponentModeTypeDisable { + return true + } + return false + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check clusterrole/clusterrolebinding + gomega.Eventually(func() bool { + _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubAddOnManagerClusterRole, metav1.GetOptions{}) + if err == nil { + return false + } + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + gomega.Eventually(func() bool { + _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubAddOnManagerClusterRole, metav1.GetOptions{}) + if err == nil { + return false + } + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check service account + gomega.Eventually(func() bool { + _, err := kubeClient.CoreV1().ServiceAccounts(hubNamespace).Get(context.Background(), hubAddOnManagerSA, metav1.GetOptions{}) + if err == nil { + return false + } + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check deployment + 
gomega.Eventually(func() bool { + _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubAddOnManagerDeployment, metav1.GetOptions{}) + if err == nil { + return false + } + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Check if relatedResources are correct + gomega.Eventually(func() error { + actual, err := operatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + if len(actual.Status.RelatedResources) != 34 { + return fmt.Errorf("should get 34 relatedResources, actual got %v", len(actual.Status.RelatedResources)) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + }) + ginkgo.It("Deployment should be updated when clustermanager is changed", func() { gomega.Eventually(func() error { if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil { @@ -316,7 +450,6 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { } return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) ginkgo.It("Deployment should be added nodeSelector and toleration when add nodePlacement into clustermanager", func() { @@ -408,6 +541,7 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { return nil }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) }) + }) ginkgo.Context("Cluster manager statuses", func() { diff --git a/test/integration/integration_suite_test.go b/test/integration/integration_suite_test.go index 5e6c3de5f..e9f26b1cc 100644 --- a/test/integration/integration_suite_test.go +++ b/test/integration/integration_suite_test.go @@ -132,6 +132,7 @@ var _ = ginkgo.BeforeSuite(func() { RegistrationImagePullSpec: "quay.io/open-cluster-management/registration", WorkImagePullSpec: 
"quay.io/open-cluster-management/work", PlacementImagePullSpec: "quay.io/open-cluster-management/placement", + AddOnManagerImagePullSpec: "quay.io/open-cluster-management/addon-manager", DeployOption: operatorapiv1.ClusterManagerDeployOption{ Mode: operatorapiv1.InstallModeDefault, }, @@ -155,6 +156,7 @@ var _ = ginkgo.BeforeSuite(func() { RegistrationImagePullSpec: "quay.io/open-cluster-management/registration", WorkImagePullSpec: "quay.io/open-cluster-management/work", PlacementImagePullSpec: "quay.io/open-cluster-management/placement", + AddOnManagerImagePullSpec: "quay.io/open-cluster-management/addon-manager", DeployOption: operatorapiv1.ClusterManagerDeployOption{ Mode: operatorapiv1.InstallModeHosted, Hosted: &operatorapiv1.HostedClusterManagerConfiguration{