Mirror of https://github.com/open-cluster-management-io/ocm.git (synced 2026-02-14 10:00:11 +00:00)
✨ Start import in registration by feature gate. (#775)
* Update clustermanager operator to support capi

  Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Update operator to start importer

  Signed-off-by: Jian Qiu <jqiu@redhat.com>

---------

Signed-off-by: Jian Qiu <jqiu@redhat.com>
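The importer only runs when the ClusterImporter registration feature gate is switched on. A minimal sketch of enabling it through the operator API, using only types that appear in this diff (the client construction and the default CR name "cluster-manager" are assumptions, not part of the commit):

    cm, err := operatorClient.OperatorV1().ClusterManagers().Get(
        context.Background(), "cluster-manager", metav1.GetOptions{}) // default CR name assumed
    if err != nil {
        return err
    }
    cm.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{
        FeatureGates: []operatorapiv1.FeatureGate{{
            Feature: string(feature.ClusterImporter),
            Mode:    operatorapiv1.FeatureGateModeTypeEnable,
        }},
    }
    _, err = operatorClient.OperatorV1().ClusterManagers().Update(
        context.Background(), cm, metav1.UpdateOptions{})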
@@ -150,3 +150,6 @@ rules:
 - apiGroups: ["multicluster.x-k8s.io"]
   resources: ["clusterprofiles/status"]
   verbs: ["update", "patch"]
+- apiGroups: [ "cluster.x-k8s.io" ]
+  resources: [ "clusters" ]
+  verbs: ["get", "list", "watch"]

@@ -42,6 +42,8 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
+        - name: AGENT_IMAGE
+          value: {{ template "operatorImage" . }}
         securityContext:
           {{- toYaml .Values.securityContext | nindent 10 }}
         livenessProbe:

@@ -53,6 +53,8 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
+        - name: AGENT_IMAGE
+          value: quay.io/open-cluster-management/registration-operator:latest
         securityContext:
           allowPrivilegeEscalation: false
           capabilities:

@@ -152,3 +152,6 @@ rules:
 - apiGroups: ["multicluster.x-k8s.io"]
   resources: ["clusterprofiles/status"]
   verbs: ["update", "patch"]
+- apiGroups: [ "cluster.x-k8s.io" ]
+  resources: [ "clusters" ]
+  verbs: ["get", "list", "watch"]

@@ -59,7 +59,7 @@ metadata:
     categories: Integration & Delivery,OpenShift Optional
     certified: "false"
     containerImage: quay.io/open-cluster-management/registration-operator:latest
-    createdAt: "2024-12-02T08:08:47Z"
+    createdAt: "2024-12-18T07:51:41Z"
     description: Manages the installation and upgrade of the ClusterManager.
     operators.operatorframework.io/builder: operator-sdk-v1.32.0
     operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
@@ -525,6 +525,14 @@ spec:
               verbs:
               - update
              - patch
+            - apiGroups:
+              - cluster.x-k8s.io
+              resources:
+              - clusters
+              verbs:
+              - get
+              - list
+              - watch
           serviceAccountName: cluster-manager
       deployments:
       - label:
@@ -571,6 +579,8 @@ spec:
                   valueFrom:
                     fieldRef:
                       fieldPath: metadata.name
+                - name: AGENT_IMAGE
+                  value: quay.io/open-cluster-management/registration-operator:latest
                 image: quay.io/open-cluster-management/registration-operator:latest
                 imagePullPolicy: IfNotPresent
                 livenessProbe:

@@ -31,7 +31,7 @@ metadata:
     categories: Integration & Delivery,OpenShift Optional
     certified: "false"
     containerImage: quay.io/open-cluster-management/registration-operator:latest
-    createdAt: "2024-12-02T08:08:47Z"
+    createdAt: "2024-12-18T07:51:42Z"
     description: Manages the installation and upgrade of the Klusterlet.
     operators.operatorframework.io/builder: operator-sdk-v1.32.0
     operators.operatorframework.io/project_layout: go.kubebuilder.io/v3

@@ -100,3 +100,20 @@ rules:
 - apiGroups: ["register.open-cluster-management.io"]
   resources: ["managedclusters/accept"]
   verbs: ["update"]
+{{if .ClusterImporterEnabled}}
+- apiGroups: ["cluster.x-k8s.io"]
+  resources: ["clusters"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources: ["secrets"]
+  verbs: ["get"]
+{{end}}
+{{if .ClusterProfileEnabled}}
+# Allow hub to manage clusterprofile
+- apiGroups: ["multicluster.x-k8s.io"]
+  resources: ["clusterprofiles"]
+  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+- apiGroups: ["multicluster.x-k8s.io"]
+  resources: ["clusterprofiles/status"]
+  verbs: ["update", "patch"]
+{{end}}

@@ -57,6 +57,9 @@ spec:
         {{if .AutoApproveUsers}}
         - "--cluster-auto-approval-users={{ .AutoApproveUsers }}"
         {{end}}
+        {{if .ClusterImporterEnabled}}
+        - "--agent-image={{ .AgentImage }}"
+        {{end}}
         {{ if .HostedMode }}
         - "--kubeconfig=/var/run/secrets/hub/kubeconfig"
         {{ end }}

@@ -20,7 +20,9 @@ type HubConfig struct {
     AddOnManagerEnabled      bool
     MWReplicaSetEnabled      bool
     ClusterProfileEnabled    bool
+    AgentImage               string
     CloudEventsDriverEnabled bool
+    ClusterImporterEnabled   bool
     WorkDriver               string
     AutoApproveUsers         string
     ImagePullSecret          string

@@ -81,6 +81,9 @@ type ImagesConfig struct {
     // The image pull secret name is open-cluster-management-image-pull-credentials.
     // Please set the userName and password if you use a private image registry.
     ImageCredentials ImageCredentials `json:"imageCredentials,omitempty"`
+    // Overrides is to override the image of the component, if this is specified,
+    // the registry and tag will be ignored.
+    Overrides Overrides `json:"overrides,omitempty"`
 }

 type ImageCredentials struct {
@@ -90,6 +93,19 @@ type ImageCredentials struct {
     DockerConfigJson string `json:"dockerConfigJson,omitempty"`
 }

+type Overrides struct {
+    // RegistrationImage is the image of the registration component.
+    RegistrationImage string `json:"registrationImage,omitempty"`
+    // WorkImage is the image of the work component.
+    WorkImage string `json:"workImage,omitempty"`
+    // OperatorImage is the image of the operator component.
+    OperatorImage string `json:"operatorImage,omitempty"`
+    // PlacementImage is the image of the placement component
+    PlacementImage string `json:"placementImage,omitempty"`
+    // AddOnManagerImage is the image of the addOnManager component
+    AddOnManagerImage string `json:"addOnManagerImage,omitempty"`
+}
+
 type ClusterManagerConfig struct {
     // Create determines if create the clusterManager CR, default is true.
     Create bool `json:"create,omitempty"`

@@ -4,6 +4,7 @@ import (
     "context"
     "encoding/base64"
     errorhelpers "errors"
+    "os"
     "strings"
     "time"

@@ -184,6 +185,12 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
         config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration",
             registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates)
         config.ClusterProfileEnabled = helpers.FeatureGateEnabled(registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates, ocmfeature.ClusterProfile)
+        // setting for cluster importer.
+        // TODO(qiujian16) since this is disabled by feature gate, the image is obtained from cluster manager's env var. Need a more elegant approach.
+        config.ClusterImporterEnabled = helpers.FeatureGateEnabled(registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates, ocmfeature.ClusterImporter)
+        if config.ClusterImporterEnabled {
+            config.AgentImage = os.Getenv("AGENT_IMAGE")
+        }

     var workFeatureGates []operatorapiv1.FeatureGate
     if clusterManager.Spec.WorkConfiguration != nil {

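Note the plumbing this hunk establishes: the operator pod's AGENT_IMAGE environment variable (added to the operator deployments above) becomes HubConfig.AgentImage here, the registration deployment template earlier in the diff turns it into the --agent-image flag, and the importer ultimately applies it to the klusterlet chart via RenderImage further down. The TODO itself flags routing the image through an env var as a stopgap.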
@@ -62,12 +62,6 @@ var (
         "cluster-manager/hub/cluster-manager-addon-manager-serviceaccount.yaml",
     }

-    clusterProfileResourceFiles = []string{
-        // clusterprofile
-        "cluster-manager/hub/cluster-manager-clusterprofiles-clusterrole.yaml",
-        "cluster-manager/hub/cluster-manager-clusterprofiles-clusterrolebinding.yaml",
-    }
-
     // The hubHostedWebhookServiceFiles should only be deployed on the hub cluster when the deploy mode is hosted.
     hubDefaultWebhookServiceFiles = []string{
         "cluster-manager/hub/cluster-manager-registration-webhook-service.yaml",
@@ -107,14 +101,6 @@ func (c *hubReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterM
         }
     }

-    // If ClusterProfile is not enabled, remove related resources
-    if !config.ClusterProfileEnabled {
-        _, _, err := cleanResources(ctx, c.hubKubeClient, cm, config, clusterProfileResourceFiles...)
-        if err != nil {
-            return cm, reconcileStop, err
-        }
-    }
-
     hubResources := getHubResources(cm.Spec.DeployOption.Mode, config)
     var appliedErrs []error

@@ -171,10 +157,6 @@ func getHubResources(mode operatorapiv1.InstallMode, config manifests.HubConfig)
         hubResources = append(hubResources, mwReplicaSetResourceFiles...)
     }

-    if config.ClusterProfileEnabled {
-        hubResources = append(hubResources, clusterProfileResourceFiles...)
-    }
-
     // the hubHostedWebhookServiceFiles are only used in hosted mode
     if helpers.IsHosted(mode) {
         hubResources = append(hubResources, hubHostedWebhookServiceFiles...)

@@ -4,6 +4,7 @@ import "github.com/spf13/pflag"

 type Options struct {
     APIServerURL string
+    AgentImage   string
 }

 func New() *Options {
@@ -14,4 +15,7 @@ func New() *Options {
 func (m *Options) AddFlags(fs *pflag.FlagSet) {
     fs.StringVar(&m.APIServerURL, "hub-apiserver-url", m.APIServerURL,
         "APIServer URL of the hub cluster that the spoke cluster can access, Only used for spoke cluster import")
+    fs.StringVar(&m.AgentImage, "agent-image", m.AgentImage,
+        "Image of the agent to import; only singleton mode is used for the importer, and only the registration-operator "+
+            "image is needed.")
 }

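For reference, a sketch of how the new flag surfaces once these Options are wired into a flag set; the package alias importeroptions and the example image value are illustrative assumptions, not part of the commit:

    opts := importeroptions.New() // hypothetical alias for the options package above
    fs := pflag.NewFlagSet("registration-hub", pflag.ExitOnError)
    opts.AddFlags(fs)
    // e.g. --agent-image=quay.io/open-cluster-management/registration-operator:latest
    if err := fs.Parse(os.Args[1:]); err != nil {
        panic(err)
    }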
@@ -6,6 +6,8 @@ import (

     "github.com/ghodss/yaml"
     authv1 "k8s.io/api/authentication/v1"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
     clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
@@ -16,6 +18,8 @@ import (
     "open-cluster-management.io/ocm/pkg/operator/helpers/chart"
 )

+const imagePullSecretName = "open-cluster-management-image-pull-credentials"
+
 func RenderBootstrapHubKubeConfig(
     kubeClient kubernetes.Interface, apiServerURL string) KlusterletConfigRenderer {
     return func(ctx context.Context, config *chart.KlusterletChartConfig) (*chart.KlusterletChartConfig, error) {
@@ -91,3 +95,34 @@ func RenderBootstrapHubKubeConfig(
         return config, nil
     }
 }
+
+func RenderImage(image string) KlusterletConfigRenderer {
+    return func(ctx context.Context, config *chart.KlusterletChartConfig) (*chart.KlusterletChartConfig, error) {
+        if len(image) == 0 {
+            return config, nil
+        }
+        config.Images.Overrides = chart.Overrides{
+            OperatorImage: image,
+        }
+        return config, nil
+    }
+}
+
+func RenderImagePullSecret(kubeClient kubernetes.Interface, namespace string) KlusterletConfigRenderer {
+    return func(ctx context.Context, config *chart.KlusterletChartConfig) (*chart.KlusterletChartConfig, error) {
+        secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, imagePullSecretName, metav1.GetOptions{})
+        switch {
+        case errors.IsNotFound(err):
+            return config, nil
+        case err != nil:
+            return config, err
+        }
+
+        if len(secret.Data[corev1.DockerConfigJsonKey]) == 0 {
+            return config, nil
+        }
+
+        config.Images.ImageCredentials.DockerConfigJson = string(secret.Data[corev1.DockerConfigJsonKey])
+        return config, nil
+    }
+}

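A KlusterletConfigRenderer, per the signature above, is just a function from a chart config to a possibly modified chart config, so renderers compose by sequential application. A sketch of that chaining under that assumption (the helper name renderAll is illustrative; the importer's actual loop is not shown in this diff):

    func renderAll(ctx context.Context, config *chart.KlusterletChartConfig,
        renderers ...KlusterletConfigRenderer) (*chart.KlusterletChartConfig, error) {
        var err error
        // Apply each renderer in order, threading the config through.
        for _, r := range renderers {
            if config, err = r(ctx, config); err != nil {
                return nil, err
            }
        }
        return config, nil
    }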
@@ -97,3 +97,91 @@ func TestRenderBootstrapHubKubeConfig(t *testing.T) {
         })
     }
 }
+
+func TestRenderImage(t *testing.T) {
+    cases := []struct {
+        name          string
+        image         string
+        expectedImage string
+    }{
+        {
+            name: "image is not set",
+        },
+        {
+            name:          "image is set",
+            image:         "test:latest",
+            expectedImage: "test:latest",
+        },
+    }
+
+    for _, c := range cases {
+        t.Run(c.name, func(t *testing.T) {
+            config := &chart.KlusterletChartConfig{}
+            render := RenderImage(c.image)
+            config, err := render(context.TODO(), config)
+            if err != nil {
+                t.Fatalf("failed to render image: %v", err)
+            }
+            if config.Images.Overrides.OperatorImage != c.expectedImage {
+                t.Errorf("expected: %s, got: %s", c.expectedImage, config.Images.Overrides.OperatorImage)
+            }
+        })
+    }
+}
+
+func TestRenderImagePullSecret(t *testing.T) {
+    cases := []struct {
+        name     string
+        secrets  []runtime.Object
+        expected string
+    }{
+        {
+            name:    "no image secret",
+            secrets: []runtime.Object{},
+        },
+        {
+            name: "secret does not have the correct key",
+            secrets: []runtime.Object{
+                &corev1.Secret{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      imagePullSecretName,
+                        Namespace: "test",
+                    },
+                    Data: map[string][]byte{
+                        "docker": []byte("test"),
+                    },
+                },
+            },
+        },
+        {
+            name: "secret has the correct key",
+            secrets: []runtime.Object{
+                &corev1.Secret{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      imagePullSecretName,
+                        Namespace: "test",
+                    },
+                    Data: map[string][]byte{
+                        corev1.DockerConfigJsonKey: []byte("test"),
+                    },
+                },
+            },
+            expected: "test",
+        },
+    }
+
+    for _, c := range cases {
+        t.Run(c.name, func(t *testing.T) {
+            client := kubefake.NewClientset(c.secrets...)
+            config := &chart.KlusterletChartConfig{}
+            render := RenderImagePullSecret(client, "test")
+            config, err := render(context.TODO(), config)
+            if err != nil {
+                t.Fatalf("failed to render image pull secret: %v", err)
+            }
+            if config.Images.ImageCredentials.DockerConfigJson != c.expected {
+                t.Errorf("expected: %s, got: %s", c.expected, config.Images.ImageCredentials.DockerConfigJson)
+            }
+        })
+    }
+}

@@ -260,6 +260,8 @@ func (m *HubManagerOptions) RunControllerManagerWithInformers(
         clusterImporter = importer.NewImporter(
             []importer.KlusterletConfigRenderer{
                 importer.RenderBootstrapHubKubeConfig(kubeClient, m.ImportOption.APIServerURL),
+                importer.RenderImage(m.ImportOption.AgentImage),
+                importer.RenderImagePullSecret(kubeClient, controllerContext.OperatorNamespace),
             },
             clusterClient,
             clusterInformers.Cluster().V1().ManagedClusters(),

@@ -74,7 +74,6 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() {
     var hubAddonManagerDeployment = fmt.Sprintf("%s-addon-manager-controller", clusterManagerName)
     var hubRegistrationClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName)
     var hubRegistrationWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName)
-    var hubRegistrationClusterProfileRole = fmt.Sprintf("open-cluster-management:%s-clusterprofile:controller", clusterManagerName)
     var hubWorkWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-work:webhook", clusterManagerName)
     var hubWorkControllerClusterRole = fmt.Sprintf("open-cluster-management:%s-work:controller", clusterManagerName)
     var hubAddOnManagerClusterRole = fmt.Sprintf("open-cluster-management:%s-addon-manager:controller", clusterManagerName)
@@ -763,130 +762,6 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() {
         }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
     })

-    ginkgo.It("should have expected resource created/deleted when feature gates ClusterProfile enabled/disabled", func() {
-        ginkgo.By("Enable ClusterProfile feature gate")
-        gomega.Eventually(func() error {
-            clusterManager, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get(
-                context.Background(), clusterManagerName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            featureGate := []operatorapiv1.FeatureGate{
-                {
-                    Feature: string(feature.ClusterProfile),
-                    Mode:    operatorapiv1.FeatureGateModeTypeEnable,
-                },
-            }
-            if clusterManager.Spec.RegistrationConfiguration != nil {
-                for _, fg := range clusterManager.Spec.RegistrationConfiguration.FeatureGates {
-                    if fg.Feature != string(feature.ClusterProfile) {
-                        featureGate = append(featureGate, fg)
-                    }
-                }
-            }
-            clusterManager.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{
-                FeatureGates: featureGate,
-            }
-            _, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update(
-                context.Background(), clusterManager, metav1.UpdateOptions{})
-            return err
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
-
-        // Check clusterrole/clusterrolebinding
-        gomega.Eventually(func() error {
-            if _, err := hostedKubeClient.RbacV1().ClusterRoles().Get(
-                context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}); err != nil {
-                return err
-            }
-            return nil
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
-        gomega.Eventually(func() error {
-            if _, err := hostedKubeClient.RbacV1().ClusterRoleBindings().Get(
-                context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}); err != nil {
-                return err
-            }
-            return nil
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
-
-        // Check if relatedResources are correct
-        gomega.Eventually(func() error {
-            actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get(
-                context.Background(), clusterManagerName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            // increase 3 resources clusterrole/clusterrolebinding and crd
-            if len(actual.Status.RelatedResources) != 48 {
-                return fmt.Errorf("should get 48 relatedResources, actual got %v, %v",
-                    len(actual.Status.RelatedResources), actual.Status.RelatedResources)
-            }
-            return nil
-        }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
-
-        ginkgo.By("Revert ClusterProfile to disable mode")
-        // Check ClusterProfile disable
-        gomega.Eventually(func() error {
-            clusterManager, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get(
-                context.Background(), clusterManagerName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            featureGate := []operatorapiv1.FeatureGate{
-                {
-                    Feature: string(feature.ClusterProfile),
-                    Mode:    operatorapiv1.FeatureGateModeTypeDisable,
-                },
-            }
-            if clusterManager.Spec.RegistrationConfiguration != nil {
-                for _, fg := range clusterManager.Spec.RegistrationConfiguration.FeatureGates {
-                    if fg.Feature != string(feature.ClusterProfile) {
-                        featureGate = append(featureGate, fg)
-                    }
-                }
-            }
-            clusterManager.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{
-                FeatureGates: featureGate,
-            }
-            _, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update(
-                context.Background(), clusterManager, metav1.UpdateOptions{})
-            return err
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
-
-        // Check clusterrole/clusterrolebinding
-        gomega.Eventually(func() bool {
-            _, err := hostedKubeClient.RbacV1().ClusterRoles().Get(
-                context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{})
-            if err == nil {
-                return false
-            }
-            return errors.IsNotFound(err)
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
-        gomega.Eventually(func() bool {
-            _, err := hostedKubeClient.RbacV1().ClusterRoleBindings().Get(
-                context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{})
-            if err == nil {
-                return false
-            }
-            return errors.IsNotFound(err)
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
-
-        // Check if relatedResources are correct
-        gomega.Eventually(func() error {
-            actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get(
-                context.Background(), clusterManagerName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            // reduce 2 resources clusterrole/clusterrolebinding
-            if len(actual.Status.RelatedResources) != 46 {
-                return fmt.Errorf("should get 46 relatedResources, actual got %v, %v",
-                    len(actual.Status.RelatedResources), actual.Status.RelatedResources)
-            }
-            return nil
-        }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
-
-    })

     ginkgo.It("Deployment should be updated when clustermanager is changed", func() {
         gomega.Eventually(func() error {
             if _, err := hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get(hostedCtx, hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
@@ -948,18 +823,6 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() {

             return nil
         }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())

-        // Check if relatedResources are correct
-        gomega.Eventually(func() error {
-            actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get(hostedCtx, clusterManagerName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            if len(actual.Status.RelatedResources) != 46 {
-                return fmt.Errorf("should get 46 relatedResources, actual got %v", len(actual.Status.RelatedResources))
-            }
-            return nil
-        }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
     })

     ginkgo.It("Deployment should be added nodeSelector and toleration when add nodePlacement into clustermanager", func() {

@@ -3,6 +3,7 @@ package operator
 import (
     "context"
     "fmt"
+    "os"
     "time"

     "github.com/onsi/ginkgo/v2"
@@ -12,6 +13,7 @@ import (
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/client-go/rest"
     "k8s.io/client-go/util/cert"

@@ -63,7 +65,6 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() {
     var hubAddonManagerDeployment = fmt.Sprintf("%s-addon-manager-controller", clusterManagerName)
     var hubRegistrationClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName)
    var hubRegistrationWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName)
-    var hubRegistrationClusterProfileRole = fmt.Sprintf("open-cluster-management:%s-clusterprofile:controller", clusterManagerName)
     var hubWorkWebhookClusterRole = fmt.Sprintf("open-cluster-management:%s-work:webhook", clusterManagerName)
     var hubWorkControllerClusterRole = fmt.Sprintf("open-cluster-management:%s-work:controller", clusterManagerName)
     var hubAddOnManagerClusterRole = fmt.Sprintf("open-cluster-management:%s-addon-manager:controller", clusterManagerName)
@@ -760,36 +761,21 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() {
             return err
         }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())

-        // Check clusterrole/clusterrolebinding
+        // Check clusterrole
         gomega.Eventually(func() error {
-            if _, err := kubeClient.RbacV1().ClusterRoles().Get(
-                context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}); err != nil {
-                return err
-            }
-            return nil
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
-        gomega.Eventually(func() error {
-            if _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(
-                context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{}); err != nil {
-                return err
-            }
-            return nil
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
-
-        // Check if relatedResources are correct
-        gomega.Eventually(func() error {
-            actual, err := operatorClient.OperatorV1().ClusterManagers().Get(
-                context.Background(), clusterManagerName, metav1.GetOptions{})
+            cr, err := kubeClient.RbacV1().ClusterRoles().Get(
+                context.Background(), hubRegistrationClusterRole, metav1.GetOptions{})
             if err != nil {
                 return err
             }
-            // increase 3 resources clusterrole/clusterrolebinding and crd
-            if len(actual.Status.RelatedResources) != 48 {
-                return fmt.Errorf("should get 48 relatedResources, actual got %v, %v",
-                    len(actual.Status.RelatedResources), actual.Status.RelatedResources)
+            for _, rule := range cr.Rules {
+                apiGroupSet := sets.New[string](rule.APIGroups...)
+                if apiGroupSet.Has("multicluster.x-k8s.io") {
+                    return nil
+                }
             }
-            return nil
-        }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+            return fmt.Errorf("expected multicluster.x-k8s.io rules to exist")
+        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())

         ginkgo.By("Revert ClusterProfile to disable mode")
         // Check ClusterProfile disable
@@ -820,41 +806,141 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() {
             return err
         }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())

-        // Check clusterrole/clusterrolebinding
-        gomega.Eventually(func() bool {
-            _, err := kubeClient.RbacV1().ClusterRoles().Get(
-                context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{})
-            if err == nil {
-                return false
-            }
-            return errors.IsNotFound(err)
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
-        gomega.Eventually(func() bool {
-            _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(
-                context.Background(), hubRegistrationClusterProfileRole, metav1.GetOptions{})
-            if err == nil {
-                return false
-            }
-            return errors.IsNotFound(err)
-        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
-
-        // Check if relatedResources are correct
         gomega.Eventually(func() error {
-            actual, err := operatorClient.OperatorV1().ClusterManagers().Get(
-                context.Background(), clusterManagerName, metav1.GetOptions{})
+            cr, err := kubeClient.RbacV1().ClusterRoles().Get(
+                context.Background(), hubRegistrationClusterRole, metav1.GetOptions{})
             if err != nil {
                 return err
             }
-            // reduce 2 resources clusterrole/clusterrolebinding
-            if len(actual.Status.RelatedResources) != 46 {
-                return fmt.Errorf("should get 46 relatedResources, actual got %v, %v",
-                    len(actual.Status.RelatedResources), actual.Status.RelatedResources)
+            for _, rule := range cr.Rules {
+                apiGroupSet := sets.New[string](rule.APIGroups...)
+                if apiGroupSet.Has("multicluster.x-k8s.io") {
+                    return fmt.Errorf("expected multicluster.x-k8s.io rules to not exist")
+                }
             }
             return nil
-        }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
     })

+    ginkgo.It("should have expected resource created/deleted when feature gates ClusterImporter enabled/disabled", func() {
+        ginkgo.By("Enable ClusterImporter feature gate")
+        os.Setenv("AGENT_IMAGE", "test-agent:latest")
+        gomega.Eventually(func() error {
+            clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get(
+                context.Background(), clusterManagerName, metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            featureGate := []operatorapiv1.FeatureGate{
+                {
+                    Feature: string(feature.ClusterImporter),
+                    Mode:    operatorapiv1.FeatureGateModeTypeEnable,
+                },
+            }
+            if clusterManager.Spec.RegistrationConfiguration != nil {
+                for _, fg := range clusterManager.Spec.RegistrationConfiguration.FeatureGates {
+                    if fg.Feature != string(feature.ClusterImporter) {
+                        featureGate = append(featureGate, fg)
+                    }
+                }
+            }
+            clusterManager.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{
+                FeatureGates: featureGate,
+            }
+            _, err = operatorClient.OperatorV1().ClusterManagers().Update(
+                context.Background(), clusterManager, metav1.UpdateOptions{})
+            return err
+        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
+
+        // Check clusterrole
+        gomega.Eventually(func() error {
+            cr, err := kubeClient.RbacV1().ClusterRoles().Get(
+                context.Background(), hubRegistrationClusterRole, metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            for _, rule := range cr.Rules {
+                apiGroupSet := sets.New[string](rule.APIGroups...)
+                if apiGroupSet.Has("cluster.x-k8s.io") {
+                    return nil
+                }
+            }
+            return fmt.Errorf("expected cluster.x-k8s.io rules to exist")
+        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
+
+        // check deployment
+        gomega.Eventually(func() error {
+            deploy, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(
+                context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            argSet := sets.New[string](deploy.Spec.Template.Spec.Containers[0].Args...)
+            if argSet.Has("--agent-image=test-agent:latest") {
+                return nil
+            }
+            return fmt.Errorf("expected agent-image flag to be set")
+        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
+
+        ginkgo.By("Revert ClusterImporter to disable mode")
+        // Check ClusterImporter disable
+        gomega.Eventually(func() error {
+            clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get(
+                context.Background(), clusterManagerName, metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            featureGate := []operatorapiv1.FeatureGate{
+                {
+                    Feature: string(feature.ClusterImporter),
+                    Mode:    operatorapiv1.FeatureGateModeTypeDisable,
+                },
+            }
+            if clusterManager.Spec.RegistrationConfiguration != nil {
+                for _, fg := range clusterManager.Spec.RegistrationConfiguration.FeatureGates {
+                    if fg.Feature != string(feature.ClusterImporter) {
+                        featureGate = append(featureGate, fg)
+                    }
+                }
+            }
+            clusterManager.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{
+                FeatureGates: featureGate,
+            }
+            _, err = operatorClient.OperatorV1().ClusterManagers().Update(
+                context.Background(), clusterManager, metav1.UpdateOptions{})
+            return err
+        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
+
+        // Check deployment
+        gomega.Eventually(func() error {
+            deploy, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(
+                context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            argSet := sets.New[string](deploy.Spec.Template.Spec.Containers[0].Args...)
+            if argSet.Has("--agent-image=test-agent:latest") {
+                return fmt.Errorf("expected agent-image flag not to be set")
+            }
+            return nil
+        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
+
+        // check cluster role
+        gomega.Eventually(func() error {
+            cr, err := kubeClient.RbacV1().ClusterRoles().Get(
+                context.Background(), hubRegistrationClusterRole, metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            for _, rule := range cr.Rules {
+                apiGroupSet := sets.New[string](rule.APIGroups...)
+                if apiGroupSet.Has("cluster.x-k8s.io") {
+                    return fmt.Errorf("expected cluster.x-k8s.io rules to not exist")
+                }
+            }
+            return nil
+        }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
+    })

     ginkgo.It("Deployment should be updated when clustermanager is changed", func() {
         gomega.Eventually(func() error {
             if _, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {