diff --git a/manifests/klusterlet/managed/klusterlet-registration-clusterrole.yaml b/manifests/klusterlet/managed/klusterlet-registration-clusterrole.yaml
index dfdb12625..680de09a6 100644
--- a/manifests/klusterlet/managed/klusterlet-registration-clusterrole.yaml
+++ b/manifests/klusterlet/managed/klusterlet-registration-clusterrole.yaml
@@ -20,3 +20,7 @@ rules:
 - apiGroups: ["cluster.open-cluster-management.io"]
   resources: ["clusterclaims"]
   verbs: ["get", "list", "watch"]
+  # Allow the agent to get, list and watch clusterproperties
+- apiGroups: ["about.k8s.io"]
+  resources: ["clusterproperties"]
+  verbs: ["get", "list", "watch"]
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
index 449fe4d40..4fb2a12c7 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
@@ -209,6 +209,9 @@ type klusterletConfig struct {
 	ManagedClusterArn        string
 	ManagedClusterRoleArn    string
 	ManagedClusterRoleSuffix string
+
+	// flag to enable the about-api (ClusterProperty) support
+	AboutAPIEnabled bool
 }
 
 // If multiplehubs feature gate is enabled, using the bootstrapkubeconfigs from klusterlet CR.
@@ -389,6 +392,9 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
 		}
 		config.ClusterAnnotationsString = strings.Join(annotationsArray, ",")
 	}
+
+	config.AboutAPIEnabled = helpers.FeatureGateEnabled(
+		registrationFeatureGates, ocmfeature.DefaultSpokeRegistrationFeatureGates, ocmfeature.ClusterProperty)
 
 	config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration",
 		registrationFeatureGates, ocmfeature.DefaultSpokeRegistrationFeatureGates)
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go
index 0b42995fd..3cf5d27bf 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go
@@ -37,6 +37,7 @@ import (
 	fakeoperatorclient "open-cluster-management.io/api/client/operator/clientset/versioned/fake"
 	operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
 	fakeworkclient "open-cluster-management.io/api/client/work/clientset/versioned/fake"
+	ocmfeature "open-cluster-management.io/api/feature"
 	operatorapiv1 "open-cluster-management.io/api/operator/v1"
 	workapiv1 "open-cluster-management.io/api/work/v1"
 	"open-cluster-management.io/sdk-go/pkg/patcher"
@@ -1465,6 +1466,40 @@ func TestClusterClaimConfigInSingletonMode(t *testing.T) {
 		"", "cluster1", claimConfig)
 }
 
+// TestSyncEnableClusterProperty tests enabling the ClusterProperty feature gate
+func TestSyncEnableClusterProperty(t *testing.T) {
+	klusterlet := newKlusterlet("klusterlet", "testns", "cluster1")
+	klusterlet.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationConfiguration{
+		FeatureGates: []operatorapiv1.FeatureGate{
+			{
+				Feature: string(ocmfeature.ClusterProperty),
+				Mode:    operatorapiv1.FeatureGateModeTypeEnable,
+			},
+		},
+	}
+
+	objects := []runtime.Object{}
+	syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
+	controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
+		objects...)
+
+	err := controller.controller.sync(context.TODO(), syncContext)
+	if err != nil {
+		t.Errorf("Expected no error when sync, got %v", err)
+	}
+
+	var createCnt int
+	for _, action := range controller.apiExtensionClient.Actions() {
+		if action.GetVerb() == "create" {
+			createCnt++
+		}
+	}
+
+	if createCnt != 3 {
+		t.Errorf("Expected 3 create actions, got %d", createCnt)
+	}
+}
+
 func newKubeConfig(host string) []byte {
 	configData, _ := runtime.Encode(clientcmdlatest.Codec, &clientcmdapi.Config{
 		Clusters: map[string]*clientcmdapi.Cluster{"test-cluster": {
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go
index daee7baf4..ad0e39426 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go
@@ -26,6 +26,8 @@ var (
 		"klusterlet/managed/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml",
 		"klusterlet/managed/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml",
 	}
+
+	aboutAPIFile = "klusterlet/managed/clusterproperties.crd.yaml"
 )
 
 // crdReconcile apply crds to managed clusters
@@ -41,6 +43,13 @@ func (r *crdReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
 		r.managedClusterClients.apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions(),
 		crdmanager.EqualV1,
 	)
+
+	var crdFiles []string
+	crdFiles = append(crdFiles, crdV1StaticFiles...)
+	if config.AboutAPIEnabled {
+		crdFiles = append(crdFiles, aboutAPIFile)
+	}
+
 	applyErr := crdManager.Apply(ctx,
 		func(name string) ([]byte, error) {
 			template, err := manifests.KlusterletManifestFiles.ReadFile(name)
@@ -51,7 +60,7 @@ func (r *crdReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
 			helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
 			return objData, nil
 		},
-		crdV1StaticFiles...,
+		crdFiles...,
 	)
 
 	if applyErr != nil {
@@ -74,6 +83,13 @@ func (r *crdReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
 		r.managedClusterClients.apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions(),
 		crdmanager.EqualV1,
 	)
+
+	var crdFiles []string
+	crdFiles = append(crdFiles, crdV1StaticFiles...)
+	if config.AboutAPIEnabled {
+		crdFiles = append(crdFiles, aboutAPIFile)
+	}
+
 	deleteErr := crdManager.Clean(ctx, true,
 		func(name string) ([]byte, error) {
 			template, err := manifests.KlusterletManifestFiles.ReadFile(name)
@@ -84,7 +100,7 @@ func (r *crdReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
 			helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
 			return objData, nil
 		},
-		crdV1StaticFiles...,
+		crdFiles...,
 	)
 
 	if deleteErr != nil {
diff --git a/test/e2e/clusterproperty_test.go b/test/e2e/clusterproperty_test.go
new file mode 100644
index 000000000..83821f24b
--- /dev/null
+++ b/test/e2e/clusterproperty_test.go
@@ -0,0 +1,124 @@
+package e2e
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	aboutv1alpha1 "sigs.k8s.io/about-api/pkg/apis/v1alpha1"
+	aboutclient "sigs.k8s.io/about-api/pkg/generated/clientset/versioned"
+
+	ocmfeature "open-cluster-management.io/api/feature"
+)
+
+var _ = ginkgo.Describe("ClusterProperty API test", func() {
+	var aboutClusterClient aboutclient.Interface
+	var err error
+	ginkgo.BeforeEach(func() {
+		gomega.Eventually(func() error {
+			return spoke.EnableRegistrationFeature(universalKlusterletName, string(ocmfeature.ClusterProperty))
+		}).Should(gomega.Succeed())
+
+		aboutClusterClient, err = aboutclient.NewForConfig(spoke.RestConfig)
+		gomega.Expect(err).ToNot(gomega.HaveOccurred())
+
+		ginkgo.DeferCleanup(func() {
+			gomega.Eventually(func() error {
+				return spoke.RemoveRegistrationFeature(universalKlusterletName, string(ocmfeature.ClusterProperty))
+			}).Should(gomega.Succeed())
+
+			err = aboutClusterClient.AboutV1alpha1().ClusterProperties().DeleteCollection(
+				context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+		})
+	})
+
+	ginkgo.Context("create/update/delete clusterproperty", func() {
+		ginkgo.It("managed cluster should have clusterproperty synced", func() {
+			prop1 := &aboutv1alpha1.ClusterProperty{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "prop1",
+				},
+				Spec: aboutv1alpha1.ClusterPropertySpec{
+					Value: "value1",
+				},
+			}
+
+			ginkgo.By("create a cluster property")
+			gomega.Eventually(func() error {
+				_, err = aboutClusterClient.AboutV1alpha1().ClusterProperties().Create(
+					context.Background(), prop1, metav1.CreateOptions{})
+				return err
+			}).Should(gomega.Succeed())
+
+			gomega.Eventually(func() error {
+				managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(
+					context.Background(), universalClusterName, metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+
+				for _, claim := range managedCluster.Status.ClusterClaims {
+					if claim.Name == "prop1" && claim.Value == "value1" {
+						return nil
+					}
+				}
+				return fmt.Errorf(
+					"managed cluster does not have prop1 synced, got %v", managedCluster.Status.ClusterClaims)
+			}).Should(gomega.Succeed())
+
+			ginkgo.By("update a cluster property")
+			gomega.Eventually(func() error {
+				p, err := aboutClusterClient.AboutV1alpha1().ClusterProperties().Get(
+					context.Background(), "prop1", metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+				p.Spec.Value = "value2"
+				_, err = aboutClusterClient.AboutV1alpha1().ClusterProperties().Update(
+					context.Background(), p, metav1.UpdateOptions{})
+				return err
+			}).Should(gomega.Succeed())
+
+			gomega.Eventually(func() error {
+				managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(
+					context.Background(), universalClusterName, metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+
+				for _, claim := range managedCluster.Status.ClusterClaims {
+					if claim.Name == "prop1" && claim.Value == "value2" {
+						return nil
+					}
+				}
+				return fmt.Errorf(
+					"managed cluster does not have prop1 synced, got %v", managedCluster.Status.ClusterClaims)
+			}).Should(gomega.Succeed())
+
+			ginkgo.By("delete a cluster property")
+			err = aboutClusterClient.AboutV1alpha1().ClusterProperties().Delete(
+				context.Background(), "prop1", metav1.DeleteOptions{})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+
+			gomega.Eventually(func() error {
+				managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(
+					context.Background(), universalClusterName, metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+
+				for _, claim := range managedCluster.Status.ClusterClaims {
+					if claim.Name == "prop1" {
+						return fmt.Errorf(
+							"managed cluster should not have prop1 synced, got %v", managedCluster.Status.ClusterClaims)
+					}
+				}
+				return nil
+			}).Should(gomega.Succeed())
+		})
+	})
+
+})
diff --git a/test/e2e/managedcluster_loopback_test.go b/test/e2e/managedcluster_loopback_test.go
index 22ce55c2b..84a855c89 100644
--- a/test/e2e/managedcluster_loopback_test.go
+++ b/test/e2e/managedcluster_loopback_test.go
@@ -175,30 +175,31 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() {
 		// make sure the cpu and memory are still in the status, for compatibility
 		ginkgo.By("Make sure cpu and memory exist in status")
-		err = wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
-			managedCluster, err := managedClusters.Get(context.TODO(), universalClusterName, metav1.GetOptions{})
-			if err != nil {
-				return false, err
-			}
+		err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 30*time.Second, true,
+			func(ctx context.Context) (bool, error) {
+				managedCluster, err := managedClusters.Get(ctx, universalClusterName, metav1.GetOptions{})
+				if err != nil {
+					return false, err
+				}
 
-			if _, exist := managedCluster.Status.Allocatable[clusterv1.ResourceCPU]; !exist {
-				return false, fmt.Errorf("Resource %v doesn't exist in Allocatable", clusterv1.ResourceCPU)
-			}
+				if _, exist := managedCluster.Status.Allocatable[clusterv1.ResourceCPU]; !exist {
+					return false, fmt.Errorf("Resource %v doesn't exist in Allocatable", clusterv1.ResourceCPU)
+				}
 
-			if _, exist := managedCluster.Status.Allocatable[clusterv1.ResourceMemory]; !exist {
-				return false, fmt.Errorf("Resource %v doesn't exist in Allocatable", clusterv1.ResourceMemory)
-			}
+				if _, exist := managedCluster.Status.Allocatable[clusterv1.ResourceMemory]; !exist {
+					return false, fmt.Errorf("Resource %v doesn't exist in Allocatable", clusterv1.ResourceMemory)
+				}
 
-			if _, exist := managedCluster.Status.Capacity[clusterv1.ResourceCPU]; !exist {
-				return false, fmt.Errorf("Resource %v doesn't exist in Capacity", clusterv1.ResourceCPU)
-			}
+				if _, exist := managedCluster.Status.Capacity[clusterv1.ResourceCPU]; !exist {
+					return false, fmt.Errorf("Resource %v doesn't exist in Capacity", clusterv1.ResourceCPU)
+				}
 
-			if _, exist := managedCluster.Status.Capacity[clusterv1.ResourceMemory]; !exist {
-				return false, fmt.Errorf("Resource %v doesn't exist in Capacity", clusterv1.ResourceMemory)
-			}
+				if _, exist := managedCluster.Status.Capacity[clusterv1.ResourceMemory]; !exist {
+					return false, fmt.Errorf("Resource %v doesn't exist in Capacity", clusterv1.ResourceMemory)
+				}
 
-			return true, nil
-		})
+				return true, nil
+			})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 		ginkgo.By("Make sure ClusterClaims are synced")
@@ -208,14 +209,15 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() {
 				Value: clusterId,
 			},
 		}
-		err = wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
-			managedCluster, err := managedClusters.Get(context.TODO(), universalClusterName, metav1.GetOptions{})
-			if err != nil {
-				return false, err
-			}
+		err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 30*time.Second, true,
+			func(ctx context.Context) (bool, error) {
+				managedCluster, err := managedClusters.Get(ctx, universalClusterName, metav1.GetOptions{})
+				if err != nil {
+					return false, err
+				}
 
-			return reflect.DeepEqual(clusterClaims, managedCluster.Status.ClusterClaims), nil
-		})
+				return reflect.DeepEqual(clusterClaims, managedCluster.Status.ClusterClaims), nil
+			})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 		ginkgo.By("Create addon on hub")
@@ -260,21 +262,22 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() {
 		)
 
 		ginkgo.By(fmt.Sprintf("Waiting for the CSR for addOn %q to exist", addOnName))
-		err = wait.Poll(1*time.Second, 90*time.Second, func() (bool, error) {
-			var err error
-			csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{
-				LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name=%s,open-cluster-management.io/addon-name=%s", universalClusterName, addOnName),
+		err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 90*time.Second, true,
+			func(ctx context.Context) (bool, error) {
+				var err error
+				csrs, err = csrClient.List(ctx, metav1.ListOptions{
+					LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name=%s,open-cluster-management.io/addon-name=%s", universalClusterName, addOnName),
+				})
+				if err != nil {
+					return false, err
+				}
+
+				if len(csrs.Items) >= 1 {
+					return true, nil
+				}
+
+				return false, nil
 			})
-			if err != nil {
-				return false, err
-			}
-
-			if len(csrs.Items) >= 1 {
-				return true, nil
-			}
-
-			return false, nil
-		})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 		ginkgo.By("Approving all pending CSRs")
diff --git a/test/framework/klusterlet.go b/test/framework/klusterlet.go
index 36547654b..5739b614f 100644
--- a/test/framework/klusterlet.go
+++ b/test/framework/klusterlet.go
@@ -213,6 +213,59 @@ func (spoke *Spoke) CheckKlusterletStatus(klusterletName, condType, reason string,
 	return nil
 }
 
+func (spoke *Spoke) EnableRegistrationFeature(klusterletName, feature string) error {
+	kl, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	if kl.Spec.RegistrationConfiguration == nil {
+		kl.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationConfiguration{}
+	}
+
+	if len(kl.Spec.RegistrationConfiguration.FeatureGates) == 0 {
+		kl.Spec.RegistrationConfiguration.FeatureGates = make([]operatorapiv1.FeatureGate, 0)
+	}
+
+	for idx, f := range kl.Spec.RegistrationConfiguration.FeatureGates {
+		if f.Feature == feature {
+			if f.Mode == operatorapiv1.FeatureGateModeTypeEnable {
+				return nil
+			}
+			kl.Spec.RegistrationConfiguration.FeatureGates[idx].Mode = operatorapiv1.FeatureGateModeTypeEnable
+			_, err = spoke.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(), kl, metav1.UpdateOptions{})
+			return err
+		}
+	}
+
+	featureGate := operatorapiv1.FeatureGate{
+		Feature: feature,
+		Mode:    operatorapiv1.FeatureGateModeTypeEnable,
+	}
+
+	kl.Spec.RegistrationConfiguration.FeatureGates =
+		append(kl.Spec.RegistrationConfiguration.FeatureGates, featureGate)
+	_, err = spoke.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(), kl, metav1.UpdateOptions{})
+	return err
+}
+
+func (spoke *Spoke) RemoveRegistrationFeature(klusterletName string, feature string) error {
+	kl, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	if kl.Spec.RegistrationConfiguration == nil {
+		return nil
+	}
+	for idx, fg := range kl.Spec.RegistrationConfiguration.FeatureGates {
+		if fg.Feature == feature {
+			kl.Spec.RegistrationConfiguration.FeatureGates[idx].Mode = operatorapiv1.FeatureGateModeTypeDisable
+			break
+		}
+	}
+	_, err = spoke.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(), kl, metav1.UpdateOptions{})
+	return err
+}
+
 // CleanKlusterletRelatedResources needs both hub side and spoke side operations
 func CleanKlusterletRelatedResources(
 	hub *Hub, spoke *Spoke,
diff --git a/test/framework/spoke.go b/test/framework/spoke.go
index 0d2627342..03df403b6 100644
--- a/test/framework/spoke.go
+++ b/test/framework/spoke.go
@@ -1,6 +1,7 @@
 package framework
 
 import (
+	"k8s.io/client-go/rest"
 	clientcmd "k8s.io/client-go/tools/clientcmd"
 )
 
@@ -14,6 +15,8 @@ type Spoke struct {
 	// is different from the klusterlet namespace and name.
 	KlusterletOperatorNamespace string
 	KlusterletOperator          string
+
+	RestConfig *rest.Config
 }
 
 func NewSpoke(kubeconfig string) (*Spoke, error) {
@@ -31,5 +34,6 @@ func NewSpoke(kubeconfig string) (*Spoke, error) {
 		// The same name as deploy/klusterlet/config/operator/operator.yaml
 		KlusterletOperatorNamespace: "open-cluster-management",
 		KlusterletOperator:          "klusterlet",
+		RestConfig:                  clusterCfg,
 	}, nil
 }
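
For context, a minimal sketch of what the new RBAC rule and CRD enable on a managed cluster. This is not part of the patch: the kubeconfig path is a placeholder, and it simply uses the sigs.k8s.io/about-api clientset already exercised in the e2e test above. Each ClusterProperty listed here is expected to be mirrored to the hub as a ClusterClaim with the same name and value, which is what the e2e test asserts.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	aboutclient "sigs.k8s.io/about-api/pkg/generated/clientset/versioned"
)

func main() {
	// Build a client from a spoke kubeconfig, mirroring framework.NewSpoke;
	// the path below is a placeholder for illustration.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/spoke.kubeconfig")
	if err != nil {
		panic(err)
	}

	client, err := aboutclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The new clusterrole rule grants get/list/watch on clusterproperties in
	// the about.k8s.io group, so a cluster-wide list like this succeeds for
	// the registration agent's service account.
	props, err := client.AboutV1alpha1().ClusterProperties().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	for _, p := range props.Items {
		// Name/value pairs that should show up in ManagedCluster status on the hub.
		fmt.Printf("%s=%s\n", p.Name, p.Spec.Value)
	}
}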