package integration

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/rand"

	"open-cluster-management.io/addon-framework/pkg/addonmanager/constants"
	"open-cluster-management.io/addon-framework/pkg/agent"
	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
	clusterv1 "open-cluster-management.io/api/cluster/v1"
	workapiv1 "open-cluster-management.io/api/work/v1"
)
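
// deploymentJson is the manifest the test add-on returns for the managed
// cluster; the deploy controller is expected to wrap it in a ManifestWork.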
const (
	deploymentJson = `{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": {
    "name": "nginx-deployment",
    "namespace": "default"
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": {
        "app": "nginx"
      }
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "app": "nginx"
        }
      },
      "spec": {
        "containers": [
          {
            "image": "nginx:1.14.2",
            "name": "nginx",
            "ports": [
              {
                "containerPort": 80,
                "protocol": "TCP"
              }
            ]
          }
        ]
      }
    }
  }
}`
)
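
// This suite exercises the agent deploy flow end to end: it registers a
// ManagedCluster and a ManagedClusterAddOn, then checks the ManifestWork the
// controller creates and the addon status conditions derived from it.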
var _ = ginkgo.Describe("Agent deploy", func() {
	var managedClusterName string
	var err error
	var manifestWorkName string
	ginkgo.BeforeEach(func() {
		suffix := rand.String(5)
		managedClusterName = fmt.Sprintf("managedcluster-%s", suffix)
		manifestWorkName = fmt.Sprintf("%s-0", constants.DeployWorkNamePrefix(testAddonImpl.name))
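
		// Register a fresh ManagedCluster on the hub for this test run.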
		managedCluster := &clusterv1.ManagedCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name: managedClusterName,
			},
			Spec: clusterv1.ManagedClusterSpec{
				HubAcceptsClient: true,
			},
		}
		_, err = hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
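
		// Each cluster gets a matching hub namespace that hosts its
		// ManagedClusterAddOn and ManifestWork resources.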
		ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}}
		_, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
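
		// Create the ClusterManagementAddOn and verify the controller sets
		// the expected annotations on it (managed by addon-manager).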
		cma := newClusterManagementAddon(testAddonImpl.name)
		_, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(),
			cma, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())

		assertClusterManagementAddOnAnnotations(testAddonImpl.name)
	})
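
	// Clean up everything BeforeEach created so test cases stay independent.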
	ginkgo.AfterEach(func() {
		err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(),
			testAddonImpl.name, metav1.DeleteOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
	})
ginkgo.It("Should deploy agent when cma is managed by addon-manager successfully", func() {
|
|
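		// Serve the nginx deployment as the add-on's manifest and probe the
		// agent's health through the ManifestWork conditions.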
		obj := &unstructured.Unstructured{}
		err := obj.UnmarshalJSON([]byte(deploymentJson))
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		testAddonImpl.manifests[managedClusterName] = []runtime.Object{obj}
		testAddonImpl.prober = &agent.HealthProber{
			Type: agent.HealthProberTypeWork,
		}

		// Create ManagedClusterAddOn
		addon := &addonapiv1alpha1.ManagedClusterAddOn{
			ObjectMeta: metav1.ObjectMeta{
				Name: testAddonImpl.name,
			},
			Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{
				InstallNamespace: "default",
			},
		}
		_, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Create(context.Background(), addon, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
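
		// The deploy controller should create a ManifestWork carrying exactly
		// the deployment manifest returned by the add-on.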
		gomega.Eventually(func() error {
			work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
			if err != nil {
				return err
			}

			if len(work.Spec.Workload.Manifests) != 1 {
				return fmt.Errorf("unexpected number of work manifests")
			}

			if !apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) {
				return fmt.Errorf("expected manifest is not correct, got %v", work.Spec.Workload.Manifests[0].Raw)
			}
			return nil
		}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

		// Update work status to trigger addon status
		work, err := hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		meta.SetStatusCondition(
			&work.Status.Conditions,
			metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation})
		_, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
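
		// With the work Applied but not yet Available, the addon should
		// report ManifestApplied and still be Progressing.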
		gomega.Eventually(func() error {
			addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{})
			if err != nil {
				return err
			}

			if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) {
				return fmt.Errorf("unexpected addon applied condition, %v", addon.Status.Conditions)
			}
			if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) {
				return fmt.Errorf("unexpected addon progressing condition, %v", addon.Status.Conditions)
			}
			return nil
		}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

		// update work to available so addon becomes available
		work, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		meta.SetStatusCondition(
			&work.Status.Conditions,
			metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation})
		_, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
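
		// Once the work is Available, the addon should turn Available and
		// Progressing should settle to False.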
		gomega.Eventually(func() error {
			addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{})
			if err != nil {
				return err
			}

			if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionAvailable) {
				return fmt.Errorf("unexpected addon available condition, %v", addon.Status.Conditions)
			}
			if !meta.IsStatusConditionFalse(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) {
				return fmt.Errorf("unexpected addon progressing condition, %v", addon.Status.Conditions)
			}
			return nil
		}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

		// do nothing if cluster is deleting and addon is not deleted
		cluster, err := hubClusterClient.ClusterV1().ManagedClusters().Get(context.Background(), managedClusterName, metav1.GetOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		cluster.SetFinalizers([]string{clusterv1.ManagedClusterFinalizer})
		_, err = hubClusterClient.ClusterV1().ManagedClusters().Update(context.Background(), cluster, metav1.UpdateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
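
		// The finalizer keeps the cluster in Terminating, so the ManifestWork
		// should remain in place while the addon itself still exists.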
		time.Sleep(5 * time.Second) // wait 5 seconds to sync
		gomega.Eventually(func() error {
			_, err = hubWorkClient.WorkV1().ManifestWorks(managedClusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
			return err
		}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
	})

})