Reduce e2e process time (#210)

For the work and addon e2e, we do not need to restart the klusterlet for each case.

Signed-off-by: Jian Qiu <jqiu@redhat.com>
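In practice, the change lifts klusterlet creation and cleanup out of each Describe block's BeforeEach/AfterEach and into a single suite-level BeforeSuite/AfterSuite pair, so the expensive install and uninstall happen once per run instead of once per case. A minimal sketch of the target shape (the suite's own helpers are reduced to comments here):

```go
package e2e

import "github.com/onsi/ginkgo/v2"

// deployKlusterlet mirrors the suite's flag: when false, the tests run
// against a pre-installed agent and skip klusterlet management entirely.
var deployKlusterlet bool

var _ = ginkgo.BeforeSuite(func() {
	if deployKlusterlet {
		// create one approved klusterlet, shared by all work/addon cases
	}
})

var _ = ginkgo.AfterSuite(func() {
	if deployKlusterlet {
		// tear the shared klusterlet down exactly once
	}
})
```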
@@ -13,7 +13,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"

 	clusterv1 "open-cluster-management.io/api/cluster/v1"
@@ -22,21 +21,8 @@ import (

 var _ = ginkgo.Describe("Addon Health Check", func() {
 	ginkgo.Context("Checking addon lease on managed cluster to update addon status", func() {
-		var (
-			klusterletName string
-			addOnName      string
-		)
-
+		var addOnName string
 		ginkgo.BeforeEach(func() {
-			if deployKlusterlet {
-				klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
-				clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
-				agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
-				_, err := t.CreateApprovedKlusterlet(
-					klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
-				gomega.Expect(err).ToNot(gomega.HaveOccurred())
-			}
-
 			// create an addon on created managed cluster
 			addOnName = fmt.Sprintf("addon-%s", rand.String(6))
 			ginkgo.By(fmt.Sprintf("Creating managed cluster addon %q", addOnName))
@@ -54,11 +40,6 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
 		})

 		ginkgo.AfterEach(func() {
-			ginkgo.By(fmt.Sprintf("Cleaning managed cluster %q", clusterName))
-			if deployKlusterlet {
-				ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
-				gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil())
-			}
 			ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName))
 			err := t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -77,33 +58,29 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
 			}, metav1.CreateOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())

-			err = wait.Poll(1*time.Second, 90*time.Second, func() (bool, error) {
+			gomega.Eventually(func() error {
 				found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return err
 				}

 				if !meta.IsStatusConditionTrue(found.Status.Conditions, "Available") {
-					return false, nil
+					return fmt.Errorf("condition should be available")
 				}

-				return true, nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return nil
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())

 			// check if the cluster has a label for addon with expected value
-			err = wait.Poll(1*time.Second, 90*time.Second, func() (bool, error) {
+			gomega.Eventually(func() bool {
 				cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return false
 				}
 				if len(cluster.Labels) == 0 {
-					return false, nil
+					return false
 				}
 				key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
-				return cluster.Labels[key] == "available", nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return cluster.Labels[key] == "available"
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
 		})

 		ginkgo.It("Should update addon status to unavailable if addon stops to update its lease", func() {
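This hunk also sets the pattern for the rest of the file: the wait.Poll loops, which return (bool, error) and need a separate gomega.Expect on the returned error, become gomega.Eventually closures whose return value is asserted directly. A standalone sketch of the two shapes, with a stub standing in for the real addon-status lookup:

```go
package e2e

import (
	"fmt"
	"time"

	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/util/wait"
)

// conditionIsTrue is a stand-in for the real "is the Available condition true" check.
func conditionIsTrue() bool { return true }

// Old shape: poll by hand, then assert the aggregate error separately.
func checkWithWaitPoll() error {
	return wait.Poll(1*time.Second, 90*time.Second, func() (bool, error) {
		return conditionIsTrue(), nil
	})
}

// New shape: gomega retries the closure until it stops returning an error.
func checkWithEventually(g gomega.Gomega) {
	g.Eventually(func() error {
		if !conditionIsTrue() {
			return fmt.Errorf("condition should be available")
		}
		return nil
	}, 90*time.Second, 1*time.Second).Should(gomega.Succeed())
}
```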
@@ -119,32 +96,29 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
 			}, metav1.CreateOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())

-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() error {
 				found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return err
 				}
-				if found.Status.Conditions == nil {
-					return false, nil
+				if !meta.IsStatusConditionTrue(found.Status.Conditions, "Available") {
+					return fmt.Errorf("condition should be available")
 				}
-				cond := meta.FindStatusCondition(found.Status.Conditions, "Available")
-				return cond.Status == metav1.ConditionTrue, nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return nil
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())

 			// check if the cluster has a label for addon with expected value
-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() bool {
 				cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return false
 				}
 				if len(cluster.Labels) == 0 {
-					return false, nil
+					return false
 				}
 				key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
-				return cluster.Labels[key] == "available", nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return cluster.Labels[key] == "available"
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())

 			ginkgo.By(fmt.Sprintf("Updating lease %q with a past time", addOnName))
 			lease, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Get(context.TODO(), addOnName, metav1.GetOptions{})
@@ -153,32 +127,29 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
 			_, err = t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Update(context.TODO(), lease, metav1.UpdateOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())

-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() error {
 				found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return err
 				}
-				if found.Status.Conditions == nil {
-					return false, nil
+				if !meta.IsStatusConditionFalse(found.Status.Conditions, "Available") {
+					return fmt.Errorf("condition should be available")
 				}
-				cond := meta.FindStatusCondition(found.Status.Conditions, "Available")
-				return cond.Status == metav1.ConditionFalse, nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return nil
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())

 			// check if the cluster has a label for addon with expected value
-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() bool {
 				cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return false
 				}
 				if len(cluster.Labels) == 0 {
-					return false, nil
+					return false
 				}
 				key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
-				return cluster.Labels[key] == "unhealthy", nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return cluster.Labels[key] == "unhealthy"
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
 		})

 		ginkgo.It("Should update addon status to unknown if there is no lease for this addon", func() {
@@ -194,87 +165,76 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
 			}, metav1.CreateOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())

-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() error {
 				found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return err
 				}
-				if found.Status.Conditions == nil {
-					return false, nil
+				if !meta.IsStatusConditionTrue(found.Status.Conditions, "Available") {
+					return fmt.Errorf("condition should be available")
 				}
-				cond := meta.FindStatusCondition(found.Status.Conditions, "Available")
-				return cond.Status == metav1.ConditionTrue, nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return nil
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())

 			// check if the cluster has a label for addon with expected value
-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() bool {
 				cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return false
 				}
 				if len(cluster.Labels) == 0 {
-					return false, nil
+					return false
 				}
 				key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
-				return cluster.Labels[key] == "available", nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return cluster.Labels[key] == "available"
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())

 			ginkgo.By(fmt.Sprintf("Deleting lease %q", addOnName))
 			err = t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())

-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() error {
 				found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return err
 				}
 				if !meta.IsStatusConditionTrue(found.Status.Conditions, "Available") {
-					return false, nil
+					return fmt.Errorf("condition should be available")
 				}

-				return true, nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return nil
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())

 			// check if the cluster has a label for addon with expected value
-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() bool {
 				cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return false
 				}
 				if len(cluster.Labels) == 0 {
-					return false, nil
+					return false
 				}
 				key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
-				return cluster.Labels[key] == "unreachable", nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				return cluster.Labels[key] == "unreachable"
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
 		})
 	})

 	ginkgo.Context("Checking managed cluster status to update addon status", func() {
-		var (
-			klusterletName string
-			addOnName      string
-		)
-
+		var klusterletName, clusterName, addOnName string
 		ginkgo.BeforeEach(func() {
 			// create a managed cluster
-			if deployKlusterlet {
-				klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
-				clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
-				agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
-				_, err := t.CreateApprovedKlusterlet(
-					klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
-				gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			if !deployKlusterlet {
+				ginkgo.Skip(fmt.Sprintf("skip if disabling deploy klusterlet"))
 			}

+			klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
+			clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
+			agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
+			_, err := t.CreateApprovedKlusterlet(
+				klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
 			// create an addon on created managed cluster
 			addOnName = fmt.Sprintf("addon-%s", rand.String(6))
 			ginkgo.By(fmt.Sprintf("Creating managed cluster addon %q", addOnName))
-			err := t.CreateManagedClusterAddOn(clusterName, addOnName, addOnName)
+			err = t.CreateManagedClusterAddOn(clusterName, addOnName, addOnName)
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())

 			// create addon installation namespace
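Note that this Context is the exception to the shared-klusterlet rule: its specs deliberately stop the klusterlet to drive the addon status to unknown, so it skips when klusterlet deployment is disabled and provisions a private klusterlet of its own (with the matching cleanup added to its AfterEach in the next hunk) rather than poisoning the suite-wide one.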
@@ -291,6 +251,8 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
 			ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName))
 			err := t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
+			gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil())
 		})

 		ginkgo.It("Should update addon status to unknown if managed cluster stops to update its lease", func() {
@@ -306,18 +268,20 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
 			}, metav1.CreateOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())

-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() error {
 				found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return err
 				}
 				if found.Status.Conditions == nil {
-					return false, nil
+					return fmt.Errorf("condition should not be nil")
 				}
 				cond := meta.FindStatusCondition(found.Status.Conditions, "Available")
-				return cond.Status == metav1.ConditionTrue, nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				if cond.Status != metav1.ConditionTrue {
+					return fmt.Errorf("available status should be true")
+				}
+				return nil
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())

 			// delete registration agent to stop agent update its status
 			ginkgo.By(fmt.Sprintf("Stoping klusterlet"))
@@ -355,18 +319,20 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
 			_, err = t.ClusterClient.ClusterV1().ManagedClusters().UpdateStatus(context.TODO(), found, metav1.UpdateOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())

-			err = wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
+			gomega.Eventually(func() error {
 				found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
 				if err != nil {
-					return false, err
+					return err
 				}
 				if found.Status.Conditions == nil {
-					return false, nil
+					return fmt.Errorf("condition should not be nil")
 				}
 				cond := meta.FindStatusCondition(found.Status.Conditions, "Available")
-				return cond.Status == metav1.ConditionUnknown, nil
-			})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				if cond.Status != metav1.ConditionUnknown {
+					return fmt.Errorf("available status should be unknown")
+				}
+				return nil
+			}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
 		})
 	})
 })
@@ -1,31 +1,24 @@
 package e2e

 import (
 	"context"
 	"fmt"

 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
-
-	operatorapiv1 "open-cluster-management.io/api/operator/v1"
 )

 var _ = Describe("Manage the managed cluster addons", func() {
-	var klusterletName, clusterName, agentNamespace, addOnName string
-
+	var addOnName string
 	BeforeEach(func() {
-		klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
-		clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
-		agentNamespace = fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
 		addOnName = fmt.Sprintf("e2e-addon-%s", rand.String(6))
-
-		_, err := t.CreateApprovedKlusterlet(
-			klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
-		Expect(err).ToNot(HaveOccurred())
 	})

 	AfterEach(func() {
-		By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
-		Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(BeNil())
 		err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
 		Expect(err).ToNot(HaveOccurred())
 	})

 	It("Create one managed cluster addon and make sure it is available", func() {
@@ -44,9 +44,9 @@ var (
 	}
 )

-var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Label("addon-manager"), func() {
+var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.Label("addon-manager"), func() {
 	addOnName := "hello-template"
-	var klusterletName, clusterName, agentNamespace, addonInstallNamespace string
+	var addonInstallNamespace string

 	s := runtime.NewScheme()
 	_ = scheme.AddToScheme(s)
@@ -62,17 +62,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Label("ad
 		"addon/signca_secret_rolebinding.yaml",
 	}

-	ginkgo.BeforeEach(func() {
-		surfix := rand.String(6)
-		klusterletName = fmt.Sprintf("e2e-klusterlet-%s", surfix)
-		clusterName = fmt.Sprintf("e2e-managedcluster-%s", surfix)
-		agentNamespace = fmt.Sprintf("open-cluster-management-agent-%s", surfix)
-		addonInstallNamespace = fmt.Sprintf("%s-addon", agentNamespace)
-
-		ginkgo.By("create addon custom sign secret")
-		err := copySignerSecret(context.TODO(), t.HubKubeClient, "open-cluster-management-hub",
-			"signer-secret", templateagent.AddonManagerNamespace(), customSignerSecretName)
-		gomega.Expect(err).ToNot(gomega.HaveOccurred())
+	ginkgo.BeforeAll(func() {
 		// enable addon management feature gate
 		gomega.Eventually(func() error {
 			clusterManager, err := t.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), "cluster-manager", metav1.GetOptions{})
@@ -90,14 +80,31 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Label("ad
 			_, err = t.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), clusterManager, metav1.UpdateOptions{})
 			return err
 		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
+	})
+
+	ginkgo.AfterAll(func() {
+		// disable addon management feature gate
+		gomega.Eventually(func() error {
+			clusterManager, err := t.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), "cluster-manager", metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			clusterManager.Spec.AddOnManagerConfiguration = &operatorapiv1.AddOnManagerConfiguration{}
+			_, err = t.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), clusterManager, metav1.UpdateOptions{})
+			return err
+		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
+	})
+
+	ginkgo.BeforeEach(func() {
+		addonInstallNamespace = fmt.Sprintf("%s-addon", agentNamespace)
+		ginkgo.By("create addon custom sign secret")
+		err := copySignerSecret(context.TODO(), t.HubKubeClient, "open-cluster-management-hub",
+			"signer-secret", templateagent.AddonManagerNamespace(), customSignerSecretName)
+		gomega.Expect(err).ToNot(gomega.HaveOccurred())

 		// the addon manager deployment should be running
 		gomega.Eventually(t.CheckHubReady, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.Succeed())

 		_, err = t.CreateApprovedKlusterlet(
 			klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())

 		ginkgo.By(fmt.Sprintf("create addon template resources for cluster %v", clusterName))
 		err = createResourcesFromYamlFiles(context.Background(), t.HubDynamicClient, t.hubRestMapper, s,
 			defaultAddonTemplateReaderManifestsFunc(manifests.AddonManifestFiles, map[string]interface{}{
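ginkgo.Ordered (added to this Describe above) is what makes BeforeAll/AfterAll legal here: in Ginkgo v2 they may only appear inside an Ordered container, and they run once around the container's whole spec list rather than around each spec, so the feature-gate toggle is paid for once. A small self-contained illustration:

```go
package e2e

import "github.com/onsi/ginkgo/v2"

var _ = ginkgo.Describe("feature gate lifecycle", ginkgo.Ordered, func() {
	ginkgo.BeforeAll(func() {
		// once, before the first spec: e.g. enable the feature gate
	})

	ginkgo.AfterAll(func() {
		// once, after the last spec: e.g. disable the feature gate
	})

	ginkgo.BeforeEach(func() {
		// still runs before every spec, for per-case resources
	})

	ginkgo.It("first spec", func() {})
	ginkgo.It("second spec", func() {})
})
```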
@@ -165,25 +172,6 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Label("ad
 			ginkgo.Fail(fmt.Sprintf("failed to delete custom signer secret %v/%v: %v",
 				templateagent.AddonManagerNamespace(), customSignerSecretName, err))
 		}
-
-		ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
-		gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil())
-
-		ginkgo.By(fmt.Sprintf("Cleaning managed cluster namespace %s", clusterName))
-		err = t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), clusterName, metav1.DeleteOptions{})
-		gomega.Expect(err).ToNot(gomega.HaveOccurred())
-
-		// disable addon management feature gate
-		gomega.Eventually(func() error {
-			clusterManager, err := t.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), "cluster-manager", metav1.GetOptions{})
-			if err != nil {
-				return err
-			}
-			clusterManager.Spec.AddOnManagerConfiguration = &operatorapiv1.AddOnManagerConfiguration{}
-			_, err = t.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), clusterManager, metav1.UpdateOptions{})
-			return err
-		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
-
 	})

 	ginkgo.It("Template type addon should be functioning", func() {
@@ -2,11 +2,13 @@ package e2e

 import (
 	"flag"
+	"fmt"
 	"testing"
 	"time"

 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/util/rand"

+	operatorapiv1 "open-cluster-management.io/api/operator/v1"
 )
@@ -15,6 +17,8 @@ var t *Tester

 var (
 	clusterName           string
+	klusterletName        string
+	agentNamespace        string
 	hubKubeconfig         string
 	nilExecutorValidating bool
 	deployKlusterlet      bool
@@ -69,4 +73,20 @@ var _ = BeforeSuite(func() {
 		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
 	}
 	Expect(err).ToNot(HaveOccurred())
+
+	if deployKlusterlet {
+		klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
+		clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
+		agentNamespace = fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
+		_, err := t.CreateApprovedKlusterlet(
+			klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
+		Expect(err).ToNot(HaveOccurred())
+	}
 })
+
+var _ = AfterSuite(func() {
+	if deployKlusterlet {
+		By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
+		Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(BeNil())
+	}
+})
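With klusterletName, clusterName, and agentNamespace promoted to package-level variables here, every file in the suite now reads the same shared names. The deployKlusterlet switch that gates the whole mechanism is registered elsewhere in the suite; a hypothetical wiring (the flag name and description are illustrative, not taken from this diff) would look like:

```go
package e2e

import "flag"

var deployKlusterlet bool

func init() {
	// hypothetical flag registration; the real flag name lives outside this diff
	flag.BoolVar(&deployKlusterlet, "deploy-klusterlet", false,
		"deploy a klusterlet for the e2e run instead of reusing an existing agent")
}
```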
@@ -20,7 +20,6 @@ import (
 	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
 	clusterv1 "open-cluster-management.io/api/cluster/v1"
 	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
-	operatorapiv1 "open-cluster-management.io/api/operator/v1"

 	"open-cluster-management.io/ocm/pkg/registration/clientcert"
 	"open-cluster-management.io/ocm/pkg/registration/helpers"
@@ -61,21 +60,11 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() {

 	ginkgo.It("Should register the hub as a managed cluster", func() {
 		var (
-			err            error
-			klusterletName string
-			suffix         = rand.String(6)
-			nsName         = fmt.Sprintf("loopback-spoke-%v", suffix)
+			err    error
+			suffix = rand.String(6)
+			nsName = fmt.Sprintf("loopback-spoke-%v", suffix)
 		)
 		ginkgo.By(fmt.Sprintf("Deploying the agent using suffix=%q ns=%q", suffix, nsName))
 		clusterName := fmt.Sprintf("loopback-e2e-%v", suffix)
-		if deployKlusterlet {
-			klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
-			clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
-			agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
-			_, err := t.CreateApprovedKlusterlet(
-				klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
-		}
 		var (
 			managedCluster  *clusterv1.ManagedCluster
 			managedClusters = t.ClusterClient.ClusterV1().ManagedClusters()
@@ -352,11 +341,6 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() {
 			return errors.IsNotFound(err)
 		}, 90*time.Second, 1*time.Second).Should(gomega.BeTrue())

-		if deployKlusterlet {
-			ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
-			gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil())
-		}
-
 		ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName))
 		err = t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -14,7 +14,6 @@ import (
 	clusterapiv1 "open-cluster-management.io/api/cluster/v1"
 	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
 	clusterapiv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
-	operatorapiv1 "open-cluster-management.io/api/operator/v1"
 	workapiv1 "open-cluster-management.io/api/work/v1"
 	workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"
@@ -30,6 +29,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
 	var nameSuffix string

 	ginkgo.BeforeEach(func() {
+		nameSuffix = rand.String(6)
 		// Enable manifestWorkReplicaSet feature if not enabled
 		gomega.Eventually(func() error {
 			return t.EnableWorkFeature("ManifestWorkReplicaSet")
@@ -46,27 +46,6 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
 	})

 	ginkgo.Context("Creating a ManifestWorkReplicaSet and check created resources", func() {
-		var klusterletName, clusterName string
-		ginkgo.JustBeforeEach(func() {
-			nameSuffix = rand.String(5)
-
-			if deployKlusterlet {
-				klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
-				clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
-				agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
-				_, err := t.CreateApprovedKlusterlet(
-					klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
-				gomega.Expect(err).ToNot(gomega.HaveOccurred())
-			}
-		})
-
-		ginkgo.JustAfterEach(func() {
-			if deployKlusterlet {
-				ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
-				gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil())
-			}
-		})
-
 		ginkgo.It("Should create ManifestWorkReplicaSet successfullt", func() {
 			ginkgo.By("create manifestworkreplicaset")
 			ns1 := fmt.Sprintf("ns1-%s", nameSuffix)
@@ -14,7 +14,6 @@ import (
 	"k8s.io/client-go/util/retry"

 	workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
-	operatorapiv1 "open-cluster-management.io/api/operator/v1"
 	workapiv1 "open-cluster-management.io/api/work/v1"

 	"open-cluster-management.io/ocm/test/integration/util"
@@ -24,34 +23,21 @@ import (
 // and well configured as sanity check. Resource leftovers should be cleaned up on both hub and managed cluster.
 var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validating-webhook", "sanity-check"), func() {
 	var nameSuffix string
-	var workName, klusterletName, clusterName string
+	var workName string

 	ginkgo.BeforeEach(func() {
 		nameSuffix = rand.String(5)
 		workName = fmt.Sprintf("w1-%s", nameSuffix)
-
-		if deployKlusterlet {
-			klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
-			clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
-			agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
-			_, err := t.CreateApprovedKlusterlet(
-				klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
-		}
 	})

 	ginkgo.AfterEach(func() {
 		ginkgo.By(fmt.Sprintf("delete manifestwork %v/%v", clusterName, workName))
 		gomega.Expect(t.cleanManifestWorks(clusterName, workName)).To(gomega.BeNil())
-		if deployKlusterlet {
-			ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
-			gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil())
-		}
 	})

 	ginkgo.Context("Creating a manifestwork", func() {
 		ginkgo.It("Should respond bad request when creating a manifestwork with no manifests", func() {
-			work := newManifestWork(clusterName, workName, []runtime.Object{}...)
+			work := newManifestWork(clusterName, workName)
 			_, err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
 			gomega.Expect(err).To(gomega.HaveOccurred())
 			gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue())
@@ -24,7 +24,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/utils/pointer"

-	operatorapiv1 "open-cluster-management.io/api/operator/v1"
 	workapiv1 "open-cluster-management.io/api/work/v1"

 	"open-cluster-management.io/ocm/test/integration/util"
@@ -149,31 +148,18 @@ const (
 // Test cases with lable "sanity-check" could be ran on an existing enviroment with work agent installed
 // and well configured as sanity check. Resource leftovers should be cleaned up on both hub and managed cluster.
 var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check"), func() {
-	var workName, klusterletName string
+	var workName string
 	var err error
 	var nameSuffix string

 	ginkgo.BeforeEach(func() {
 		nameSuffix = rand.String(5)
 		workName = fmt.Sprintf("work-%s", nameSuffix)
-
-		if deployKlusterlet {
-			klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
-			clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
-			agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
-			_, err := t.CreateApprovedKlusterlet(
-				klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
-		}
 	})

 	ginkgo.AfterEach(func() {
 		ginkgo.By(fmt.Sprintf("delete manifestwork %v/%v", clusterName, workName))
 		gomega.Expect(t.cleanManifestWorks(clusterName, workName)).To(gomega.BeNil())
-		if deployKlusterlet {
-			ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
-			gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil())
-		}
 	})

 	ginkgo.Context("Work CRUD", func() {