mirror of https://github.com/open-cluster-management-io/ocm.git
refactor e2e code
@@ -3,7 +3,10 @@ package e2e
import (
	"context"
	"fmt"
	. "github.com/onsi/gomega"
	"k8s.io/klog"
	"os"
	"time"

	clusterclient "github.com/open-cluster-management/api/client/cluster/clientset/versioned"
	operatorclient "github.com/open-cluster-management/api/client/operator/clientset/versioned"
	workv1client "github.com/open-cluster-management/api/client/work/clientset/versioned"
@@ -19,31 +22,113 @@ import (
	"k8s.io/apimachinery/pkg/apis/meta/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/retry"
)

const (
	eventuallyTimeout            = 60 // seconds
	eventuallyInterval           = 1  // seconds
	clusterManagerNamespace      = helpers.ClusterManagerNamespace
	klusterletDefaultNamespace   = helpers.KlusterletDefaultNamespace
	bootstrapHubKubeConfigSecret = helpers.BootstrapHubKubeConfigSecret
	hubRegistrationDeployment    = "cluster-manager-registration-controller"
	hubWebhookDeployment         = "cluster-manager-registration-webhook"
	operatorNamespace            = "open-cluster-management"
	klusterletOperator           = "klusterlet"
)

type Tester struct {
	KubeClient                 kubernetes.Interface
	ClusterCfg                 *rest.Config
	OperatorClient             operatorclient.Interface
	ClusterClient              clusterclient.Interface
	WorkClient                 workv1client.Interface
	bootstrapHubSecret         *corev1.Secret
	EventuallyTimeout          time.Duration
	EventuallyInterval         time.Duration
	clusterManagerNamespace    string
	klusterletDefaultNamespace string
	hubRegistrationDeployment  string
	hubWebhookDeployment       string
	operatorNamespace          string
	klusterletOperator         string
}

var (
	kubeClient         kubernetes.Interface
	clusterCfg         *rest.Config
	operatorClient     operatorclient.Interface
	clusterClient      clusterclient.Interface
	workClient         workv1client.Interface
	bootstrapHubSecret *corev1.Secret
)

// kubeconfigPath is the path of the kubeconfig file; it is read from the env "KUBECONFIG" by default.
// bootstrapHubSecret is the bootstrap hub kubeconfig secret, in the format "namespace/secretName".
// The default of bootstrapHubSecret is helpers.KlusterletDefaultNamespace/helpers.BootstrapHubKubeConfigSecret.
func NewTester(kubeconfigPath string) (*Tester, error) {
	var err error
	var tester = Tester{
		EventuallyTimeout:          60 * time.Second,
		EventuallyInterval:         1 * time.Second,
		clusterManagerNamespace:    helpers.ClusterManagerNamespace,
		klusterletDefaultNamespace: helpers.KlusterletDefaultNamespace,
		hubRegistrationDeployment:  "cluster-manager-registration-controller",
		hubWebhookDeployment:       "cluster-manager-registration-webhook",
		operatorNamespace:          "open-cluster-management",
		klusterletOperator:         "klusterlet",
	}

	if kubeconfigPath == "" {
		kubeconfigPath = os.Getenv("KUBECONFIG")
	}
	if tester.ClusterCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath); err != nil {
		klog.Errorf("failed to get ClusterCfg from path %v. %v", kubeconfigPath, err)
		return nil, err
	}
	if tester.KubeClient, err = kubernetes.NewForConfig(tester.ClusterCfg); err != nil {
		klog.Errorf("failed to get KubeClient. %v", err)
		return nil, err
	}
	if tester.OperatorClient, err = operatorclient.NewForConfig(tester.ClusterCfg); err != nil {
		klog.Errorf("failed to get OperatorClient. %v", err)
		return nil, err
	}
	if tester.ClusterClient, err = clusterclient.NewForConfig(tester.ClusterCfg); err != nil {
		klog.Errorf("failed to get ClusterClient. %v", err)
		return nil, err
	}
	if tester.WorkClient, err = workv1client.NewForConfig(tester.ClusterCfg); err != nil {
		klog.Errorf("failed to get WorkClient. %v", err)
		return nil, err
	}

	return &tester, nil
}
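
A minimal usage sketch of the constructor and its chainable setters (the KUBECONFIG fallback comes from the code above; the durations and namespace are illustrative assumptions, not part of this commit):

	tester, err := NewTester("") // an empty path falls back to the KUBECONFIG env var
	if err != nil {
		klog.Fatalf("failed to create tester: %v", err) // hypothetical handling
	}
	tester.SetEventuallyTimeout(90 * time.Second).
		SetEventuallyInterval(2 * time.Second).
		SetOperatorNamespace("open-cluster-management")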

func (t *Tester) SetEventuallyTimeout(timeout time.Duration) *Tester {
	t.EventuallyTimeout = timeout
	return t
}

func (t *Tester) SetEventuallyInterval(interval time.Duration) *Tester {
	t.EventuallyInterval = interval
	return t
}

func (t *Tester) SetOperatorNamespace(ns string) *Tester {
	t.operatorNamespace = ns
	return t
}

func (t *Tester) SetBootstrapHubSecret(bootstrapHubSecret string) error {
	var err error
	var bootstrapHubSecretName = helpers.BootstrapHubKubeConfigSecret
	var bootstrapHubSecretNamespace = helpers.KlusterletDefaultNamespace
	if bootstrapHubSecret != "" {
		bootstrapHubSecretNamespace, bootstrapHubSecretName, err = cache.SplitMetaNamespaceKey(bootstrapHubSecret)
		if err != nil {
			klog.Errorf("the format of bootstrapHubSecret %v is invalid. %v", bootstrapHubSecret, err)
			return err
		}
	}
	if t.bootstrapHubSecret, err = t.KubeClient.CoreV1().Secrets(bootstrapHubSecretNamespace).
		Get(context.TODO(), bootstrapHubSecretName, metav1.GetOptions{}); err != nil {
		klog.Errorf("failed to get bootstrapHubSecret %v in ns %v. %v", bootstrapHubSecretName,
			bootstrapHubSecretNamespace, err)
		return err
	}
	t.bootstrapHubSecret.ObjectMeta.ResourceVersion = ""
	t.bootstrapHubSecret.ObjectMeta.Namespace = ""
	return nil
}
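
Since the key is split with cache.SplitMetaNamespaceKey, a caller passes "namespace/secretName"; an empty string keeps the helpers defaults. A hedged example (the namespace/name pair here is hypothetical):

	// hypothetical "namespace/secretName" key
	if err := tester.SetBootstrapHubSecret("open-cluster-management-agent/bootstrap-hub-kubeconfig"); err != nil {
		klog.Fatalf("failed to load bootstrap hub secret: %v", err)
	}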

func (t *Tester) CreateKlusterlet(name, clusterName, agentNamespace string) (*operatorapiv1.Klusterlet, error) {
	if name == "" {
		return nil, fmt.Errorf("the name should not be empty")
	}

func createKlusterlet(name, clusterName, agentNamespace string) (realClusterName string) {
	var klusterlet = &operatorapiv1.Klusterlet{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
@@ -61,88 +146,85 @@ func createKlusterlet(name, clusterName, agentNamespace string) (realClusterName
		},
	}

	Expect(name).NotTo(BeEmpty())
	if agentNamespace == "" {
		agentNamespace = t.klusterletDefaultNamespace
	}

	// create agentNamespace and bootstrap-hub-kubeconfig secret
	if agentNamespace != "" && agentNamespace != klusterletDefaultNamespace {
		_, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), agentNamespace, metav1.GetOptions{})
		if err != nil && errors.IsNotFound(err) {
			namespace := v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: agentNamespace,
				},
			}
			_, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), &namespace, metav1.CreateOptions{})
			Expect(err).NotTo(HaveOccurred())
	// create agentNamespace
	namespace := &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: agentNamespace,
		},
	}
	if _, err := t.KubeClient.CoreV1().Namespaces().Get(context.TODO(), agentNamespace, metav1.GetOptions{}); err != nil {
		if !errors.IsNotFound(err) {
			klog.Errorf("failed to get ns %v. %v", agentNamespace, err)
			return nil, err
		}

			secret := bootstrapHubSecret.DeepCopy()
			secret.ObjectMeta.ResourceVersion = ""
			secret.SetNamespace(agentNamespace)
			_, err = kubeClient.CoreV1().Secrets(agentNamespace).Create(context.TODO(), secret, metav1.CreateOptions{})
			Expect(err).NotTo(HaveOccurred())
		if _, err := t.KubeClient.CoreV1().Namespaces().Create(context.TODO(),
			namespace, metav1.CreateOptions{}); err != nil {
			klog.Errorf("failed to create ns %v. %v", namespace, err)
			return nil, err
		}
	}

	// create bootstrap-hub-kubeconfig secret
	secret := t.bootstrapHubSecret.DeepCopy()
	if _, err := t.KubeClient.CoreV1().Secrets(agentNamespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}); err != nil {
		if !errors.IsNotFound(err) {
			klog.Errorf("failed to get secret %v in ns %v. %v", secret.Name, agentNamespace, err)
			return nil, err
		}
		if _, err = t.KubeClient.CoreV1().Secrets(agentNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
			klog.Errorf("failed to create secret %v in ns %v. %v", secret, agentNamespace, err)
			return nil, err
		}
	}

	// create klusterlet CR
	realKlusterlet, err := operatorClient.OperatorV1().Klusterlets().Create(context.TODO(), klusterlet, metav1.CreateOptions{})
	Expect(err).NotTo(HaveOccurred())
	realClusterName = realKlusterlet.Spec.ClusterName
	realKlusterlet, err := t.OperatorClient.OperatorV1().Klusterlets().Create(context.TODO(),
		klusterlet, metav1.CreateOptions{})
	if err != nil {
		klog.Errorf("failed to create klusterlet %v. %v", klusterlet.Name, err)
		return nil, err
	}

	// the managed cluster should be created
	Eventually(func() error {
		clusters, err := clusterClient.ClusterV1().ManagedClusters().List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return err
		}
		if len(clusters.Items) == 0 {
			return fmt.Errorf("there is no managed cluster created")
		}
		if len(clusters.Items) > 1 {
			return fmt.Errorf("there are %v managed clusters created: %v", len(clusters.Items), clusters.Items)
		}

		if realClusterName != "" && realClusterName != clusters.Items[0].Name {
			return fmt.Errorf("the managed cluster %v is not the one created %v", clusters.Items[0].Name, realClusterName)
		}

		if realClusterName == "" {
			realClusterName = clusters.Items[0].Name
		}

		return nil
	}, eventuallyTimeout*4, eventuallyInterval*4).Should(Succeed())

	// approve csr
	approveCSR(realClusterName)

	// accept client
	acceptsClient(realClusterName)
	return
	return realKlusterlet, nil
}
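
Taken together, the refactored flow a caller drives is: create the klusterlet, wait for its ManagedCluster to appear, approve the CSR, accept the client, then poll the cluster status. A condensed sketch mirroring the spec code later in this commit (all names are illustrative):

	_, err := t.CreateKlusterlet("my-klusterlet", "my-cluster", "my-agent-ns") // hypothetical names
	Expect(err).ToNot(HaveOccurred())
	Eventually(func() error {
		_, err := t.GetCreatedManagedCluster("my-cluster")
		return err
	}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
	Expect(t.ApproveCSR("my-cluster")).To(Succeed())
	Expect(t.AcceptsClient("my-cluster")).To(Succeed())
	Eventually(func() error {
		return t.CheckManagedClusterStatus("my-cluster")
	}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())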

func approveCSR(clusterName string) {
	var csrs *certificatesv1beta1.CertificateSigningRequestList
	var csrClient = kubeClient.CertificatesV1beta1().CertificateSigningRequests()
	var err error
	Eventually(func() error {
		csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{
			LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name = %v", clusterName),
		})
		if err != nil {
			return err
		}
		if len(csrs.Items) == 0 {
			return fmt.Errorf("there is no csr related to cluster %v", clusterName)
		}
func (t *Tester) GetCreatedManagedCluster(clusterName string) (*clusterv1.ManagedCluster, error) {
	if clusterName == "" {
		return nil, fmt.Errorf("the name of the managedcluster should not be empty")
	}

		return nil
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
	cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	return cluster, nil
}

func (t *Tester) ApproveCSR(clusterName string) error {
	var csrs *certificatesv1beta1.CertificateSigningRequestList
	var csrClient = t.KubeClient.CertificatesV1beta1().CertificateSigningRequests()
	var err error

	if csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{
		LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name = %v", clusterName)}); err != nil {
		return err
	}
	if len(csrs.Items) == 0 {
		return fmt.Errorf("there is no csr related to cluster %v", clusterName)
	}

	for i := range csrs.Items {
		csr := &csrs.Items[i]
		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
			csr, err = csrClient.Get(context.TODO(), csr.Name, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())
			if csr, err = csrClient.Get(context.TODO(), csr.Name, metav1.GetOptions{}); err != nil {
				return err
			}

			if isCSRInTerminalState(&csr.Status) {
				return nil
@@ -153,11 +235,14 @@ func approveCSR(clusterName string) {
				Reason:  "Approved by E2E",
				Message: "Approved as part of e2e",
			})
			_, err := csrClient.UpdateApproval(context.TODO(), csr, metav1.UpdateOptions{})
			_, err = csrClient.UpdateApproval(context.TODO(), csr, metav1.UpdateOptions{})
			return err
		})
		Expect(err).NotTo(HaveOccurred())
		if err != nil {
			return err
		}
	}
	return nil
}

func isCSRInTerminalState(status *certificatesv1beta1.CertificateSigningRequestStatus) bool {
@@ -172,26 +257,30 @@ func isCSRInTerminalState(status *certificatesv1beta1.CertificateSigningRequestS
	return false
}

func acceptsClient(clusterName string) {
func (t *Tester) AcceptsClient(clusterName string) error {
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		var err error
		managedCluster, err := clusterClient.ClusterV1().ManagedClusters().Get(context.TODO(),
		managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(),
			clusterName, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())
		if err != nil {
			return err
		}

		managedCluster.Spec.HubAcceptsClient = true
		managedCluster.Spec.LeaseDurationSeconds = 5
		managedCluster, err = clusterClient.ClusterV1().ManagedClusters().Update(context.TODO(),
		managedCluster, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(),
			managedCluster, metav1.UpdateOptions{})
		return err
	})
	Expect(err).NotTo(HaveOccurred())
	return err
}

func managedClusterReady(clusterName string) error {
	managedCluster, err := clusterClient.ClusterV1().ManagedClusters().Get(context.TODO(),
func (t *Tester) CheckManagedClusterStatus(clusterName string) error {
	managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(),
		clusterName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	if err != nil {
		return err
	}

	var okCount = 0
	for _, condition := range managedCluster.Status.Conditions {
		if (condition.Type == clusterv1.ManagedClusterConditionHubAccepted ||
@@ -223,7 +312,7 @@ func newConfigmap(namespace, name string, data map[string]string) *corev1.Config
	}
}

func createWorkOfConfigMap(name, clusterName, configMapName, configMapNamespace string) {
func (t *Tester) CreateWorkOfConfigMap(name, clusterName, configMapName, configMapNamespace string) (*workapiv1.ManifestWork, error) {
	manifest := workapiv1.Manifest{}
	manifest.Object = newConfigmap(configMapNamespace, configMapName, map[string]string{"a": "b"})
	manifestWork := &workapiv1.ManifestWork{
@@ -238,133 +327,107 @@ func createWorkOfConfigMap(name, clusterName, configMapName, configMapNamespace
			},
		},
	}
	_, err := workClient.WorkV1().ManifestWorks(clusterName).

	return t.WorkClient.WorkV1().ManifestWorks(clusterName).
		Create(context.TODO(), manifestWork, metav1.CreateOptions{})
	Expect(err).NotTo(HaveOccurred())
}

func cleanResources() {
func (t *Tester) cleanKlusterletResources(klusterletName string) error {
	if klusterletName == "" {
		return fmt.Errorf("the klusterlet name should not be empty")
	}

	clusterName, err := t.GetClusterNameFromKlusterlet(klusterletName)
	if err != nil {
		return err
	}

	// clean the manifest works
	manifestWorks, err := workClient.WorkV1().ManifestWorks("").
	manifestWorks, err := t.WorkClient.WorkV1().ManifestWorks(clusterName).
		List(context.TODO(), metav1.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	if err != nil {
		return err
	}

	for _, work := range manifestWorks.Items {
		// ignore if failed to delete
		_ = workClient.WorkV1().ManifestWorks(work.Namespace).
		_ = t.WorkClient.WorkV1().ManifestWorks(work.Namespace).
			Delete(context.TODO(), work.Name, metav1.DeleteOptions{})
	}

	// clean the klusterlets
	klusterlets, err := operatorClient.OperatorV1().Klusterlets().List(context.TODO(), metav1.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	for _, klusterlet := range klusterlets.Items {
		err = operatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterlet.Name, metav1.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred())
	err = t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{})
	if err != nil {
		return err
	}

	// the klusterlets should be cleaned up
	Eventually(func() error {
		klusterlets, err := operatorClient.OperatorV1().Klusterlets().List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return err
		}
		if len(klusterlets.Items) != 0 {
			return fmt.Errorf("the klusterlets are not deleted")
		}
		return nil
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())

	// clean the managed clusters
	clusters, err := clusterClient.ClusterV1().ManagedClusters().List(context.TODO(), metav1.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	for _, cluster := range clusters.Items {
		err = clusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), cluster.Name, metav1.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred())
	err = t.ClusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{})
	if err != nil {
		return err
	}

	// the managed clusters should be cleaned up
	Eventually(func() error {
		clusters, err := clusterClient.ClusterV1().ManagedClusters().List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return err
		}
		if len(clusters.Items) != 0 {
			return fmt.Errorf("the managed clusters are not deleted")
		}
		return nil
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())

	// the pods in open-cluster-management-agent ns should be cleaned up
	Eventually(func() error {
		var err error
		pods, err := kubeClient.CoreV1().Pods(klusterletDefaultNamespace).
			List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return err
		}
		if len(pods.Items) != 0 {
			return fmt.Errorf("the pods in ns %v are not deleted", klusterletDefaultNamespace)
		}
		return nil
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
	return nil
}

func checkHubReady() {
func (t *Tester) CheckHubReady() error {
	// make sure open-cluster-management-hub namespace is created
	Eventually(func() error {
		_, err := kubeClient.CoreV1().Namespaces().
			Get(context.TODO(), clusterManagerNamespace, metav1.GetOptions{})
	if _, err := t.KubeClient.CoreV1().Namespaces().
		Get(context.TODO(), t.clusterManagerNamespace, metav1.GetOptions{}); err != nil {
		return err
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
	}

	// make sure hub deployments are created
	Eventually(func() error {
		_, err := kubeClient.AppsV1().Deployments(clusterManagerNamespace).
			Get(context.TODO(), hubRegistrationDeployment, metav1.GetOptions{})
	if _, err := t.KubeClient.AppsV1().Deployments(t.clusterManagerNamespace).
		Get(context.TODO(), t.hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
		return err
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
	}

	Eventually(func() error {
		_, err := kubeClient.AppsV1().Deployments(clusterManagerNamespace).
			Get(context.TODO(), hubWebhookDeployment, metav1.GetOptions{})
	if _, err := t.KubeClient.AppsV1().Deployments(t.clusterManagerNamespace).
		Get(context.TODO(), t.hubWebhookDeployment, metav1.GetOptions{}); err != nil {
		return err
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
	}
	return nil
}

func checkKlusterletOperatorReady() {
func (t *Tester) CheckKlusterletOperatorReady() error {
	// make sure klusterlet operator deployment is created
	Eventually(func() error {
		_, err := kubeClient.AppsV1().Deployments(operatorNamespace).
			Get(context.TODO(), klusterletOperator, metav1.GetOptions{})
		return err
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())

	// make sure open-cluster-management-agent namespace is created
	Eventually(func() error {
		_, err := kubeClient.CoreV1().Namespaces().
			Get(context.TODO(), klusterletDefaultNamespace, metav1.GetOptions{})
		return err
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())

	// make sure bootstrap-hub-kubeconfig secret is created
	Eventually(func() error {
		var err error
		bootstrapHubSecret, err = kubeClient.CoreV1().Secrets(klusterletDefaultNamespace).
			Get(context.TODO(), bootstrapHubKubeConfigSecret, metav1.GetOptions{})
		return err
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())

	// make sure there are no pods in the open-cluster-management-agent namespace
	Eventually(func() error {
		var err error
		pods, err := kubeClient.CoreV1().Pods(klusterletDefaultNamespace).
			List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return err
		}
		if len(pods.Items) != 0 {
			return fmt.Errorf("the pods in ns %v are not deleted", klusterletDefaultNamespace)
		}
		return nil
	}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
	_, err := t.KubeClient.AppsV1().Deployments(t.operatorNamespace).
		Get(context.TODO(), t.klusterletOperator, metav1.GetOptions{})
	return err
}

func (t *Tester) GetClusterNameFromKlusterlet(klusterletName string) (string, error) {
	if klusterletName == "" {
		return "", fmt.Errorf("the klusterlet name should not be empty")
	}

	klusterlet, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(),
		klusterletName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	clusterName := klusterlet.Spec.ClusterName
	if clusterName != "" {
		return clusterName, nil
	}

	klusterletNamespace := klusterlet.Spec.Namespace
	if klusterletNamespace == "" {
		klusterletNamespace = helpers.KlusterletDefaultNamespace
	}

	hubKubeconfigSecret, err := t.KubeClient.CoreV1().Secrets(klusterletNamespace).Get(context.TODO(),
		"hub-kubeconfig-secret", metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	clusterNameByte, ok := hubKubeconfigSecret.Data["cluster-name"]
	if !ok {
		return "", fmt.Errorf("there is no cluster-name in secret, %+v", hubKubeconfigSecret)
	}

	return string(clusterNameByte), nil
}

@@ -3,12 +3,6 @@ package e2e
import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	clusterclient "github.com/open-cluster-management/api/client/cluster/clientset/versioned"
	operatorclient "github.com/open-cluster-management/api/client/operator/clientset/versioned"
	workv1client "github.com/open-cluster-management/api/client/work/clientset/versioned"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"os"
	"testing"
)

@@ -17,35 +11,25 @@ func TestE2E(t *testing.T) {
	RunSpecs(t, "E2E Suite")
}

var t *Tester

// This suite is sensitive to the following environment variables:
//
// - KUBECONFIG is the location of the kubeconfig file to use
var _ = BeforeSuite(func() {
	kubeconfig := os.Getenv("KUBECONFIG")
	err := func() error {
		var err error
		clusterCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			return err
		}
		kubeClient, err = kubernetes.NewForConfig(clusterCfg)
		if err != nil {
			return err
		}
		operatorClient, err = operatorclient.NewForConfig(clusterCfg)
		if err != nil {
			return err
		}
		clusterClient, err = clusterclient.NewForConfig(clusterCfg)
		if err != nil {
			return err
		}
		workClient, err = workv1client.NewForConfig(clusterCfg)
		return err
	}()
	var err error

	t, err = NewTester("")
	Expect(err).ToNot(HaveOccurred())

	checkHubReady()
	checkKlusterletOperatorReady()
	cleanResources()
	Eventually(func() error {
		return t.CheckHubReady()
	}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())

	Eventually(func() error {
		return t.CheckKlusterletOperatorReady()
	}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())

	err = t.SetBootstrapHubSecret("")
	Expect(err).ToNot(HaveOccurred())
})
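
As a usage note (an assumption, not stated in this commit): with the suite wired this way, the tests are typically pointed at a hub cluster through the KUBECONFIG environment variable, for example:

	KUBECONFIG=$HOME/.kube/config go test ./test/e2e -ginkgo.v

The package path and flags here are illustrative.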
@@ -8,44 +8,78 @@ import (
	"k8s.io/apimachinery/pkg/util/rand"
)

var _ = Describe("Test Cases: Create klusterlet", func() {
var _ = Describe("Create klusterlet CR", func() {
	var klusterletName = ""
	BeforeEach(func() {
		klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
		checkHubReady()
		checkKlusterletOperatorReady()
	})

	AfterEach(func() {
		By("clean resources after each case")
		cleanResources()
		By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
		t.cleanKlusterletResources(klusterletName)
	})

	It("Sub Case: Created klusterlet with managed cluster name", func() {
	It("Create klusterlet CR with managed cluster name", func() {
		var clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
		var agentNamespace = fmt.Sprintf("e2e-agent-%s", rand.String(6))

		By("create klusterlet with managed cluster name")
		realClusterName := createKlusterlet(klusterletName, clusterName, agentNamespace)
		Expect(realClusterName).Should(Equal(clusterName))
		By(fmt.Sprintf("create klusterlet %v with managed cluster name %v", klusterletName, clusterName))
		_, err := t.CreateKlusterlet(klusterletName, clusterName, agentNamespace)
		Expect(err).ToNot(HaveOccurred())

		By("waiting for the managed cluster to be created and ready")
		By(fmt.Sprintf("waiting for the managed cluster %v to be created", clusterName))
		Eventually(func() error {
			return managedClusterReady(realClusterName)
		}, eventuallyTimeout*5, eventuallyInterval*5).Should(Succeed())
			_, err := t.GetCreatedManagedCluster(clusterName)
			return err
		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())

		By(fmt.Sprintf("approve the created managed cluster %v", clusterName))
		Eventually(func() error {
			return t.ApproveCSR(clusterName)
		}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())

		By(fmt.Sprintf("accept the created managed cluster %v", clusterName))
		Eventually(func() error {
			return t.AcceptsClient(clusterName)
		}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())

		By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName))
		Eventually(func() error {
			return t.CheckManagedClusterStatus(clusterName)
		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
	})

It("Sub Case: Created klusterlet without managed cluster name", func() {
|
||||
It("Created klusterlet without managed cluster name", func() {
|
||||
var clusterName = ""
|
||||
var agentNamespace = ""
|
||||
var err error
|
||||
By(fmt.Sprintf("create klusterlet %v without managed cluster name", klusterletName))
|
||||
_, err = t.CreateKlusterlet(klusterletName, clusterName, agentNamespace)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
By("create klusterlet without managed cluster name")
|
||||
realClusterName := createKlusterlet(klusterletName, clusterName, agentNamespace)
|
||||
Expect(realClusterName).ShouldNot(BeEmpty())
|
||||
|
||||
By("waiting for the managed cluster to be created and ready")
|
||||
By("waiting for the managed cluster to be created")
|
||||
Eventually(func() error {
|
||||
return managedClusterReady(realClusterName)
|
||||
}, eventuallyTimeout*5, eventuallyInterval*5).Should(Succeed())
|
||||
clusterName, err = t.GetClusterNameFromKlusterlet(klusterletName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = t.GetCreatedManagedCluster(clusterName)
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("approve the created managed cluster %v", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.ApproveCSR(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("accept the created managed cluster %v", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.AcceptsClient(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.CheckManagedClusterStatus(clusterName)
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
	"k8s.io/apimachinery/pkg/util/rand"
)

var _ = Describe("Test Cases: Create klusterlet and then create a configmap by manifestwork", func() {
var _ = Describe("Create klusterlet and then create a configmap by manifestwork", func() {
	var klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
	var clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
	var agentNamespace = fmt.Sprintf("e2e-agent-%s", rand.String(6))
@@ -17,44 +17,58 @@ var _ = Describe("Test Cases: Create klusterlet and then create a configmap by m
	var configMapNamespace = "default"
	var configMapName = fmt.Sprintf("e2e-configmap-%s", rand.String(6))

	BeforeEach(func() {
		checkHubReady()
		checkKlusterletOperatorReady()
	})

	AfterEach(func() {
		By("clean resources after each case")
		cleanResources()
		By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
		t.cleanKlusterletResources(klusterletName)
	})

	It("Sub Case: Create configmap using manifestwork and then delete klusterlet", func() {
		By("create klusterlet")
		realClusterName := createKlusterlet(klusterletName, clusterName, agentNamespace)
		Expect(realClusterName).Should(Equal(clusterName))
	It("Create configmap using manifestwork and then delete klusterlet", func() {
		var err error
		By(fmt.Sprintf("create klusterlet %v with managed cluster name %v", klusterletName, clusterName))
		_, err = t.CreateKlusterlet(klusterletName, clusterName, agentNamespace)
		Expect(err).ToNot(HaveOccurred())

		By("waiting for managed cluster to be ready")
		By(fmt.Sprintf("waiting for the managed cluster %v to be created", clusterName))
		Eventually(func() error {
			return managedClusterReady(realClusterName)
		}, eventuallyTimeout*5, eventuallyInterval*5).Should(Succeed())
			_, err = t.GetCreatedManagedCluster(clusterName)
			return err
		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())

		By("create configmap using manifestwork")
		createWorkOfConfigMap(workName, realClusterName, configMapName, configMapNamespace)

		By("waiting for configmap to be created")
		By(fmt.Sprintf("approve the created managed cluster %v", clusterName))
		Eventually(func() error {
			_, err := kubeClient.CoreV1().ConfigMaps(configMapNamespace).
			return t.ApproveCSR(clusterName)
		}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())

		By(fmt.Sprintf("accept the created managed cluster %v", clusterName))
		Eventually(func() error {
			return t.AcceptsClient(clusterName)
		}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())

		By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName))
		Eventually(func() error {
			return t.CheckManagedClusterStatus(clusterName)
		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())

		By(fmt.Sprintf("create configmap %v/%v using manifestwork %v/%v", configMapNamespace,
			configMapName, clusterName, workName))
		_, err = t.CreateWorkOfConfigMap(workName, clusterName, configMapName, configMapNamespace)
		Expect(err).ToNot(HaveOccurred())

		By(fmt.Sprintf("waiting for configmap %v/%v to be created", configMapNamespace, configMapName))
		Eventually(func() error {
			_, err := t.KubeClient.CoreV1().ConfigMaps(configMapNamespace).
				Get(context.TODO(), configMapName, metav1.GetOptions{})
			return err
		}, eventuallyTimeout*5, eventuallyInterval*5).Should(Succeed())
		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())

		// Manifest work and ns should not be deleted after deleting the klusterlet
		By("delete klusterlet")
		err := operatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{})
		By(fmt.Sprintf("delete klusterlet %v", klusterletName))
		err = t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred())

		By("waiting for pods in agent namespace to be deleted")
		By(fmt.Sprintf("waiting for pods in agent namespace %v to be deleted", agentNamespace))
		Eventually(func() error {
			pods, err := kubeClient.CoreV1().Pods(agentNamespace).
			pods, err := t.KubeClient.CoreV1().Pods(agentNamespace).
				List(context.TODO(), metav1.ListOptions{})
			if err != nil {
				return err
@@ -63,20 +77,20 @@ var _ = Describe("Test Cases: Create klusterlet and then create a configmap by m
				return fmt.Errorf("the pods are not deleted in ns %v", agentNamespace)
			}
			return nil
		}, eventuallyTimeout*5, eventuallyInterval*5).Should(Succeed())
		}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())

		By("check that managed cluster namespace should not be deleted")
		By(fmt.Sprintf("check that managed cluster namespace %v should not be deleted", clusterName))
		Eventually(func() error {
			_, err := kubeClient.CoreV1().Namespaces().
			_, err := t.KubeClient.CoreV1().Namespaces().
				Get(context.TODO(), clusterName, metav1.GetOptions{})
			return err
		}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
		}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())

		By("check that manifestwork should not be deleted")
		By(fmt.Sprintf("check that manifestwork %v/%v should not be deleted", clusterName, workName))
		Eventually(func() error {
			_, err := workClient.WorkV1().ManifestWorks(clusterName).
			_, err := t.WorkClient.WorkV1().ManifestWorks(clusterName).
				Get(context.TODO(), workName, metav1.GetOptions{})
			return err
		}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
		}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
	})
})