Mirror of https://github.com/open-cluster-management-io/ocm.git (synced 2026-02-14 10:00:11 +00:00)
Refactor: timeout and interval usage in e2e (#563)
Signed-off-by: xuezhaojun <zxue@redhat.com>
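For context, the pattern applied throughout the diff below: instead of passing t.EventuallyTimeout / t.EventuallyInterval to every Gomega Eventually call, the suite sets Gomega-wide defaults once in BeforeSuite and the individual assertions drop the explicit arguments. A minimal sketch of that pattern, assuming a hypothetical checkReady helper standing in for the real probes (not code from this commit):

package e2e_test

import (
	"testing"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "timeout refactor sketch")
}

var _ = ginkgo.BeforeSuite(func() {
	// Suite-wide defaults, set once; the assertions below no longer repeat them.
	gomega.SetDefaultEventuallyTimeout(90 * time.Second)
	gomega.SetDefaultEventuallyPollingInterval(5 * time.Second)
})

// checkReady is a hypothetical probe standing in for helpers like t.CheckHubReady.
func checkReady() error { return nil }

var _ = ginkgo.It("waits using the suite defaults", func() {
	// Before: gomega.Eventually(checkReady, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
	// After:
	gomega.Eventually(checkReady).Should(gomega.Succeed())
})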
@@ -7,13 +7,14 @@ if [ "$#" -lt 1 ]; then
fi

# Read command line arguments
# Example usage: IMAGE_TAG=e2e sh hack/e2e_flaky_error_test.sh 10 no
RUN_TIMES=$1
BUILD_IMAGES=${2:-yes} # Default to 'yes' if the second argument is not provided
KLUSTERLET_DEPLOY_MODE=${3:-Default} # Use Default if the third argument is not provided

# Conditionally build images for testing
if [ "$BUILD_IMAGES" = "yes" ]; then
make images build
IMAGE_TAG=$IMAGE_TAG make images
fi

# Create the directory to store test results with timestamp

@@ -42,6 +43,8 @@ do
test_output=$(IMAGE_TAG=$IMAGE_TAG KLUSTERLET_DEPLOY_MODE=$KLUSTERLET_DEPLOY_MODE KUBECONFIG=.kubeconfig make test-e2e 2>&1)
test_exit_code=$?

echo "$test_output"

# Determine test result and update the respective list
if [ $test_exit_code -eq 0 ]; then
echo "Test $i passed"
@@ -21,7 +21,7 @@ import (
const availableLabelValue = "available"
var _ = ginkgo.Describe("Addon Health Check", func() {
var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func() {
ginkgo.Context("Checking addon lease on managed cluster to update addon status", func() {
var addOnName string
ginkgo.BeforeEach(func() {

@@ -69,7 +69,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
return fmt.Errorf("condition should be available")
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {

@@ -82,7 +82,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
}
key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
return cluster.Labels[key] == availableLabelValue
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
})
ginkgo.It("Should update addon status to unavailable if addon stops to update its lease", func() {

@@ -107,7 +107,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
return fmt.Errorf("condition should be available")
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {

@@ -120,7 +120,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
}
key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
return cluster.Labels[key] == availableLabelValue
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
ginkgo.By(fmt.Sprintf("Updating lease %q with a past time", addOnName))
lease, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Get(context.TODO(), addOnName, metav1.GetOptions{})

@@ -138,7 +138,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
return fmt.Errorf("condition should be available")
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {

@@ -151,7 +151,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
}
key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
return cluster.Labels[key] == "unhealthy"
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
})
ginkgo.It("Should update addon status to unknown if there is no lease for this addon", func() {

@@ -176,7 +176,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
return fmt.Errorf("condition should be available")
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {

@@ -189,7 +189,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
}
key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
return cluster.Labels[key] == availableLabelValue
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
ginkgo.By(fmt.Sprintf("Deleting lease %q", addOnName))
err = t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{})

@@ -204,7 +204,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
return fmt.Errorf("condition should be available")
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {

@@ -217,7 +217,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
}
key := fmt.Sprintf("feature.open-cluster-management.io/addon-%s", addOnName)
return cluster.Labels[key] == "unreachable"
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
})
})

@@ -283,7 +283,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
return fmt.Errorf("available status should be true")
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
// delete registration agent to stop agent update its status
ginkgo.By("Stoping klusterlet")

@@ -301,7 +301,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
return err
}
return fmt.Errorf("klusterlet is still deleting")
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
// for speeding up test, update managed cluster status to unknown manually
ginkgo.By(fmt.Sprintf("Updating managed cluster %s status to unknown", clusterName))

@@ -334,7 +334,7 @@ var _ = ginkgo.Describe("Addon Health Check", func() {
return fmt.Errorf("available status should be unknown")
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
})
})
})
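The other recurring change in the hunks above and below is tagging each Describe with ginkgo.Label ("addon-lease", "addon", "klusterlet", "placement", ...), which lets CI select a subset of specs by label. A minimal sketch of how such labels are declared and consumed; the suite name here is illustrative, not taken from this commit:

package e2e_test

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestLabeled(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	// Run only specs carrying a given label, e.g.:
	//   ginkgo --label-filter="addon-lease" ./test/e2e
	ginkgo.RunSpecs(t, "labeled specs")
}

// Labels attach to the Describe node and are inherited by every spec inside it.
var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func() {
	ginkgo.It("is selectable via --label-filter", func() {
		gomega.Expect(true).To(gomega.BeTrue())
	})
})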
@@ -10,7 +10,7 @@ import (
"k8s.io/apimachinery/pkg/util/rand"
)
var _ = Describe("Manage the managed cluster addons", func() {
var _ = Describe("Manage the managed cluster addons", Label("addon"), func() {
var addOnName string
BeforeEach(func() {
addOnName = fmt.Sprintf("e2e-addon-%s", rand.String(6))

@@ -33,7 +33,7 @@ var _ = Describe("Manage the managed cluster addons", func() {
By(fmt.Sprintf("wait the addon %v available condition to be true", addOnName))
Eventually(func() error {
return t.CheckManagedClusterAddOnStatus(clusterName, addOnName)
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}).Should(Succeed())
})
It("Create one managed cluster addon and make sure it is available in Hosted mode", func() {

@@ -48,6 +48,6 @@ var _ = Describe("Manage the managed cluster addons", func() {
By(fmt.Sprintf("wait the addon %v available condition to be true", addOnName))
Eventually(func() error {
return t.CheckManagedClusterAddOnStatus(clusterName, addOnName)
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}).Should(Succeed())
})
})
@@ -71,7 +71,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// the addon manager deployment should be running
gomega.Eventually(t.CheckHubReady, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.Succeed())
gomega.Eventually(t.CheckHubReady).Should(gomega.Succeed())
ginkgo.By(fmt.Sprintf("create addon template resources for cluster %v", clusterName))
err = createResourcesFromYamlFiles(context.Background(), t.HubDynamicClient, t.hubRestMapper, s,

@@ -96,7 +96,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
ginkgo.By(fmt.Sprintf("wait the addon %v/%v available condition to be true", clusterName, addOnName))
gomega.Eventually(func() error {
return t.CheckManagedClusterAddOnStatus(clusterName, addOnName)
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
})
ginkgo.AfterEach(func() {

@@ -118,7 +118,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
}
return fmt.Errorf("the managedClusterAddon should be deleted")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("delete addon template resources for cluster %v", clusterName))
err = deleteResourcesFromYamlFiles(context.Background(), t.HubDynamicClient, t.hubRestMapper, s,

@@ -162,7 +162,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("Template type addon should be functioning", func() {

@@ -171,14 +171,14 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
_, err := t.HubKubeClient.CoreV1().Secrets(addonInstallNamespace).Get(context.TODO(),
templateagent.HubKubeconfigSecretName(addOnName), metav1.GetOptions{})
return err
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.Succeed())
}).Should(gomega.Succeed())
ginkgo.By("Check custom signer secret is created")
gomega.Eventually(func() error {
_, err := t.HubKubeClient.CoreV1().Secrets(addonInstallNamespace).Get(context.TODO(),
templateagent.CustomSignedSecretName(addOnName, customSignerName), metav1.GetOptions{})
return err
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.Succeed())
}).Should(gomega.Succeed())
ginkgo.By("Make sure addon is functioning")
configmap := &corev1.ConfigMap{

@@ -207,7 +207,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
return fmt.Errorf("expected configmap is not correct, %v", copyiedConfig.Data)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By("Make sure manifestwork config is configured")
manifestWork, err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(),

@@ -258,7 +258,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
}
return fmt.Errorf("the configmap should be deleted")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
_, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(

@@ -271,7 +271,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
}
return fmt.Errorf("the managedClusterAddon should be deleted")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By("The pre-delete job should be deleted ")
gomega.Eventually(func() error {

@@ -285,7 +285,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
}
return fmt.Errorf("the job should be deleted")
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("Template type addon should be configured by addon deployment config for image override"+
|
||||
@@ -321,12 +321,12 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
|
||||
_, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(
|
||||
context.Background(), newCluster, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Prepare a AddOnDeploymentConfig for addon image override config")
|
||||
gomega.Eventually(func() error {
|
||||
return prepareImageOverrideAddOnDeploymentConfig(clusterName, addonInstallNamespace)
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Add the configs to ManagedClusterAddOn")
|
||||
gomega.Eventually(func() error {
|
||||
@@ -354,7 +354,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Make sure addon is configured")
|
||||
gomega.Eventually(func() error {
|
||||
@@ -374,7 +374,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Restore the managed cluster annotation")
|
||||
gomega.Eventually(func() error {
|
||||
@@ -389,7 +389,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
|
||||
_, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(
|
||||
context.Background(), newCluster, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// restore the image override config, because the override image is not available
|
||||
// but it is needed by the pre-delete job
|
||||
@@ -408,7 +408,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Make sure addon config is restored")
|
||||
gomega.Eventually(func() error {
|
||||
@@ -428,14 +428,14 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
ginkgo.It("Template type addon should be configured by addon deployment config for node placement", func() {
|
||||
ginkgo.By("Prepare a AddOnDeploymentConfig for addon image override config")
|
||||
gomega.Eventually(func() error {
|
||||
return prepareNodePlacementAddOnDeploymentConfig(clusterName, addonInstallNamespace)
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Add the configs to ManagedClusterAddOn")
|
||||
gomega.Eventually(func() error {
|
||||
@@ -463,7 +463,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Make sure addon is configured")
|
||||
gomega.Eventually(func() error {
|
||||
@@ -482,7 +482,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
})
|
||||
|
||||
@@ -497,7 +497,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
return prepareInstallNamespace(clusterName, overrideNamespace.Name)
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By("Add the configs to ManagedClusterAddOn")
gomega.Eventually(func() error {

@@ -525,14 +525,14 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By("Make sure addon is configured")
gomega.Eventually(func() error {
_, err := t.SpokeKubeClient.AppsV1().Deployments(overrideNamespace.Name).Get(
context.Background(), "hello-template-agent", metav1.GetOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("Template type addon's image should be overrode by cluster annotation", func() {

@@ -561,7 +561,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
_, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(
context.Background(), newCluster, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By("Make sure addon is configured")
gomega.Eventually(func() error {

@@ -581,7 +581,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
// restore the image override config, because the override image is not available
// but it is needed by the pre-delete job

@@ -598,7 +598,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
_, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(
context.Background(), newCluster, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By("Make sure addon config is restored")
gomega.Eventually(func() error {

@@ -618,7 +618,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
})
})
@@ -67,8 +67,6 @@ type Tester struct {
HubDynamicClient dynamic.Interface
SpokeDynamicClient dynamic.Interface
bootstrapHubSecret *corev1.Secret
EventuallyTimeout time.Duration
EventuallyInterval time.Duration
clusterManagerName string
clusterManagerNamespace string
operatorNamespace string

@@ -81,12 +79,10 @@ type Tester struct {
// kubeconfigPath is the path of kubeconfig file, will be get from env "KUBECONFIG" by default.
// bootstrapHubSecret is the bootstrap hub kubeconfig secret, and the format is "namespace/secretName".
// Default of bootstrapHubSecret is helpers.KlusterletDefaultNamespace/helpers.BootstrapHubKubeConfig.
func NewTester(hubKubeConfigPath, spokeKubeConfigPath, registrationImage, workImage, singletonImage string, timeout time.Duration) *Tester {
func NewTester(hubKubeConfigPath, spokeKubeConfigPath, registrationImage, workImage, singletonImage string) *Tester {
var tester = Tester{
hubKubeConfigPath: hubKubeConfigPath,
spokeKubeConfigPath: spokeKubeConfigPath,
EventuallyTimeout: timeout, // seconds
EventuallyInterval: 1 * time.Second, // seconds
clusterManagerName: "cluster-manager", // same name as deploy/cluster-manager/config/samples
clusterManagerNamespace: helpers.ClusterManagerDefaultNamespace,
operatorNamespace: "open-cluster-management",

@@ -174,16 +170,6 @@ func (t *Tester) Init() error {
return nil
}
func (t *Tester) SetEventuallyTimeout(timeout time.Duration) *Tester {
t.EventuallyInterval = timeout
return t
}
func (t *Tester) SetEventuallyInterval(timeout time.Duration) *Tester {
t.EventuallyTimeout = timeout
return t
}
func (t *Tester) SetOperatorNamespace(ns string) *Tester {
t.operatorNamespace = ns
return t
@@ -314,19 +300,19 @@ func (t *Tester) CreateApprovedKlusterlet(name, clusterName, klusterletNamespace
gomega.Eventually(func() error {
_, err = t.GetCreatedManagedCluster(clusterName)
return err
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
gomega.Eventually(func() error {
return t.ApproveCSR(clusterName)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.Succeed())
}).Should(gomega.Succeed())
gomega.Eventually(func() error {
return t.AcceptsClient(clusterName)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.Succeed())
}).Should(gomega.Succeed())
gomega.Eventually(func() error {
return t.CheckManagedClusterStatus(clusterName)
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
return klusterlet, nil
}

@@ -520,7 +506,7 @@ func (t *Tester) cleanManifestWorks(clusterName, workName string) error {
gomega.Eventually(func() bool {
_, err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), workName, metav1.GetOptions{})
return errors.IsNotFound(err)
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
return nil
}

@@ -549,7 +535,7 @@ func (t *Tester) cleanKlusterletResources(klusterletName, clusterName string) er
klog.Infof("get klusterlet %s error: %v", klusterletName, err)
}
return false
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
// clean the managed clusters
err = t.ClusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{})

@@ -570,7 +556,7 @@ func (t *Tester) cleanKlusterletResources(klusterletName, clusterName string) er
klog.Infof("get managed cluster %s error: %v", klusterletName, err)
}
return false
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
return nil
}

@@ -609,7 +595,7 @@ func (t *Tester) CheckHubReady() error {
return fmt.Errorf("deployment %s should have %d but got %d ready replicas", hubRegistrationWebhookDeployment, replicas, readyReplicas)
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeNil())
}).Should(gomega.BeNil())
gomega.Eventually(func() error {
workWebhookDeployment, err := t.HubKubeClient.AppsV1().Deployments(t.clusterManagerNamespace).

@@ -623,7 +609,7 @@ func (t *Tester) CheckHubReady() error {
return fmt.Errorf("deployment %s should have %d but got %d ready replicas", hubWorkWebhookDeployment, replicas, readyReplicas)
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeNil())
}).Should(gomega.BeNil())
var hubWorkControllerEnabled, addonManagerControllerEnabled bool
if cm.Spec.WorkConfiguration != nil {

@@ -649,7 +635,7 @@ func (t *Tester) CheckHubReady() error {
return fmt.Errorf("deployment %s should have %d but got %d ready replicas", hubWorkControllerDeployment, replicas, readyReplicas)
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeNil())
}).Should(gomega.BeNil())
}
if _, err := t.HubKubeClient.AppsV1().Deployments(t.clusterManagerNamespace).

@@ -670,7 +656,7 @@ func (t *Tester) CheckHubReady() error {
return fmt.Errorf("deployment %s should have %d but got %d ready replicas", addonManagerDeployment, replicas, readyReplicas)
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.BeNil())
}).Should(gomega.BeNil())
}
return nil
}
@@ -23,7 +23,6 @@ var (
nilExecutorValidating bool
deployKlusterlet bool
managedKubeconfig string
eventuallyTimeout time.Duration
registrationImage string
workImage string
singletonImage string

@@ -36,7 +35,6 @@ func init() {
flag.BoolVar(&nilExecutorValidating, "nil-executor-validating", false, "Whether validate the nil executor or not (default false)")
flag.BoolVar(&deployKlusterlet, "deploy-klusterlet", false, "Whether deploy the klusterlet on the managed cluster or not (default false)")
flag.StringVar(&managedKubeconfig, "managed-kubeconfig", "", "The kubeconfig of the managed cluster")
flag.DurationVar(&eventuallyTimeout, "eventually-timeout", 60*time.Second, "The timeout of Gomega's Eventually (default 60 seconds)")
flag.StringVar(&registrationImage, "registration-image", "", "The image of the registration")
flag.StringVar(&workImage, "work-image", "", "The image of the work")
flag.StringVar(&singletonImage, "singleton-image", "", "The image of the klusterlet agent")

@@ -44,7 +42,7 @@ func init() {
}
func TestE2E(tt *testing.T) {
t = NewTester(hubKubeconfig, managedKubeconfig, registrationImage, workImage, singletonImage, eventuallyTimeout)
t = NewTester(hubKubeconfig, managedKubeconfig, registrationImage, workImage, singletonImage)
OutputFail := func(message string, callerSkip ...int) {
t.OutputDebugLogs()

@@ -61,25 +59,30 @@ func TestE2E(tt *testing.T) {
var _ = BeforeSuite(func() {
var err error
// In most OCM cases, we expect user should see the result in 90 seconds.
// For cases that need more than 90 seconds, please set the timeout in the test case EXPLICITLY.
SetDefaultEventuallyTimeout(90 * time.Second)
SetDefaultEventuallyPollingInterval(5 * time.Second)
Expect(t.Init()).ToNot(HaveOccurred())
Eventually(t.CheckHubReady, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
Eventually(t.CheckHubReady).Should(Succeed())
Eventually(t.CheckKlusterletOperatorReady, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
Eventually(t.CheckKlusterletOperatorReady).Should(Succeed())
err = t.SetBootstrapHubSecret("")
if nilExecutorValidating {
Eventually(func() error {
return t.EnableWorkFeature("NilExecutorValidating")
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}).Should(Succeed())
}
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
return t.EnableWorkFeature("ManifestWorkReplicaSet")
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
Eventually(t.CheckHubReady, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
}).Should(Succeed())
Eventually(t.CheckHubReady).Should(Succeed())
if deployKlusterlet {
klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
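The BeforeSuite above sets the 90-second/5-second suite defaults; specs that genuinely need longer (for example the hosted-klusterlet cases further down) keep an explicit per-assertion budget such as 5*time.Minute, 5*time.Second. A minimal sketch of the two equivalent override forms, assuming a hypothetical waitForCluster helper standing in for calls like t.CheckManagedClusterStatus (durations illustrative):

package e2e_test

import (
	"time"

	"github.com/onsi/gomega"
)

// waitForCluster is a hypothetical stand-in for a probe that can take minutes on a slow hub.
func waitForCluster() error { return nil }

// waitExamples is meant to be called from inside a spec body, where the suite's
// Gomega fail handler is already registered.
func waitExamples() {
	// Positional form, as used in the hosted-klusterlet specs below.
	gomega.Eventually(waitForCluster, 5*time.Minute, 5*time.Second).Should(gomega.Succeed())

	// Fluent form; identical behaviour, sometimes easier to read.
	gomega.Eventually(waitForCluster).
		WithTimeout(5 * time.Minute).
		WithPolling(5 * time.Second).
		Should(gomega.Succeed())
}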
@@ -16,7 +16,7 @@ import (
"open-cluster-management.io/ocm/pkg/operator/helpers"
)
var _ = Describe("Delete hosted klusterlet CR", func() {
var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func() {
var klusterletName string
var clusterName string
var klusterletNamespace string

@@ -39,7 +39,7 @@ var _ = Describe("Delete hosted klusterlet CR", func() {
Eventually(func() error {
err := t.checkKlusterletStatus(klusterletName, "ReadyToApply", "KlusterletPrepareFailed", metav1.ConditionFalse)
return err
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("delete the klusterlet %s", klusterletName))
err = t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(),

@@ -54,7 +54,7 @@ var _ = Describe("Delete hosted klusterlet CR", func() {
return nil
}
return fmt.Errorf("klusterlet still exists")
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("check the agent namespace %s on the management cluster was deleted", klusterletName))
Eventually(func() error {

@@ -64,7 +64,7 @@ var _ = Describe("Delete hosted klusterlet CR", func() {
return nil
}
return fmt.Errorf("klusterlet namespace still exists")
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
}).Should(Succeed())
})
It("Delete klusterlet CR in Hosted mode when the managed cluster was destroyed", func() {

@@ -76,36 +76,36 @@ var _ = Describe("Delete hosted klusterlet CR", func() {
Eventually(func() error {
_, err := t.GetCreatedManagedCluster(clusterName)
return err
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}, 5*time.Minute, 5*time.Second).Should(Succeed())
By(fmt.Sprintf("check klusterlet %s status", klusterletName))
Eventually(func() error {
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded",
"BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue)
return err
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}, 5*time.Minute, 5*time.Second).Should(Succeed())
By(fmt.Sprintf("approve the created managed cluster %v", clusterName))
Eventually(func() error {
return t.ApproveCSR(clusterName)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("accept the created managed cluster %v", clusterName))
Eventually(func() error {
return t.AcceptsClient(clusterName)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName))
Eventually(func() error {
return t.CheckManagedClusterStatus(clusterName)
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}, 5*time.Minute, 5*time.Second).Should(Succeed())
By(fmt.Sprintf("check klusterlet %s status", klusterletName))
Eventually(func() error {
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded",
"HubConnectionFunctional", metav1.ConditionFalse)
return err
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}).Should(Succeed())
// change the kubeconfig host of external managed kubeconfig secret to a wrong value
// to simulate the managed cluster was destroyed

@@ -136,6 +136,7 @@ var _ = Describe("Delete hosted klusterlet CR", func() {
// in the future, if the eviction can be configured, we can set a short timeout period and
// remove the wait and update parts
evictionTimestampAnno := "operator.open-cluster-management.io/managed-resources-eviction-timestamp"
By("Wait for the eviction timestamp annotation", func() {
Eventually(func() error {
k, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(),

@@ -148,7 +149,7 @@ var _ = Describe("Delete hosted klusterlet CR", func() {
return fmt.Errorf("expected annotation %s does not exist", evictionTimestampAnno)
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}, 5*time.Minute, 5*time.Second).Should(Succeed())
})
time.Sleep(3 * time.Second) // after the eviction timestamp exists, wait 3 seconds for cache syncing

@@ -166,7 +167,7 @@ var _ = Describe("Delete hosted klusterlet CR", func() {
_, err = t.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(),
k, metav1.UpdateOptions{})
return err
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}, 5*time.Minute, 5*time.Second).Should(Succeed())
})
By("Check manged cluster and klusterlet can be deleted", func() {
@@ -15,7 +15,7 @@ import (
"open-cluster-management.io/ocm/pkg/operator/helpers"
)
var _ = Describe("Create klusterlet CR", func() {
var _ = Describe("Create klusterlet CR", Label("klusterlet"), func() {
var klusterletName string
var clusterName string
var klusterletNamespace string

@@ -42,34 +42,34 @@ var _ = Describe("Create klusterlet CR", func() {
Eventually(func() error {
_, err := t.GetCreatedManagedCluster(clusterName)
return err
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("check klusterlet %s status", klusterletName))
Eventually(func() error {
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue)
return err
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("approve the created managed cluster %v", clusterName))
Eventually(func() error {
return t.ApproveCSR(clusterName)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("accept the created managed cluster %v", clusterName))
Eventually(func() error {
return t.AcceptsClient(clusterName)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName))
Eventually(func() error {
return t.CheckManagedClusterStatus(clusterName)
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}).Should(Succeed())
By(fmt.Sprintf("check klusterlet %s status", klusterletName))
Eventually(func() error {
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse)
return err
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
}).Should(Succeed())
})
It("Create klusterlet CR with managed cluster name", func() {
|
||||
@@ -81,34 +81,34 @@ var _ = Describe("Create klusterlet CR", func() {
|
||||
Eventually(func() error {
|
||||
_, err := t.GetCreatedManagedCluster(clusterName)
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("check klusterlet %s status", klusterletName))
|
||||
Eventually(func() error {
|
||||
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue)
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("approve the created managed cluster %v", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.ApproveCSR(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("accept the created managed cluster %v", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.AcceptsClient(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.CheckManagedClusterStatus(clusterName)
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("check klusterlet %s status", klusterletName))
|
||||
Eventually(func() error {
|
||||
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse)
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
})
|
||||
|
||||
It("Created klusterlet without managed cluster name", func() {
|
||||
@@ -123,34 +123,34 @@ var _ = Describe("Create klusterlet CR", func() {
|
||||
Eventually(func() error {
|
||||
clusterName, err = t.GetRandomClusterName()
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("check klusterlet %s status", klusterletName))
|
||||
Eventually(func() error {
|
||||
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue)
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("approve the created managed cluster %v", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.ApproveCSR(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("accept the created managed cluster %v", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.AcceptsClient(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.CheckManagedClusterStatus(clusterName)
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("check klusterlet %s status", klusterletName))
|
||||
Eventually(func() error {
|
||||
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse)
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
})
|
||||
|
||||
It("Update klusterlet CR namespace", func() {
|
||||
@@ -162,22 +162,22 @@ var _ = Describe("Create klusterlet CR", func() {
|
||||
Eventually(func() error {
|
||||
_, err := t.GetCreatedManagedCluster(clusterName)
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("approve the created managed cluster %v", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.ApproveCSR(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("accept the created managed cluster %v", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.AcceptsClient(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.CheckManagedClusterStatus(clusterName)
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By("update klusterlet namespace")
|
||||
newNamespace := "open-cluster-management-agent-another"
|
||||
@@ -189,7 +189,7 @@ var _ = Describe("Create klusterlet CR", func() {
|
||||
klusterlet.Spec.Namespace = newNamespace
|
||||
_, err = t.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(), klusterlet, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By("copy bootstrap secret to the new namespace")
|
||||
Eventually(func() error {
|
||||
@@ -199,7 +199,7 @@ var _ = Describe("Create klusterlet CR", func() {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By("old namespace should be removed")
|
||||
Eventually(func() error {
|
||||
@@ -208,23 +208,23 @@ var _ = Describe("Create klusterlet CR", func() {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("namespace still exists")
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By("addon namespace should be kept")
|
||||
Eventually(func() error {
|
||||
_, err := t.SpokeKubeClient.CoreV1().Namespaces().Get(context.TODO(), helpers.DefaultAddonNamespace, metav1.GetOptions{})
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By(fmt.Sprintf("approve the managed cluster %v since it is registered in the new namespace", clusterName))
|
||||
Eventually(func() error {
|
||||
return t.ApproveCSR(clusterName)
|
||||
}, t.EventuallyTimeout, t.EventuallyInterval).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
|
||||
By("klusterlet status should be ok")
|
||||
Eventually(func() error {
|
||||
err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse)
|
||||
return err
|
||||
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(Succeed())
|
||||
}).Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -317,7 +317,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() {
return false
}
return true
}, 90*time.Second, 1*time.Second).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
ginkgo.By("Check addon status")
gomega.Eventually(func() error {

@@ -331,7 +331,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() {
}
return nil
}, 90*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
ginkgo.By("Delete the addon and check if secret is gone")
err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{})

@@ -340,7 +340,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() {
gomega.Eventually(func() bool {
_, err = t.SpokeKubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{})
return errors.IsNotFound(err)
}, 90*time.Second, 1*time.Second).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())
ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName))
err = t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
@@ -3,7 +3,6 @@ package e2e
import (
"context"
"fmt"
"time"
ginkgo "github.com/onsi/ginkgo/v2"
gomega "github.com/onsi/gomega"

@@ -40,7 +39,7 @@ var _ = ginkgo.Describe("ManagedClusterSetBinding", func() {
return err
}
return t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{})
}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
})
ginkgo.AfterEach(func() {

@@ -68,7 +67,7 @@ var _ = ginkgo.Describe("ManagedClusterSetBinding", func() {
return fmt.Errorf("binding %s/%s condition should be false", namespace, clusterSetName)
}
return nil
}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
managedClusterSet := &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{

@@ -90,7 +89,7 @@ var _ = ginkgo.Describe("ManagedClusterSetBinding", func() {
return fmt.Errorf("binding %s/%s condition should be true", namespace, clusterSetName)
}
return nil
}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
err = t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -106,7 +105,7 @@ var _ = ginkgo.Describe("ManagedClusterSetBinding", func() {
return fmt.Errorf("binding %s/%s condition should be false", namespace, clusterSetName)
}
return nil
}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
})
})
})
@@ -25,7 +25,7 @@ const (
mwrSetLabel = "work.open-cluster-management.io/manifestworkreplicaset"
)
var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestworkreplicaset"), func() {
var err error
var nameSuffix string

@@ -103,7 +103,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
_, err = t.SpokeKubeClient.CoreV1().Namespaces().Get(context.Background(), ns1, metav1.GetOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By("check if manifestworkreplicaset status")
gomega.Eventually(func() error {

@@ -128,7 +128,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
// TODO we should also update manifestwork replicaset and test

@@ -304,7 +304,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
return fmt.Errorf("total number of clusters is not correct, expect %d, got %d", numOfClusters, mwReplicaSet.Status.Summary.Total)
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
ginkgo.By("Check manifestWorks are created")
gomega.Eventually(func() error {

@@ -319,7 +319,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
return fmt.Errorf("manifestworks are not created, expect %d, got %d", numOfClusters, len(manifestWorkList.Items))
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
ginkgo.By("Delete manifestWorkReplicaSet")
err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Delete(context.TODO(), mwReplicaSetName, metav1.DeleteOptions{})

@@ -338,7 +338,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
return fmt.Errorf("manifestworks are not deleted, expect %d, got %d", 0, len(manifestWorkList.Items))
}
return nil
}, t.EventuallyTimeout*5, t.EventuallyInterval*5).Should(gomega.Succeed())
}).Should(gomega.Succeed())
})
})
})
@@ -27,7 +27,7 @@ const (
// Test cases with label "sanity-check" could be ran as sanity check on an existing environment with
// placement controller installed and well configured. Resource leftovers should be cleaned up on
// the hub cluster.
var _ = ginkgo.Describe("Placement", ginkgo.Label("sanity-check"), func() {
|
||||
var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), func() {
|
||||
var namespace string
|
||||
var placementName string
|
||||
var clusterSet1Name string
|
||||
@@ -101,7 +101,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("sanity-check"), func() {
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
}).Should(gomega.BeTrue())
|
||||
}
|
||||
|
||||
assertNumberOfDecisions := func(placementName string, desiredNOD int) {
|
||||
@@ -122,7 +122,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("sanity-check"), func() {
|
||||
actualNOD += len(pd.Status.Decisions)
|
||||
}
|
||||
return actualNOD == desiredNOD
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
}).Should(gomega.BeTrue())
|
||||
}
|
||||
|
||||
assertPlacementStatus := func(placementName string, numOfSelectedClusters int, satisfied bool) {
|
||||
@@ -155,7 +155,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("sanity-check"), func() {
|
||||
}
|
||||
|
||||
return placement.Status.NumberOfSelectedClusters == int32(numOfSelectedClusters)
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
}).Should(gomega.BeTrue())
|
||||
}
|
||||
|
||||
assertCreatingClusterSet := func(clusterSetName string, matchLabel map[string]string) {
|
||||
@@ -269,7 +269,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("sanity-check"), func() {
|
||||
placement.Spec.NumberOfClusters = &noc
|
||||
_, err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Update(context.Background(), placement, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
assertNumberOfDecisions(placementName, 5)
|
||||
assertPlacementStatus(placementName, 5, false)
|
||||
@@ -296,7 +296,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("sanity-check"), func() {
|
||||
}
|
||||
|
||||
return len(placementDecisions.Items) == 0
|
||||
}, eventuallyTimeout*5, eventuallyInterval*5).Should(gomega.BeTrue())
|
||||
}).Should(gomega.BeTrue())
|
||||
})
|
||||
|
||||
ginkgo.It("Should delete placementdecision successfully", func() {
|
||||
@@ -323,7 +323,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("sanity-check"), func() {
|
||||
}
|
||||
_, err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Update(context.Background(), placement, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
}).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Create empty placement decision")
|
||||
assertNumberOfDecisions(placementName, 0)
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
"open-cluster-management.io/ocm/pkg/registration/hub/taint"
)
var _ = ginkgo.Describe("Taints update check", func() {
var _ = ginkgo.Describe("Taints update check", ginkgo.Label("registration-taint"), func() {
ginkgo.Context("Check the taint to update according to the condition status", func() {
var (
err error

@@ -57,7 +57,7 @@ var _ = ginkgo.Describe("Taints update check", func() {
return fmt.Errorf("the %+v is not equal to UnreachableTaint", managedCluster.Spec.Taints[0])
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
}).Should(gomega.BeNil())
ginkgo.By("Change the LeaseDurationSeconds to 60")
gomega.Eventually(func() error {

@@ -69,7 +69,7 @@ var _ = ginkgo.Describe("Taints update check", func() {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
}).Should(gomega.BeNil())
ginkgo.By("Add a ManagedClusterConditionAvailable condition")
gomega.Eventually(func() error {

@@ -85,7 +85,7 @@ var _ = ginkgo.Describe("Taints update check", func() {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
}).Should(gomega.BeNil())
ginkgo.By("The taints len should be 0")
gomega.Eventually(func() error {

@@ -96,7 +96,7 @@ var _ = ginkgo.Describe("Taints update check", func() {
return fmt.Errorf("managedCluster taints len is not 0")
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
}).Should(gomega.BeNil())
ginkgo.By("Set the ManagedClusterConditionAvailable status to false")
gomega.Eventually(func() error {

@@ -112,7 +112,7 @@ var _ = ginkgo.Describe("Taints update check", func() {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
}).Should(gomega.BeNil())
ginkgo.By("Should only be one UnavailableTaint")
gomega.Eventually(func() error {

@@ -126,7 +126,7 @@ var _ = ginkgo.Describe("Taints update check", func() {
return fmt.Errorf("the %+v is not equal to UnavailableTaint\n", managedCluster.Spec.Taints[0])
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
}).Should(gomega.BeNil())
ginkgo.By("Set the ManagedClusterConditionAvailable status to unknown")
gomega.Eventually(func() error {

@@ -142,7 +142,7 @@ var _ = ginkgo.Describe("Taints update check", func() {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
}).Should(gomega.BeNil())
ginkgo.By("Should only be one UnreachableTaint")
gomega.Eventually(func() error {

@@ -156,7 +156,7 @@ var _ = ginkgo.Describe("Taints update check", func() {
return fmt.Errorf("the %+v is not equal to UnreachableTaint", managedCluster.Spec.Taints[0])
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
}).Should(gomega.BeNil())
})
})
})
@@ -140,7 +140,7 @@ var _ = ginkgo.Describe("Admission webhook", func() {
}
_, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{})
return err
}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
ginkgo.By("check if timeAdded of the taint is reset")
managedCluster, err = t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})

@@ -687,7 +687,7 @@ var _ = ginkgo.Describe("Admission webhook", func() {
return err
}
return t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{})
}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
})
ginkgo.AfterEach(func() {

@@ -803,7 +803,7 @@ var _ = ginkgo.Describe("Admission webhook", func() {
binding.Labels = map[string]string{"owner": "user"}
_, err = unauthorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
return err
}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
})
})

@@ -912,7 +912,7 @@ var _ = ginkgo.Describe("Admission webhook", func() {
binding.Labels = map[string]string{"owner": "user"}
_, err = unauthorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
return err
}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
}).Should(gomega.Succeed())
})
})
@@ -30,8 +30,6 @@ import (
)

const (
eventuallyInterval = 1 // seconds

guestBookCRDJson = `{
"apiVersion": "apiextensions.k8s.io/v1",
"kind": "CustomResourceDefinition",
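With the per-file eventuallyInterval/eventuallyTimeout constants removed, the patience of every Eventually call has to be configured somewhere once. Gomega exposes process-wide defaults for exactly this; the sketch below shows one plausible place to set them, but the function name, durations, and suite wiring are assumptions for illustration rather than this repository's actual suite setup:

package e2esketch

import (
	"testing"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestE2ESuite(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)

	// Configure how long Eventually waits and how often it polls, once for the whole suite.
	gomega.SetDefaultEventuallyTimeout(90 * time.Second)
	gomega.SetDefaultEventuallyPollingInterval(1 * time.Second)

	ginkgo.RunSpecs(t, "e2e suite sketch")
}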
@@ -229,7 +227,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")

_, err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{})
return err
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

// check status conditions in manifestwork status
gomega.Eventually(func() error {
@@ -237,7 +235,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
expectedManifestStatuses := []metav1.ConditionStatus{
metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
return assertManifestWorkAppliedSuccessfully(clusterName, workName, expectedManifestStatuses)
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

// get the corresponding AppliedManifestWork
var appliedManifestWork *workapiv1.AppliedManifestWork
@@ -255,7 +253,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
}

return fmt.Errorf("not found the applied manifest work with suffix %s", workName)
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

// check applied resources in manifestwork status
expectedAppliedResources := []workapiv1.AppliedManifestResourceMeta{
@@ -295,7 +293,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
work.Spec.Workload.Manifests = newWork.Spec.Workload.Manifests
work, err = t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.Succeed())
}).Should(gomega.Succeed())

// check if cm1 is removed from applied resources list in status
gomega.Eventually(func() error {
@@ -315,7 +313,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
expectedManifestStatuses := []metav1.ConditionStatus{
metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
return assertManifestWorkAppliedSuccessfully(clusterName, workName, expectedManifestStatuses)
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

// check if cm1 is deleted
_, err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm1", metav1.GetOptions{})
@@ -333,7 +331,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
}

return nil
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

ginkgo.By("delete manifestwork")
err = t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), workName, metav1.DeleteOptions{})
@@ -354,7 +352,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
gomega.Eventually(func() bool {
_, err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), workName, metav1.GetOptions{})
return errors.IsNotFound(err)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())

// Once manifest work is deleted, its corresponding appliedManifestWorks should be deleted as well
_, err = t.SpokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWork.Name, metav1.GetOptions{})
@@ -406,7 +404,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
return fmt.Errorf("condition %s is not true", workapiv1.WorkAvailable)
}
return nil
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

// Ensure pod is created
gomega.Eventually(func() error {
@@ -420,7 +418,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
}

return nil
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

ginkgo.By("delete manifestwork")
err = t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), workName, metav1.DeleteOptions{})
@@ -438,7 +436,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
}

return nil
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
})
})

@@ -483,7 +481,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
expectedManifestStatuses := []metav1.ConditionStatus{
metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
return assertManifestWorkAppliedSuccessfully(clusterName, workName, expectedManifestStatuses)
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

// Upgrade crd/cr and check if cr resource is recreated.
// Get UID of cr resource at first.
@@ -527,7 +525,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
return fmt.Errorf("expect UID to be the same, expected: %q, actual %q", currentUID, guestbook.GetUID())
}
return nil
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
})
})

@@ -614,8 +612,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
return fmt.Errorf("statusFeedbackSynced condition should be True")
}
return nil
}, t.EventuallyTimeout*2, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())

}).WithTimeout(2 * time.Minute).ShouldNot(gomega.HaveOccurred())
})
})

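The hunk above is the one place that still needs a longer wait: the old code doubled the shared timeout (t.EventuallyTimeout*2), while the new code keeps the default polling interval and stretches only this assertion via the chained WithTimeout(2 * time.Minute). A short sketch of that per-assertion override form; statusFeedbackSynced is a placeholder, not a helper from this repository:

package e2esketch

import (
	"time"

	"github.com/onsi/gomega"
)

func waitForStatusFeedback(g gomega.Gomega, statusFeedbackSynced func() error) {
	// Only this assertion gets a larger timeout; the suite default polling interval still applies.
	g.Eventually(statusFeedbackSynced).WithTimeout(2 * time.Minute).ShouldNot(gomega.HaveOccurred())

	// WithPolling could likewise override the interval locally if a check is expensive:
	// g.Eventually(statusFeedbackSynced).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(gomega.Succeed())
}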
@@ -667,7 +664,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
// check manifest status conditions
expectedManifestStatuses := []metav1.ConditionStatus{metav1.ConditionTrue}
return assertManifestWorkAppliedSuccessfully(clusterName, work.Name, expectedManifestStatuses)
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())
}

cmUID := types.UID("test")
@@ -683,7 +680,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
return fmt.Errorf("expected 2 owners, but got %d", len(cm.OwnerReferences))
}
return nil
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

ginkgo.By("delete manifestwork mw1")
err = t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(ctx, workName, metav1.DeleteOptions{})
@@ -693,7 +690,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
gomega.Eventually(func() bool {
_, err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Get(ctx, workName, metav1.GetOptions{})
return errors.IsNotFound(err)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())

cm, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -707,7 +704,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
gomega.Eventually(func() bool {
_, err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Get(ctx, work2Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())

_, err = t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
@@ -718,7 +715,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
gomega.Eventually(func() error { // check manifest status conditions
expectedManifestStatuses := []metav1.ConditionStatus{metav1.ConditionTrue}
return assertManifestWorkAppliedSuccessfully(clusterName, workName, expectedManifestStatuses)
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

ginkgo.By("check if resources are applied for manifests")
_, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{})
@@ -743,7 +740,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")

_, err = t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Update(ctx, cm, metav1.UpdateOptions{})
return err
}, t.EventuallyTimeout, t.EventuallyInterval).ShouldNot(gomega.HaveOccurred())
}).ShouldNot(gomega.HaveOccurred())

cm, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -757,13 +754,13 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check")
gomega.Eventually(func() bool {
_, err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Get(ctx, workName, metav1.GetOptions{})
return errors.IsNotFound(err)
}, t.EventuallyTimeout, t.EventuallyInterval).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())

ginkgo.By("check the resource cm was deleted successfully")
gomega.Eventually(func() bool {
_, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{})
return errors.IsNotFound(err)
}, t.EventuallyInterval, t.EventuallyTimeout).Should(gomega.BeTrue())
}).Should(gomega.BeTrue())

err = t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Delete(ctx, cmOwner.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())