Mirror of https://github.com/open-cluster-management-io/ocm.git, synced 2026-02-14 18:09:57 +00:00
PlacementRollout to reflect Ready status (#1281)
Some checks failed
Scorecard supply-chain security / Scorecard analysis (push) Failing after 20s
Post / images (amd64, placement) (push) Failing after 45s
Post / images (amd64, registration) (push) Failing after 42s
Post / images (amd64, registration-operator) (push) Failing after 40s
Post / images (amd64, work) (push) Failing after 41s
Post / images (arm64, addon-manager) (push) Failing after 41s
Post / images (arm64, placement) (push) Failing after 40s
Post / images (arm64, registration) (push) Failing after 39s
Post / images (arm64, registration-operator) (push) Failing after 39s
Post / images (arm64, work) (push) Failing after 41s
Post / images (amd64, addon-manager) (push) Failing after 7m30s
Post / image manifest (addon-manager) (push) Has been skipped
Post / image manifest (placement) (push) Has been skipped
Post / image manifest (registration) (push) Has been skipped
Post / image manifest (registration-operator) (push) Has been skipped
Post / image manifest (work) (push) Has been skipped
Post / trigger clusteradm e2e (push) Has been skipped
Post / coverage (push) Failing after 9m44s
Update with success count
Remove status references
Add unit tests
Fix unit tests
Update unit tests
Test fix
Fix tests for lastTransitionTime
Fix integration tests

Signed-off-by: annelau <annelau@salesforce.com>
Co-authored-by: annelau <annelau@salesforce.com>
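In essence, the reconciler now tallies the clusters whose rollout status is Succeeded and reports PlacementRolledOut as Complete only when every selected cluster has succeeded; otherwise the condition stays Progressing. Below is a minimal sketch of that condition logic, assuming the package-local GetPlacementRollOut helper and the reason constants shown in the diff that follows; the helper name setRolloutCondition and the package name are hypothetical, not part of this commit.

package controllers // hypothetical package name for the sketch

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"
)

// setRolloutCondition condenses the condition logic this commit introduces:
// PlacementRolledOut becomes Complete only when every selected cluster
// reports a Succeeded rollout status; otherwise it remains Progressing.
// GetPlacementRollOut is the package-local helper shown in the diff below.
func setRolloutCondition(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, total, succeededCount int) {
	if total == succeededCount {
		apimeta.SetStatusCondition(&mwrSet.Status.Conditions,
			GetPlacementRollOut(workapiv1alpha1.ReasonComplete, ""))
		return
	}
	apimeta.SetStatusCondition(&mwrSet.Status.Conditions,
		GetPlacementRollOut(workapiv1alpha1.ReasonProgressing, ""))
}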
@@ -35,7 +35,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
var errs []error
var plcsSummary []workapiv1alpha1.PlacementSummary
minRequeue := maxRequeueTime
count, total := 0, 0
count, total, succeededCount := 0, 0, 0

// Clean up ManifestWorks from placements no longer in the spec
currentPlacementNames := sets.New[string]()
@@ -65,6 +65,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
for _, placementRef := range mwrSet.Spec.PlacementRefs {
var existingRolloutClsStatus []clustersdkv1alpha1.ClusterRolloutStatus
existingClusterNames := sets.New[string]()
succeededClusterNames := sets.New[string]()
placement, err := d.placementLister.Placements(mwrSet.Namespace).Get(placementRef.Name)

if errors.IsNotFound(err) {
@@ -100,6 +101,11 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
continue
}
existingRolloutClsStatus = append(existingRolloutClsStatus, rolloutClusterStatus)

// Only count clusters that are done progressing (Succeeded status)
if rolloutClusterStatus.Status == clustersdkv1alpha1.Succeeded {
succeededClusterNames.Insert(mw.Namespace)
}
}

placeTracker := helper.GetPlacementTracker(d.placeDecisionLister, placement, existingClusterNames)
@@ -171,6 +177,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
plcsSummary = append(plcsSummary, plcSummary)

count += len(existingClusterNames)
succeededCount += len(succeededClusterNames)
}
// Set the placements summary
mwrSet.Status.PlacementsSummary = plcsSummary
@@ -191,7 +198,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonAsExpected, ""))
}

if total == count {
if total == succeededCount {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementRollOut(workapiv1alpha1.ReasonComplete, ""))
} else {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementRollOut(workapiv1alpha1.ReasonProgressing, ""))
@@ -330,7 +337,7 @@ func GetPlacementDecisionVerified(reason string, message string) metav1.Conditio
return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified, reason, message, metav1.ConditionFalse)
}

// GetPlacementRollout return only True status if there are clusters selected
// GetPlacementRollout return only True status if all the clusters selected by the placement have succeeded
func GetPlacementRollOut(reason string, message string) metav1.Condition {
if reason == workapiv1alpha1.ReasonComplete {
return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, reason, message, metav1.ConditionTrue)
@@ -325,7 +325,36 @@ func TestDeployWithRolloutStrategyReconcileAsExpected(t *testing.T) {
}
assert.Equal(t, mwrSet.Status.Summary, mwrSetSummary)

// Check the RollOut conditions
// Check the RollOut conditions - should be Complete when all clusters have succeeded
// At this point, all ManifestWorks are created but none have Succeeded status yet
rollOutCondition = apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
assert.NotNil(t, rollOutCondition)
assert.Equal(t, rollOutCondition.Status, metav1.ConditionFalse)
assert.Equal(t, rollOutCondition.Reason, workapiv1alpha1.ReasonProgressing)

// Now mark all ManifestWorks as Succeeded to achieve ReasonComplete
for i := 0; i < len(clusters); i++ {
mw := helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, "place-test", clusters[i])
apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkApplied,
Status: metav1.ConditionTrue,
ObservedGeneration: mw.Generation,
Reason: "Applied",
})
apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkProgressing,
Status: metav1.ConditionFalse,
ObservedGeneration: mw.Generation,
Reason: "Completed",
})
err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Update(mw)
assert.Nil(t, err)
}

mwrSet, _, err = pmwDeployController.reconcile(context.TODO(), mwrSet)
assert.Nil(t, err)

// Now RollOut should be Complete since all clusters have Succeeded status
rollOutCondition = apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
assert.NotNil(t, rollOutCondition)
assert.Equal(t, rollOutCondition.Status, metav1.ConditionTrue)
@@ -532,9 +561,37 @@ func TestDeployMWRSetSpecChangesReconcile(t *testing.T) {
assert.Equal(t, placeCondition.Status, metav1.ConditionTrue)
assert.Equal(t, placeCondition.Reason, workapiv1alpha1.ReasonAsExpected)

// Check the RollOut conditions
// Check the RollOut conditions - need to mark ManifestWorks as Succeeded first
rollOutCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
assert.NotNil(t, rollOutCondition)
assert.Equal(t, rollOutCondition.Status, metav1.ConditionFalse)
assert.Equal(t, rollOutCondition.Reason, workapiv1alpha1.ReasonProgressing)

// Mark all ManifestWorks as Succeeded
for i := 0; i < int(placement.Status.NumberOfSelectedClusters); i++ {
mw := helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, placement.Name, clusters[i])
apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkApplied,
Status: metav1.ConditionTrue,
ObservedGeneration: mw.Generation,
Reason: "Applied",
})
apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkProgressing,
Status: metav1.ConditionFalse,
ObservedGeneration: mw.Generation,
Reason: "Completed",
})
err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Update(mw)
assert.Nil(t, err)
}

mwrSet, _, err = pmwDeployController.reconcile(context.TODO(), mwrSet)
assert.Nil(t, err)

// Now RollOut should be Complete
rollOutCondition = apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
assert.NotNil(t, rollOutCondition)
assert.Equal(t, rollOutCondition.Status, metav1.ConditionTrue)
assert.Equal(t, rollOutCondition.Reason, workapiv1alpha1.ReasonComplete)
@@ -807,6 +864,112 @@ func TestDeployReconcileWithMultiplePlacementChanges(t *testing.T) {
}
assert.Equal(t, currentMW3.Labels[workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey], "place-test3")
}
func TestDeployRolloutProgressingWhenNotAllSucceeded(t *testing.T) {
// Test case where all ManifestWorks are created (count = total) but not all have succeeded
// This should result in PlacementRolledOut = False with Reason = Progressing
clusters := []string{"cls1", "cls2", "cls3"}
placement, placementDecision := helpertest.CreateTestPlacement("place-test", "default", clusters...)
fClusterClient := fakeclusterclient.NewSimpleClientset(placement, placementDecision)
clusterInformerFactory := clusterinformers.NewSharedInformerFactoryWithOptions(fClusterClient, 1*time.Second)

err := clusterInformerFactory.Cluster().V1beta1().Placements().Informer().GetStore().Add(placement)
assert.Nil(t, err)
err = clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(placementDecision)
assert.Nil(t, err)

placementLister := clusterInformerFactory.Cluster().V1beta1().Placements().Lister()
placementDecisionLister := clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Lister()

mwrSet := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")

// Create ManifestWorks for all clusters
mw1 := helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, placement.Name, clusters[0])
mw2 := helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, placement.Name, clusters[1])
mw3 := helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, placement.Name, clusters[2])

// Set cluster 1 as Succeeded
apimeta.SetStatusCondition(&mw1.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkApplied,
Status: metav1.ConditionTrue,
ObservedGeneration: mw1.Generation,
Reason: "Applied",
})
apimeta.SetStatusCondition(&mw1.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkProgressing,
Status: metav1.ConditionFalse,
ObservedGeneration: mw1.Generation,
Reason: "Completed",
})

// Set cluster 2 as Progressing
apimeta.SetStatusCondition(&mw2.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkApplied,
Status: metav1.ConditionTrue,
ObservedGeneration: mw2.Generation,
Reason: "Applied",
})
apimeta.SetStatusCondition(&mw2.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkProgressing,
Status: metav1.ConditionTrue,
ObservedGeneration: mw2.Generation,
Reason: "Applying",
})

// Set cluster 3 as Failed (Progressing=True + Degraded=True)
apimeta.SetStatusCondition(&mw3.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkApplied,
Status: metav1.ConditionTrue,
ObservedGeneration: mw3.Generation,
Reason: "Applied",
})
apimeta.SetStatusCondition(&mw3.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkProgressing,
Status: metav1.ConditionTrue,
ObservedGeneration: mw3.Generation,
Reason: "Applying",
})
apimeta.SetStatusCondition(&mw3.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkDegraded,
Status: metav1.ConditionTrue,
ObservedGeneration: mw3.Generation,
Reason: "ApplyFailed",
})

fWorkClient := fakeworkclient.NewSimpleClientset(mwrSet, mw1, mw2, mw3)
workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)

err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw1)
assert.Nil(t, err)
err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw2)
assert.Nil(t, err)
err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw3)
assert.Nil(t, err)
err = workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(mwrSet)
assert.Nil(t, err)

mwLister := workInformerFactory.Work().V1().ManifestWorks().Lister()

pmwDeployController := deployReconciler{
workApplier: workapplier.NewWorkApplierWithTypedClient(fWorkClient, mwLister),
manifestWorkLister: mwLister,
placeDecisionLister: placementDecisionLister,
placementLister: placementLister,
}

mwrSet, _, err = pmwDeployController.reconcile(context.TODO(), mwrSet)
assert.Nil(t, err)

// Verify that Summary.Total equals the number of clusters
assert.Equal(t, len(clusters), mwrSet.Status.Summary.Total, "Summary.Total should equal the number of clusters")

// Verify PlacementRolledOut is False because not all clusters have succeeded
// Only 1 out of 3 clusters has succeeded
rollOutCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
assert.NotNil(t, rollOutCondition)
assert.Equal(t, metav1.ConditionFalse, rollOutCondition.Status, "PlacementRolledOut should be False when not all clusters have succeeded")
assert.Equal(t, workapiv1alpha1.ReasonProgressing, rollOutCondition.Reason, "Reason should be Progressing when not all clusters have succeeded")
}

func TestClusterRolloutStatusFunc(t *testing.T) {
mwrSet := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
now := metav1.Now()
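The unit tests above repeatedly mark a ManifestWork as succeeded by setting WorkApplied to True and WorkProgressing to False at the work's current generation. A hypothetical test helper, not part of this commit, that captures that repeated pattern could look like this (package name is illustrative):

package controllers_test // hypothetical package name for the sketch

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	workapiv1 "open-cluster-management.io/api/work/v1"
)

// markWorkSucceeded sets the conditions the tests above use to make a
// ManifestWork count as Succeeded: Applied=True and Progressing=False,
// both observed at the work's current generation.
func markWorkSucceeded(mw *workapiv1.ManifestWork) {
	apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
		Type:               workapiv1.WorkApplied,
		Status:             metav1.ConditionTrue,
		ObservedGeneration: mw.Generation,
		Reason:             "Applied",
	})
	apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
		Type:               workapiv1.WorkProgressing,
		Status:             metav1.ConditionFalse,
		ObservedGeneration: mw.Generation,
		Reason:             "Completed",
	})
}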
@@ -89,7 +89,7 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
return nil, clusterNames, err
}

_, err = hubClusterClient.ClusterV1beta1().Placements(placement.Namespace).Create(context.TODO(), placement, metav1.CreateOptions{})
createdPlacement, err := hubClusterClient.ClusterV1beta1().Placements(placement.Namespace).Create(context.TODO(), placement, metav1.CreateOptions{})
if err != nil {
return nil, clusterNames, err
}
@@ -113,6 +113,13 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
}

_, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus(context.TODO(), decision, metav1.UpdateOptions{})
if err != nil {
return nil, clusterNames, err
}

// Update placement status with NumberOfSelectedClusters
createdPlacement.Status.NumberOfSelectedClusters = int32(numberOfClusters)
_, err = hubClusterClient.ClusterV1beta1().Placements(placement.Namespace).UpdateStatus(context.TODO(), createdPlacement, metav1.UpdateOptions{})
return manifestWorkReplicaSet, clusterNames, err
}
})
@@ -169,10 +176,17 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

ginkgo.By("PlacementRolledOut should be False when works are not yet applied")
gomega.Eventually(
assertCondition(
workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionFalse, manifestWorkReplicaSet),
eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

for _, work := range works.Items {
workCopy := work.DeepCopy()
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "ApplyTest"})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "ApplyTest", ObservedGeneration: workCopy.Generation})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "ApplyTest"})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkProgressing, Status: metav1.ConditionFalse, Reason: "AppliedManifestWorkComplete", ObservedGeneration: workCopy.Generation})
_, err := hubWorkClient.WorkV1().ManifestWorks(workCopy.Namespace).UpdateStatus(context.TODO(), workCopy, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
@@ -183,6 +197,12 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
Available: 1,
}, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

ginkgo.By("PlacementRolledOut should be True when all works succeed")
gomega.Eventually(
assertCondition(
workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionTrue, manifestWorkReplicaSet),
eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

works, err = hubWorkClient.WorkV1().ManifestWorks(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
LabelSelector: fmt.Sprintf("work.open-cluster-management.io/manifestworkreplicaset=%s", key),
})
@@ -190,16 +210,26 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {

for _, work := range works.Items {
workCopy := work.DeepCopy()
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionFalse, Reason: "ApplyTest"})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionFalse, Reason: "ResourceNotAvailable"})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkProgressing, Status: metav1.ConditionTrue, Reason: "Applying", ObservedGeneration: workCopy.Generation})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkDegraded, Status: metav1.ConditionTrue, Reason: "ResourceDegraded", ObservedGeneration: workCopy.Generation})
_, err := hubWorkClient.WorkV1().ManifestWorks(workCopy.Namespace).UpdateStatus(context.TODO(), workCopy, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}

gomega.Eventually(assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{
Total: 1,
Applied: 1,
Available: 0,
Total: 1,
Applied: 1,
Available: 0,
Degraded: 1,
Progressing: 1,
}, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

ginkgo.By("PlacementRolledOut should become False when a work is progressing")
gomega.Eventually(
assertCondition(
workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionFalse, manifestWorkReplicaSet),
eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
})

ginkgo.It("should delete manifestworks from old placement when placementRef changes", func() {
@@ -210,7 +240,7 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
Namespace: namespaceName,
},
}
_, err := hubClusterClient.ClusterV1beta1().Placements(placement1.Namespace).Create(context.TODO(), placement1, metav1.CreateOptions{})
createdPlacement1, err := hubClusterClient.ClusterV1beta1().Placements(placement1.Namespace).Create(context.TODO(), placement1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

placementDecision1 := &clusterv1beta1.PlacementDecision{
@@ -241,6 +271,11 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
_, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision1.Namespace).UpdateStatus(context.TODO(), decision1, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Update placement1 status with NumberOfSelectedClusters
createdPlacement1.Status.NumberOfSelectedClusters = 2
_, err = hubClusterClient.ClusterV1beta1().Placements(placement1.Namespace).UpdateStatus(context.TODO(), createdPlacement1, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Create manifestWorkReplicaSet with first placement
manifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap("defaut", cm1, map[string]string{"a": "b"}, nil)),
@@ -290,7 +325,7 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
Namespace: namespaceName,
},
}
_, err = hubClusterClient.ClusterV1beta1().Placements(placement2.Namespace).Create(context.TODO(), placement2, metav1.CreateOptions{})
createdPlacement2, err := hubClusterClient.ClusterV1beta1().Placements(placement2.Namespace).Create(context.TODO(), placement2, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

placementDecision2 := &clusterv1beta1.PlacementDecision{
@@ -321,6 +356,11 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
_, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision2.Namespace).UpdateStatus(context.TODO(), decision2, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Update placement2 status with NumberOfSelectedClusters
createdPlacement2.Status.NumberOfSelectedClusters = 2
_, err = hubClusterClient.ClusterV1beta1().Placements(placement2.Namespace).UpdateStatus(context.TODO(), createdPlacement2, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Update manifestWorkReplicaSet to use second placement
placementRef2 := workapiv1alpha1.LocalPlacementReference{
Name: placement2.Name,
@@ -378,6 +418,12 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet, 2), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

ginkgo.By("PlacementRolledOut should be False during progressive rollout")
gomega.Eventually(
assertCondition(
workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionFalse, manifestWorkReplicaSet),
eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

ginkgo.By("set work status to true")
key := fmt.Sprintf("%s.%s", manifestWorkReplicaSet.Namespace, manifestWorkReplicaSet.Name)
works, err := hubWorkClient.WorkV1().ManifestWorks(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
@@ -392,18 +438,28 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
Reason: "AppliedManifestWorkComplete",
ObservedGeneration: workCopy.Generation,
})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkAvailable,
Status: metav1.ConditionTrue,
Reason: "AppliedManifestWorkComplete",
})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkProgressing,
Status: metav1.ConditionFalse,
Reason: "AppliedManifestWorkComplete",
ObservedGeneration: workCopy.Generation,
})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err := hubWorkClient.WorkV1().ManifestWorks(workCopy.Namespace).UpdateStatus(context.TODO(), workCopy, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
gomega.Eventually(assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet, 4), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

ginkgo.By("PlacementRolledOut should still be False with partial rollout")
gomega.Eventually(
assertCondition(
workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionFalse, manifestWorkReplicaSet),
eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

works, err = hubWorkClient.WorkV1().ManifestWorks(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
LabelSelector: fmt.Sprintf("work.open-cluster-management.io/manifestworkreplicaset=%s", key),
})
@@ -416,17 +472,55 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
Reason: "AppliedManifestWorkComplete",
ObservedGeneration: workCopy.Generation,
})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkAvailable,
Status: metav1.ConditionTrue,
Reason: "AppliedManifestWorkComplete",
})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkProgressing,
Status: metav1.ConditionFalse,
Reason: "AppliedManifestWorkComplete",
ObservedGeneration: workCopy.Generation,
})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err := hubWorkClient.WorkV1().ManifestWorks(workCopy.Namespace).UpdateStatus(context.TODO(), workCopy, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
gomega.Eventually(assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet, 6), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

ginkgo.By("set all 6 works status to complete")
works, err = hubWorkClient.WorkV1().ManifestWorks(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
LabelSelector: fmt.Sprintf("work.open-cluster-management.io/manifestworkreplicaset=%s", key),
})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
for _, work := range works.Items {
workCopy := work.DeepCopy()
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkApplied,
Status: metav1.ConditionTrue,
Reason: "AppliedManifestWorkComplete",
ObservedGeneration: workCopy.Generation,
})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkAvailable,
Status: metav1.ConditionTrue,
Reason: "AppliedManifestWorkComplete",
})
meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkProgressing,
Status: metav1.ConditionFalse,
Reason: "AppliedManifestWorkComplete",
ObservedGeneration: workCopy.Generation,
})
_, err := hubWorkClient.WorkV1().ManifestWorks(workCopy.Namespace).UpdateStatus(context.TODO(), workCopy, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}

ginkgo.By("PlacementRolledOut should be True when all works complete")
gomega.Eventually(
assertCondition(
workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionTrue, manifestWorkReplicaSet),
eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
})

ginkgo.It("rolling exceeds max failure", func() {
@@ -523,6 +617,13 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
Reason: "Applied",
ObservedGeneration: workCopy.Generation,
})
meta.SetStatusCondition(
&workCopy.Status.Conditions,
metav1.Condition{
Type: workapiv1.WorkAvailable,
Status: metav1.ConditionTrue,
Reason: "Applied",
})
meta.SetStatusCondition(
&workCopy.Status.Conditions,
metav1.Condition{
@@ -531,7 +632,6 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
Reason: "AppliedManifestWorkComplete",
ObservedGeneration: workCopy.Generation,
})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err := hubWorkClient.WorkV1().
ManifestWorks(workCopy.Namespace).UpdateStatus(context.TODO(), workCopy, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -582,6 +682,12 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet, 1), eventuallyTimeout, eventuallyInterval).
Should(gomega.Succeed())

ginkgo.By("PlacementRolledOut should be False during rollout")
gomega.Eventually(
assertCondition(
workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionFalse, manifestWorkReplicaSet),
eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

ginkgo.By("set work status to fail")
key := fmt.Sprintf("%s.%s", manifestWorkReplicaSet.Namespace, manifestWorkReplicaSet.Name)
works, err := hubWorkClient.WorkV1().ManifestWorks(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
@@ -624,6 +730,12 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
gomega.Eventually(
assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet, 2), eventuallyTimeout, eventuallyInterval).
Should(gomega.Succeed())

ginkgo.By("PlacementRolledOut should still be False when not all clusters have succeeded")
gomega.Eventually(
assertCondition(
workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionFalse, manifestWorkReplicaSet),
eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
})
})