🐛 Use generateName for manifestwork generated by manifestworkreplicaset (#1421)

* fix replicaset

Signed-off-by: shivansh-source <shivanshsiddhi1234@gmail.com>

* few changes

Signed-off-by: shivansh-source <shivanshsiddhi1234@gmail.com>

* last one

Signed-off-by: shivansh-source <shivanshsiddhi1234@gmail.com>

* Refactor with updated tests

Signed-off-by: Jian Qiu <jqiu@redhat.com>

---------

Signed-off-by: shivansh-source <shivanshsiddhi1234@gmail.com>
Signed-off-by: Jian Qiu <jqiu@redhat.com>
Co-authored-by: shivansh-source <shivanshsiddhi1234@gmail.com>
This commit is contained in:
Jian Qiu
2026-03-19 09:46:46 +08:00
committed by GitHub
parent c6e6591fdb
commit aebb548bc2
9 changed files with 198 additions and 129 deletions

View File

@@ -14,6 +14,7 @@ import (
"k8s.io/apimachinery/pkg/selection"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
@@ -44,6 +45,61 @@ type ManifestWorkReplicaSetController struct {
reconcilers []ManifestWorkReplicaSetReconcile
}
// manifestWorkInReplicaSet is to record all the current manifestwork relating to a manifestWorkReplicaSet
type manifestWorkInReplicaSet struct {
workByCluster map[string]*manifestWorkWithPlacements
workByPlacement map[string][]*workapiv1.ManifestWork
}
type manifestWorkWithPlacements struct {
work *workapiv1.ManifestWork
placements sets.Set[string]
}
func getManifestWorkInReplicaSet(mwrs *workapiv1alpha1.ManifestWorkReplicaSet,
manifestWorkLister worklisterv1.ManifestWorkLister) (*manifestWorkInReplicaSet, []*workapiv1.ManifestWork, error) {
req, err := labels.NewRequirement(
workapiv1alpha1.ManifestWorkReplicaSetControllerNameLabelKey,
selection.Equals, []string{manifestWorkReplicaSetKey(mwrs)})
if err != nil {
return nil, nil, err
}
selector := labels.NewSelector().Add(*req)
mws, err := manifestWorkLister.List(selector)
if err != nil {
return nil, nil, err
}
result := &manifestWorkInReplicaSet{
workByCluster: make(map[string]*manifestWorkWithPlacements),
workByPlacement: make(map[string][]*workapiv1.ManifestWork),
}
for _, mw := range mws {
recordedWork := &manifestWorkWithPlacements{
work: mw,
placements: sets.New[string](),
}
result.workByCluster[mw.Namespace] = recordedWork
// TODO(qiujian) actually a work could be related to multiple placements, we should not
// use label, but annotation to record all placements in the mw
placementName, ok := mw.Labels[workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey]
if !ok {
continue
}
recordedWork.placements.Insert(placementName)
if _, ok := result.workByPlacement[placementName]; !ok {
result.workByPlacement[placementName] = []*workapiv1.ManifestWork{mw}
} else {
result.workByPlacement[placementName] = append(result.workByPlacement[placementName], mw)
}
}
return result, mws, nil
}
// ManifestWorkReplicaSetReconcile is a interface for reconcile logic. It returns an updated manifestWorkReplicaSet and whether further
// reconcile needs to proceed.
type ManifestWorkReplicaSetReconcile interface {
@@ -195,34 +251,3 @@ func (m *ManifestWorkReplicaSetController) sync(ctx context.Context, controllerC
return nil
}
func listManifestWorksByManifestWorkReplicaSet(mwrs *workapiv1alpha1.ManifestWorkReplicaSet,
manifestWorkLister worklisterv1.ManifestWorkLister) ([]*workapiv1.ManifestWork, error) {
req, err := labels.NewRequirement(
workapiv1alpha1.ManifestWorkReplicaSetControllerNameLabelKey,
selection.Equals, []string{manifestWorkReplicaSetKey(mwrs)})
if err != nil {
return nil, err
}
selector := labels.NewSelector().Add(*req)
return manifestWorkLister.List(selector)
}
func listManifestWorksByMWRSetPlacementRef(mwrs *workapiv1alpha1.ManifestWorkReplicaSet, placementName string,
manifestWorkLister worklisterv1.ManifestWorkLister) ([]*workapiv1.ManifestWork, error) {
reqMWRSet, err := labels.NewRequirement(workapiv1alpha1.ManifestWorkReplicaSetControllerNameLabelKey,
selection.Equals, []string{manifestWorkReplicaSetKey(mwrs)})
if err != nil {
return nil, err
}
reqPlacementRef, err := labels.NewRequirement(workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey,
selection.Equals, []string{placementName})
if err != nil {
return nil, err
}
selector := labels.NewSelector().Add(*reqMWRSet, *reqPlacementRef)
return manifestWorkLister.List(selector)
}

View File

@@ -29,8 +29,8 @@ type deployReconciler struct {
placementLister clusterlister.PlacementLister
}
func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet,
) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
func (d *deployReconciler) reconcile(
ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
// Manifestwork create/update/delete logic.
var errs []error
var plcsSummary []workapiv1alpha1.PlacementSummary
@@ -44,19 +44,18 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
}
// Get all ManifestWorks belonging to this ManifestWorkReplicaSet
allManifestWorks, err := listManifestWorksByManifestWorkReplicaSet(mwrSet, d.manifestWorkLister)
allManifestWorks, _, err := getManifestWorkInReplicaSet(mwrSet, d.manifestWorkLister)
if err != nil {
return mwrSet, reconcileContinue, fmt.Errorf("failed to list manifestworks: %w", err)
}
// Delete ManifestWorks that belong to placements no longer in the spec
for _, mw := range allManifestWorks {
placementName, ok := mw.Labels[workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey]
if !ok || !currentPlacementNames.Has(placementName) {
for _, mwRecord := range allManifestWorks.workByCluster {
if mwRecord.placements.Len() == 0 || mwRecord.placements.Difference(currentPlacementNames).Len() > 0 {
// This ManifestWork belongs to a placement that's no longer in the spec, delete it
err := d.workApplier.Delete(ctx, mw.Namespace, mw.Name)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete manifestwork %s/%s for removed placement %s: %w", mw.Namespace, mw.Name, placementName, err))
err := d.workApplier.Delete(ctx, mwRecord.work.Namespace, mwRecord.work.Name)
if err != nil && !errors.IsNotFound(err) {
errs = append(errs, fmt.Errorf("failed to delete manifestwork %s/%s for removed placement: %w", mwRecord.work.Namespace, mwRecord.work.Name, err))
}
}
}
@@ -69,7 +68,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
placement, err := d.placementLister.Placements(mwrSet.Namespace).Get(placementRef.Name)
if errors.IsNotFound(err) {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonPlacementDecisionNotFound, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getPlacementDecisionVerified(workapiv1alpha1.ReasonPlacementDecisionNotFound, ""))
return mwrSet, reconcileStop, nil
}
@@ -77,11 +76,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
return mwrSet, reconcileContinue, fmt.Errorf("failed get placement %w", err)
}
manifestWorks, err := listManifestWorksByMWRSetPlacementRef(mwrSet, placementRef.Name, d.manifestWorkLister)
if err != nil {
return mwrSet, reconcileContinue, err
}
manifestWorks := allManifestWorks.workByPlacement[placement.Name]
for _, mw := range manifestWorks {
// Check if ManifestWorkTemplate changes, ManifestWork will need to be updated.
newMW := &workv1.ManifestWork{}
@@ -111,7 +106,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
placeTracker := helper.GetPlacementTracker(d.placeDecisionLister, placement, existingClusterNames)
rolloutHandler, err := clustersdkv1alpha1.NewRolloutHandler(placeTracker, d.clusterRolloutStatusFunc)
if err != nil {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonNotAsExpected, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getPlacementDecisionVerified(workapiv1alpha1.ReasonNotAsExpected, ""))
return mwrSet, reconcileContinue, utilerrors.NewAggregate(errs)
}
@@ -126,7 +121,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
if err != nil {
errs = append(errs, err)
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonNotAsExpected, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getPlacementDecisionVerified(workapiv1alpha1.ReasonNotAsExpected, ""))
continue
}
@@ -135,33 +130,39 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
minRequeue = *rolloutResult.RecheckAfter
}
// Create ManifestWorks
for _, rolloutStatue := range rolloutResult.ClustersToRollout {
if rolloutStatue.Status == clustersdkv1alpha1.ToApply {
mw, err := CreateManifestWork(mwrSet, rolloutStatue.ClusterName, placementRef.Name)
if err != nil {
errs = append(errs, err)
continue
// Create or update ManifestWorks
for _, rolloutStatus := range rolloutResult.ClustersToRollout {
if rolloutStatus.Status == clustersdkv1alpha1.ToApply {
var workName string
if relatedManifestWork, ok := allManifestWorks.workByCluster[rolloutStatus.ClusterName]; ok {
if relatedManifestWork.placements.Has(placementRef.Name) {
workName = relatedManifestWork.work.Name
}
}
mw := buildManifestWork(mwrSet, workName, rolloutStatus.ClusterName, placementRef.Name)
_, err = d.workApplier.Apply(ctx, mw)
if err != nil {
fmt.Printf("err is %v\n", err)
errs = append(errs, err)
continue
}
existingClusterNames.Insert(rolloutStatue.ClusterName)
existingClusterNames.Insert(rolloutStatus.ClusterName)
}
}
for _, cls := range rolloutResult.ClustersRemoved {
// Delete manifestWork for removed clusters
err = d.workApplier.Delete(ctx, cls.ClusterName, mwrSet.Name)
if err != nil {
errs = append(errs, err)
continue
clusterWorks, _ := allManifestWorks.workByPlacement[placementRef.Name]
for _, mw := range clusterWorks {
if mw.Namespace != cls.ClusterName {
continue
}
err := d.workApplier.Delete(ctx, mw.Namespace, mw.Name)
if err != nil && !errors.IsNotFound(err) {
errs = append(errs, err)
} else {
existingClusterNames.Delete(cls.ClusterName)
}
}
existingClusterNames.Delete(cls.ClusterName)
}
total += int(placement.Status.NumberOfSelectedClusters)
@@ -193,15 +194,15 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
mwrSet.Status.Summary.Available = 0
mwrSet.Status.Summary.Degraded = 0
mwrSet.Status.Summary.Progressing = 0
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonPlacementDecisionEmpty, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getPlacementDecisionVerified(workapiv1alpha1.ReasonPlacementDecisionEmpty, ""))
} else {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonAsExpected, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getPlacementDecisionVerified(workapiv1alpha1.ReasonAsExpected, ""))
}
if total == succeededCount {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementRollOut(workapiv1alpha1.ReasonComplete, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getPlacementRollOut(workapiv1alpha1.ReasonComplete, ""))
} else {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementRollOut(workapiv1alpha1.ReasonProgressing, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getPlacementRollOut(workapiv1alpha1.ReasonProgressing, ""))
}
if len(errs) > 0 {
@@ -318,8 +319,8 @@ func isConditionReady(cond *metav1.Condition, generation int64, requireTrue bool
return true
}
// GetManifestworkApplied return only True status if there all clusters have manifests applied as expected
func GetManifestworkApplied(reason string, message string) metav1.Condition {
// getManifestworkApplied return only True status if there all clusters have manifests applied as expected
func getManifestworkApplied(reason string, message string) metav1.Condition {
if reason == workapiv1alpha1.ReasonAsExpected {
return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionManifestworkApplied, reason, message, metav1.ConditionTrue)
}
@@ -328,8 +329,8 @@ func GetManifestworkApplied(reason string, message string) metav1.Condition {
}
// GetPlacementDecisionVerified return only True status if there are clusters selected
func GetPlacementDecisionVerified(reason string, message string) metav1.Condition {
// getPlacementDecisionVerified return only True status if there are clusters selected
func getPlacementDecisionVerified(reason string, message string) metav1.Condition {
if reason == workapiv1alpha1.ReasonAsExpected {
return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified, reason, message, metav1.ConditionTrue)
}
@@ -337,8 +338,8 @@ func GetPlacementDecisionVerified(reason string, message string) metav1.Conditio
return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified, reason, message, metav1.ConditionFalse)
}
// GetPlacementRollout return only True status if all the clusters selected by the placement have succeeded
func GetPlacementRollOut(reason string, message string) metav1.Condition {
// getPlacementRollOut return only True status if all the clusters selected by the placement have succeeded
func getPlacementRollOut(reason string, message string) metav1.Condition {
if reason == workapiv1alpha1.ReasonComplete {
return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, reason, message, metav1.ConditionTrue)
}
@@ -356,20 +357,15 @@ func getCondition(conditionType string, reason string, message string, status me
}
}
func CreateManifestWork(
func buildManifestWork(
mwrSet *workapiv1alpha1.ManifestWorkReplicaSet,
name string,
clusterNS string,
placementRefName string,
) (*workv1.ManifestWork, error) {
if clusterNS == "" {
return nil, fmt.Errorf("invalid cluster namespace")
}
) *workv1.ManifestWork {
// Get ManifestWorkReplicaSet labels
labels := mwrSet.Labels
// TODO consider how to trace the manifestworks spec changes for cloudevents work client
// Merge mwrSet.Labels with the required labels
mergedLabels := make(map[string]string)
for k, v := range labels {
@@ -379,14 +375,20 @@ func CreateManifestWork(
mergedLabels[workapiv1alpha1.ManifestWorkReplicaSetControllerNameLabelKey] = manifestWorkReplicaSetKey(mwrSet)
mergedLabels[workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey] = placementRefName
return &workv1.ManifestWork{
mw := &workv1.ManifestWork{
ObjectMeta: metav1.ObjectMeta{
Name: mwrSet.Name,
Namespace: clusterNS,
Labels: mergedLabels,
},
Spec: mwrSet.Spec.ManifestWorkTemplate,
}, nil
}
if name != "" {
mw.Name = name
return mw
}
mw.GenerateName = mwrSet.Name
return mw
}
func getAvailableDecisionGroupProgressMessage(groupNum int, existingClsCount int, totalCls int32) string {

View File

@@ -3,17 +3,20 @@ package manifestworkreplicasetcontroller
import (
"context"
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
fakeclusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
workclient "open-cluster-management.io/api/client/work/clientset/versioned"
fakeworkclient "open-cluster-management.io/api/client/work/clientset/versioned/fake"
workinformers "open-cluster-management.io/api/client/work/informers/externalversions"
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
@@ -28,7 +31,7 @@ import (
func TestDeployReconcileAsExpected(t *testing.T) {
mwrSet := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
mw, _ := CreateManifestWork(mwrSet, "cls1", "place-test")
mw := buildManifestWork(mwrSet, "", "cls1", "place-test")
fWorkClient := fakeworkclient.NewSimpleClientset(mwrSet, mw)
workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
@@ -647,7 +650,7 @@ func TestRequeueWithProgressDeadline(t *testing.T) {
},
},
}
mw, _ := CreateManifestWork(mwrSet, "cls1", "place-test")
mw := buildManifestWork(mwrSet, "", "cls1", "place-test")
// Set Applied=True first to ensure work has been applied by hub controller
apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
Type: workapiv1.WorkApplied,
@@ -715,8 +718,8 @@ func TestDeployReconcileDeletesOrphanedManifestWorks(t *testing.T) {
mwrSet := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test2")
// Create ManifestWorks for both placement1 (old, should be deleted) and placement2 (current, should be kept)
mwOldPlacement, _ := CreateManifestWork(mwrSet, "cls1", "place-test1")
mwCurrentPlacement, _ := CreateManifestWork(mwrSet, "cls2", "place-test2")
mwOldPlacement := buildManifestWork(mwrSet, "", "cls1", "place-test1")
mwCurrentPlacement := buildManifestWork(mwrSet, "", "cls2", "place-test2")
fWorkClient := fakeworkclient.NewSimpleClientset(mwrSet, mwOldPlacement, mwCurrentPlacement)
workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
@@ -761,20 +764,22 @@ func TestDeployReconcileDeletesOrphanedManifestWorks(t *testing.T) {
}
// Verify that the ManifestWork from the old placement (place-test1) was deleted
deletedMW, err := fWorkClient.WorkV1().ManifestWorks("cls1").Get(context.TODO(), mwrSet.Name, metav1.GetOptions{})
if err == nil {
t.Fatalf("Expected ManifestWork for old placement to be deleted, but it still exists: %v", deletedMW)
}
// Verify that the ManifestWork from the current placement (place-test2) still exists
currentMW, err := fWorkClient.WorkV1().ManifestWorks("cls2").Get(context.TODO(), mwrSet.Name, metav1.GetOptions{})
works, err := listWorksByMWRS(context.TODO(), fWorkClient, "cls1", mwrSet.Namespace, mwrSet.Name, "place-test1")
if err != nil {
t.Fatalf("Expected ManifestWork for current placement to exist, but got error: %v", err)
t.Fatal(err)
}
if currentMW == nil {
t.Fatal("Expected ManifestWork for current placement to exist, but it is nil")
if len(works) != 0 {
t.Fatalf("Expected ManifestWork for old placement to be deleted, but it still exists")
}
// Verify that the ManifestWork from the current placement (place-test2) still exists
works, err = listWorksByMWRS(context.TODO(), fWorkClient, "cls2", mwrSet.Namespace, mwrSet.Name, "place-test2")
if err != nil {
t.Fatal(err)
}
if len(works) != 1 {
t.Fatalf("Expected ManifestWork for current placement to exist, but got %d, works: %v", len(works), works)
}
currentMW := works[0]
// Verify the placement label is correct
if currentMW.Labels[workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey] != "place-test2" {
t.Fatalf("Expected placement label to be 'place-test2', got '%s'",
@@ -789,9 +794,9 @@ func TestDeployReconcileWithMultiplePlacementChanges(t *testing.T) {
map[string]clusterv1alpha1.RolloutStrategy{"place-test2": allRollOut, "place-test3": allRollOut})
// Create ManifestWorks for placement1 (old), placement2 (current), and placement3 (current)
mwOldPlacement, _ := CreateManifestWork(mwrSet, "cls1", "place-test1")
mwCurrentPlacement2, _ := CreateManifestWork(mwrSet, "cls2", "place-test2")
mwCurrentPlacement3, _ := CreateManifestWork(mwrSet, "cls3", "place-test3")
mwOldPlacement := buildManifestWork(mwrSet, mwrSet.Name, "cls1", "place-test1")
mwCurrentPlacement2 := buildManifestWork(mwrSet, mwrSet.Name, "cls2", "place-test2")
mwCurrentPlacement3 := buildManifestWork(mwrSet, mwrSet.Name, "cls3", "place-test3")
fWorkClient := fakeworkclient.NewSimpleClientset(mwrSet, mwOldPlacement, mwCurrentPlacement2, mwCurrentPlacement3)
workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
@@ -846,22 +851,33 @@ func TestDeployReconcileWithMultiplePlacementChanges(t *testing.T) {
}
// Verify that the ManifestWork from the old placement (place-test1) was deleted
_, err = fWorkClient.WorkV1().ManifestWorks("cls1").Get(context.TODO(), mwrSet.Name, metav1.GetOptions{})
if err == nil {
t.Fatal("Expected ManifestWork for old placement to be deleted, but it still exists")
works, err := listWorksByMWRS(context.TODO(), fWorkClient, "cls1", mwrSet.Namespace, mwrSet.Name, "place-test1")
if err != nil {
t.Fatal(err)
}
if len(works) != 0 {
t.Fatalf("Expected ManifestWorks for old placement to be deleted, found %d", len(works))
}
// Verify that ManifestWorks from current placements (place-test2 and place-test3) still exist
currentMW2, err := fWorkClient.WorkV1().ManifestWorks("cls2").Get(context.TODO(), mwrSet.Name, metav1.GetOptions{})
works, err = listWorksByMWRS(context.TODO(), fWorkClient, "cls2", mwrSet.Namespace, mwrSet.Name, "place-test2")
if err != nil {
t.Fatalf("Expected ManifestWork for placement2 to exist, but got error: %v", err)
t.Fatal(err)
}
if len(works) != 1 {
t.Fatalf("Expected ManifestWork for placement2 to exist, but got error: %d, works: %v", len(works), works)
}
currentMW2 := works[0]
assert.Equal(t, currentMW2.Labels[workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey], "place-test2")
currentMW3, err := fWorkClient.WorkV1().ManifestWorks("cls3").Get(context.TODO(), mwrSet.Name, metav1.GetOptions{})
works, err = listWorksByMWRS(context.TODO(), fWorkClient, "cls3", mwrSet.Namespace, mwrSet.Name, "place-test3")
if err != nil {
t.Fatalf("Expected ManifestWork for placement3 to exist, but got error: %v", err)
}
if len(works) != 1 {
t.Fatalf("Expected ManifestWork for placement3 to exist, but got error: %d, works: %v", len(works), works)
}
currentMW3 := works[0]
assert.Equal(t, currentMW3.Labels[workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey], "place-test3")
}
func TestDeployRolloutProgressingWhenNotAllSucceeded(t *testing.T) {
@@ -1375,6 +1391,30 @@ func TestClusterRolloutStatusFunc(t *testing.T) {
})
}
}
func listWorksByMWRS(
ctx context.Context,
client workclient.Interface,
cluster string,
mwrNamespace string,
mwrName string,
placement string,
) ([]workapiv1.ManifestWork, error) {
selector := labels.Set{
workapiv1alpha1.ManifestWorkReplicaSetControllerNameLabelKey: fmt.Sprintf("%s.%s", mwrNamespace, mwrName),
workapiv1alpha1.ManifestWorkReplicaSetPlacementNameLabelKey: placement,
}.AsSelector().String()
list, err := client.WorkV1().
ManifestWorks(cluster).
List(ctx, metav1.ListOptions{LabelSelector: selector})
if err != nil {
return nil, err
}
return list.Items, nil
}
func TestShouldReturnToApply(t *testing.T) {
now := metav1.Now()

View File

@@ -37,7 +37,7 @@ func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alp
// remove finalizer until all works are gone if deletionPolicy is Foreground
// otherwise remove finalizer after delete all works
if mwrSet.Spec.CascadeDeletionPolicy == workapiv1alpha1.Foreground {
manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(mwrSet, f.manifestWorkLister)
_, manifestWorks, err := getManifestWorkInReplicaSet(mwrSet, f.manifestWorkLister)
if err != nil {
return mwrSet, reconcileContinue, err
}
@@ -54,7 +54,7 @@ func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alp
}
func (f *finalizeReconciler) finalizeManifestWorkReplicaSet(ctx context.Context, manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet) error {
manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(manifestWorkReplicaSet, f.manifestWorkLister)
_, manifestWorks, err := getManifestWorkInReplicaSet(manifestWorkReplicaSet, f.manifestWorkLister)
if err != nil {
return err
}

View File

@@ -34,7 +34,7 @@ func TestFinalizeReconcile(t *testing.T) {
mwrSetTest.DeletionTimestamp = &timeNow
mwrSetTest.Finalizers = append(mwrSetTest.Finalizers, workapiv1alpha1.ManifestWorkReplicaSetFinalizer)
mw, _ := CreateManifestWork(mwrSetTest, "cluster1", "place-test")
mw := buildManifestWork(mwrSetTest, "", "cluster1", "place-test")
return mwrSetTest, []*workapiv1.ManifestWork{mw}
},
validateActions: func(t *testing.T, manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet,
@@ -59,8 +59,8 @@ func TestFinalizeReconcile(t *testing.T) {
mwrSetTest.Finalizers = append(mwrSetTest.Finalizers, workapiv1alpha1.ManifestWorkReplicaSetFinalizer)
mwrSetTest.Spec.CascadeDeletionPolicy = workapiv1alpha1.Foreground
mw1, _ := CreateManifestWork(mwrSetTest, "cluster1", "place-test")
mw2, _ := CreateManifestWork(mwrSetTest, "cluster2", "place-test")
mw1 := buildManifestWork(mwrSetTest, "", "cluster1", "place-test")
mw2 := buildManifestWork(mwrSetTest, "", "cluster2", "place-test")
mw2.Finalizers = append(mw2.Finalizers, workapiv1.ManifestWorkFinalizer)
return mwrSetTest, []*workapiv1.ManifestWork{mw1, mw2}
},

View File

@@ -18,7 +18,7 @@ import (
func TestPlaceMWControllerIndex(t *testing.T) {
mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
mwrSetTest.Status.Summary.Total = 1
mw, _ := CreateManifestWork(mwrSetTest, "cls1", "place-test")
mw := buildManifestWork(mwrSetTest, "test-1", "cls1", "place-test")
fWorkClient := fakeworkclient.NewSimpleClientset(mwrSetTest, mw)
workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)

View File

@@ -22,21 +22,23 @@ func (d *statusReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
if mwrSet.Status.Summary.Total == 0 {
condition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)
if condition != nil && condition.Reason == workapiv1alpha1.ReasonPlacementDecisionEmpty {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetManifestworkApplied(workapiv1alpha1.ReasonPlacementDecisionEmpty, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getManifestworkApplied(workapiv1alpha1.ReasonPlacementDecisionEmpty, ""))
} else {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetManifestworkApplied(workapiv1alpha1.ReasonNotAsExpected, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getManifestworkApplied(workapiv1alpha1.ReasonNotAsExpected, ""))
}
return mwrSet, reconcileContinue, nil
}
appliedCount, availableCount, degradCount, processingCount := 0, 0, 0, 0
for id, plcSummary := range mwrSet.Status.PlacementsSummary {
manifestWorks, err := listManifestWorksByMWRSetPlacementRef(mwrSet, plcSummary.Name, d.manifestWorkLister)
if err != nil {
return mwrSet, reconcileContinue, err
}
allManifestWorks, _, err := getManifestWorkInReplicaSet(mwrSet, d.manifestWorkLister)
if err != nil {
return mwrSet, reconcileContinue, err
}
for id, plcSummary := range mwrSet.Status.PlacementsSummary {
manifestWorks := allManifestWorks.workByPlacement[plcSummary.Name]
applied, available, degrad, processing := 0, 0, 0, 0
for _, mw := range manifestWorks {
if !mw.DeletionTimestamp.IsZero() {
@@ -86,11 +88,11 @@ func (d *statusReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
if mwrSet.Status.Summary.Available == mwrSet.Status.Summary.Total && //nolint:gocritic
mwrSet.Status.Summary.Progressing == 0 && mwrSet.Status.Summary.Degraded == 0 {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetManifestworkApplied(workapiv1alpha1.ReasonAsExpected, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getManifestworkApplied(workapiv1alpha1.ReasonAsExpected, ""))
} else if mwrSet.Status.Summary.Progressing > 0 && mwrSet.Status.Summary.Degraded == 0 {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetManifestworkApplied(workapiv1alpha1.ReasonProcessing, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getManifestworkApplied(workapiv1alpha1.ReasonProcessing, ""))
} else {
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetManifestworkApplied(workapiv1alpha1.ReasonNotAsExpected, ""))
apimeta.SetStatusCondition(&mwrSet.Status.Conditions, getManifestworkApplied(workapiv1alpha1.ReasonNotAsExpected, ""))
}
return mwrSet, reconcileContinue, nil

View File

@@ -40,7 +40,7 @@ func TestStatusReconcileAsExpected(t *testing.T) {
}
for _, cls := range clusters {
mw, _ := CreateManifestWork(mwrSetTest, cls, plcName)
mw := buildManifestWork(mwrSetTest, mwrSetTest.Name, cls, plcName)
cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue)
apimeta.SetStatusCondition(&mw.Status.Conditions, cond)
@@ -115,7 +115,7 @@ func TestStatusReconcileAsProcessing(t *testing.T) {
}
for id, cls := range clusters {
mw, _ := CreateManifestWork(mwrSetTest, cls, plcName)
mw := buildManifestWork(mwrSetTest, mwrSetTest.Name, cls, plcName)
cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue)
apimeta.SetStatusCondition(&mw.Status.Conditions, cond)
@@ -197,7 +197,7 @@ func TestStatusReconcileNotAsExpected(t *testing.T) {
avaCount, processingCount, degradCount := 0, 0, 0
for id, cls := range clusters {
mw, _ := CreateManifestWork(mwrSetTest, cls, plcName)
mw := buildManifestWork(mwrSetTest, mwrSetTest.Name, cls, plcName)
cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue)
apimeta.SetStatusCondition(&mw.Status.Conditions, cond)

View File

@@ -68,7 +68,7 @@ func CreateTestManifestWorks(name, namespace string, placementName string, clust
var works []runtime.Object
for _, c := range clusters {
mw, _ := spoketesting.NewManifestWork(0, obj)
mw.Name = name
mw.Name = fmt.Sprintf("%s-%s", name, c)
mw.Namespace = c
mw.Labels = map[string]string{
"work.open-cluster-management.io/manifestworkreplicaset": fmt.Sprintf("%s.%s", namespace, name),
@@ -90,7 +90,7 @@ func CreateTestManifestWorks(name, namespace string, placementName string, clust
func CreateTestManifestWork(name, namespace string, placementName string, clusterName string) *workapiv1.ManifestWork {
obj := testingcommon.NewUnstructured("v1", "kind", "test-ns", "test-name")
mw, _ := spoketesting.NewManifestWork(0, obj)
mw.Name = name
mw.Name = fmt.Sprintf("%s-%s", name, clusterName)
mw.Namespace = clusterName
mw.Labels = map[string]string{
"work.open-cluster-management.io/manifestworkreplicaset": fmt.Sprintf("%s.%s", namespace, name),