support managed namespaces (#1193)

Signed-off-by: Yang Le <yangle@redhat.com>
Author: Yang Le
Date: 2025-09-25 16:19:30 +08:00
Committed by: GitHub
Parent: 35bab4476a
Commit: db92ed79d4
17 changed files with 2155 additions and 19 deletions
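
As context for the diffs below, here is a minimal, hypothetical sketch (not code from this commit) of the data flow the change introduces, using the cluster API types referenced throughout: a ManagedClusterSet declares spec.ManagedNamespaces, and the new hub controller copies those entries into the status of every member ManagedCluster, tagging each entry with the originating cluster set.

package example

import (
	clusterv1 "open-cluster-management.io/api/cluster/v1"
	clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
)

// buildManagedNamespaceStatus mirrors what the hub-side managed-namespace
// controller does for a single cluster set: copy each configured namespace
// into the ManagedCluster status and record which set it came from.
func buildManagedNamespaceStatus(setName string, spec clusterv1beta2.ManagedClusterSetSpec) []clusterv1.ClusterSetManagedNamespaceConfig {
	var out []clusterv1.ClusterSetManagedNamespaceConfig
	for _, ns := range spec.ManagedNamespaces {
		out = append(out, clusterv1.ClusterSetManagedNamespaceConfig{
			ManagedNamespaceConfig: ns,
			ClusterSet:             setName,
		})
	}
	return out
}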


@@ -16,6 +16,10 @@ rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
# Allow agent to create/get/list/update/watch/patch namespaces
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["create", "get", "list", "update", "watch", "patch"]
# Allow agent to list clusterclaims
- apiGroups: ["cluster.open-cluster-management.io"]
resources: ["clusterclaims"]


@@ -0,0 +1,243 @@
package managedcluster
import (
"context"
"fmt"
"reflect"
"sort"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
clientset "open-cluster-management.io/api/client/cluster/clientset/versioned"
clusterinformerv1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
clusterinformerv1beta2 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta2"
clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
clusterlisterv1beta2 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta2"
v1 "open-cluster-management.io/api/cluster/v1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
clustersdkv1beta2 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta2"
"open-cluster-management.io/sdk-go/pkg/patcher"
)
// managedNamespaceController reconciles managed namespaces for ManagedClusters
// by watching ManagedCluster changes and updating their managed namespace status based on
// all ManagedClusterSets they belong to
type managedNamespaceController struct {
clusterPatcher patcher.Patcher[*v1.ManagedCluster, v1.ManagedClusterSpec, v1.ManagedClusterStatus]
clusterLister clusterlisterv1.ManagedClusterLister
clusterSetLister clusterlisterv1beta2.ManagedClusterSetLister
eventRecorder events.Recorder
}
// NewManagedNamespaceController creates a new managed namespace controller
func NewManagedNamespaceController(
clusterClient clientset.Interface,
clusterInformer clusterinformerv1.ManagedClusterInformer,
clusterSetInformer clusterinformerv1beta2.ManagedClusterSetInformer,
recorder events.Recorder) factory.Controller {
controllerName := "managed-namespace-controller"
syncCtx := factory.NewSyncContext(controllerName, recorder)
c := &managedNamespaceController{
clusterPatcher: patcher.NewPatcher[
*v1.ManagedCluster, v1.ManagedClusterSpec, v1.ManagedClusterStatus](
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: recorder.WithComponentSuffix("managed-namespace-controller"),
}
// Add explicit event handlers for ManagedCluster
_, err := clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cluster, ok := obj.(*v1.ManagedCluster)
if !ok {
utilruntime.HandleError(fmt.Errorf("error to get ManagedCluster object: %v", obj))
return
}
syncCtx.Queue().Add(cluster.Name)
},
UpdateFunc: func(oldObj, newObj interface{}) {
oldCluster, ok := oldObj.(*v1.ManagedCluster)
if !ok {
utilruntime.HandleError(fmt.Errorf("error to get old ManagedCluster object: %v", oldObj))
return
}
newCluster, ok := newObj.(*v1.ManagedCluster)
if !ok {
utilruntime.HandleError(fmt.Errorf("error to get new ManagedCluster object: %v", newObj))
return
}
// Only care about label changes that affect cluster set membership
if !reflect.DeepEqual(oldCluster.Labels, newCluster.Labels) {
syncCtx.Queue().Add(newCluster.Name)
}
},
})
if err != nil {
utilruntime.HandleError(err)
}
return factory.New().
WithSyncContext(syncCtx).
WithBareInformers(clusterInformer.Informer()).
WithInformersQueueKeysFunc(c.clusterSetToClusterQueueKeysFunc, clusterSetInformer.Informer()).
WithSync(c.sync).
ToController("ManagedNamespaceController", recorder)
}
func (c *managedNamespaceController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
logger := klog.FromContext(ctx)
clusterName := syncCtx.QueueKey()
if len(clusterName) == 0 {
return nil
}
logger.V(4).Info("Reconciling managed namespaces for ManagedCluster", "clusterName", clusterName)
cluster, err := c.clusterLister.Get(clusterName)
if errors.IsNotFound(err) {
// Cluster deleted - nothing to do
logger.V(4).Info("ManagedCluster not found, skipping", "clusterName", clusterName)
return nil
}
if err != nil {
return err
}
// If cluster is being deleted, skip processing
if !cluster.DeletionTimestamp.IsZero() {
logger.V(4).Info("ManagedCluster is being deleted, skipping", "clusterName", clusterName)
return nil
}
if err := c.syncManagedNamespacesForCluster(ctx, cluster); err != nil {
return fmt.Errorf("failed to sync managed namespaces for ManagedCluster %q: %w", cluster.Name, err)
}
return nil
}
// syncManagedNamespacesForCluster updates the managed namespace configuration for a specific cluster
// based on all cluster sets it belongs to
func (c *managedNamespaceController) syncManagedNamespacesForCluster(ctx context.Context, cluster *v1.ManagedCluster) error {
logger := klog.FromContext(ctx)
// Get all cluster sets this cluster belongs to
clusterSets, err := clustersdkv1beta2.GetClusterSetsOfCluster(cluster, c.clusterSetLister)
if err != nil {
return fmt.Errorf("failed to get cluster sets for cluster %q: %w", cluster.Name, err)
}
// Build the complete list of managed namespaces from all cluster sets
var allManagedNamespaces []v1.ClusterSetManagedNamespaceConfig
for _, clusterSet := range clusterSets {
// Skip cluster sets that are being deleted
if !clusterSet.DeletionTimestamp.IsZero() {
logger.V(4).Info("Skipping cluster set being deleted", "clusterSetName", clusterSet.Name, "clusterName", cluster.Name)
continue
}
for _, nsConfig := range clusterSet.Spec.ManagedNamespaces {
managedNS := v1.ClusterSetManagedNamespaceConfig{
ManagedNamespaceConfig: nsConfig,
ClusterSet: clusterSet.Name,
}
allManagedNamespaces = append(allManagedNamespaces, managedNS)
}
}
// Sort by cluster set name first, then by namespace name for consistent ordering
sort.Slice(allManagedNamespaces, func(i, j int) bool {
if allManagedNamespaces[i].ClusterSet == allManagedNamespaces[j].ClusterSet {
// Same cluster set, sort by namespace name
return allManagedNamespaces[i].Name < allManagedNamespaces[j].Name
}
// Different cluster sets, sort by cluster set name
return allManagedNamespaces[i].ClusterSet < allManagedNamespaces[j].ClusterSet
})
// Update cluster status
updatedCluster := cluster.DeepCopy()
updatedCluster.Status.ManagedNamespaces = allManagedNamespaces
updated, err := c.clusterPatcher.PatchStatus(ctx, updatedCluster, updatedCluster.Status, cluster.Status)
if err != nil {
return fmt.Errorf("failed to update ManagedCluster status for cluster %q: %w", cluster.Name, err)
}
// Only record event if there was an actual update
if updated {
logger.V(4).Info("Updated managed namespaces for cluster", "clusterName", cluster.Name, "namespacesCount", len(allManagedNamespaces))
c.eventRecorder.Eventf("ManagedNamespacesUpdated", "Updated managed namespaces for cluster %q (total: %d)", cluster.Name, len(allManagedNamespaces))
}
return nil
}
// clusterSetToClusterQueueKeysFunc maps ManagedClusterSet changes to cluster names that should be reconciled
func (c *managedNamespaceController) clusterSetToClusterQueueKeysFunc(obj runtime.Object) []string {
clusterSet, ok := obj.(*clusterv1beta2.ManagedClusterSet)
if !ok {
utilruntime.HandleError(fmt.Errorf("expected ManagedClusterSet, got %T", obj))
return nil
}
if clusterSet == nil {
return nil
}
clusterNames := sets.Set[string]{}
// Get all clusters that currently belong to this cluster set
currentClusters, err := clustersdkv1beta2.GetClustersFromClusterSet(clusterSet, c.clusterLister)
if err != nil {
utilruntime.HandleError(fmt.Errorf("error getting clusters from cluster set %q: %v", clusterSet.Name, err))
} else {
for _, cluster := range currentClusters {
clusterNames.Insert(cluster.Name)
}
}
// Get all clusters that previously had managed namespaces from this cluster set
previousClusters, err := c.getClustersPreviouslyInSet(clusterSet.Name)
if err != nil {
utilruntime.HandleError(fmt.Errorf("error getting clusters previously in cluster set %q: %v", clusterSet.Name, err))
} else {
for _, cluster := range previousClusters {
clusterNames.Insert(cluster.Name)
}
}
// Convert set to slice
return clusterNames.UnsortedList()
}
// getClustersPreviouslyInSet returns all clusters that have managed namespaces from the specified cluster set
func (c *managedNamespaceController) getClustersPreviouslyInSet(clusterSetName string) ([]*v1.ManagedCluster, error) {
allClusters, err := c.clusterLister.List(labels.Everything())
if err != nil {
return nil, err
}
var clustersWithNamespaces []*v1.ManagedCluster
for _, cluster := range allClusters {
for _, managedNS := range cluster.Status.ManagedNamespaces {
if managedNS.ClusterSet == clusterSetName {
clustersWithNamespaces = append(clustersWithNamespaces, cluster)
break
}
}
}
return clustersWithNamespaces, nil
}


@@ -0,0 +1,529 @@
package managedcluster
import (
"context"
"testing"
"time"
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterfake "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1 "open-cluster-management.io/api/cluster/v1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
"open-cluster-management.io/sdk-go/pkg/patcher"
testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
)
func TestSyncManagedNamespacesForCluster(t *testing.T) {
cases := []struct {
name string
cluster *clusterv1.ManagedCluster
clusterSets []runtime.Object
expectedManagedNamespaces []clusterv1.ClusterSetManagedNamespaceConfig
expectUpdate bool
}{
{
name: "cluster with no cluster sets",
cluster: newManagedCluster("cluster1", map[string]string{}),
clusterSets: []runtime.Object{},
expectedManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{},
expectUpdate: false, // No change needed - already empty
},
{
name: "cluster with existing namespaces but no cluster sets should clear them",
cluster: newManagedClusterWithNamespaces("cluster1", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
}),
clusterSets: []runtime.Object{},
expectedManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{},
expectUpdate: true, // Should clear existing namespaces
},
{
name: "cluster with single cluster set",
cluster: newManagedCluster("cluster1", map[string]string{"cluster.open-cluster-management.io/clusterset": "set1"}),
clusterSets: []runtime.Object{
newManagedClusterSet("set1", []clusterv1.ManagedNamespaceConfig{
{Name: "namespace1"},
{Name: "namespace2"},
}),
},
expectedManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace2"}, ClusterSet: "set1"},
},
expectUpdate: true,
},
{
name: "cluster with multiple cluster sets",
cluster: newManagedCluster("cluster1", map[string]string{"cluster.open-cluster-management.io/clusterset": "set1", "label1": "value1"}),
clusterSets: []runtime.Object{
newManagedClusterSet("set1", []clusterv1.ManagedNamespaceConfig{
{Name: "namespace1"},
}),
newManagedClusterSetWithLabelSelector("set2", []clusterv1.ManagedNamespaceConfig{
{Name: "namespace2"},
}, map[string]string{"label1": "value1"}),
},
expectedManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace2"}, ClusterSet: "set2"},
},
expectUpdate: true,
},
{
name: "cluster with existing namespaces - no change needed",
cluster: &clusterv1.ManagedCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
Labels: map[string]string{"cluster.open-cluster-management.io/clusterset": "set1"},
},
Status: clusterv1.ManagedClusterStatus{
ManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
},
},
},
clusterSets: []runtime.Object{
newManagedClusterSet("set1", []clusterv1.ManagedNamespaceConfig{
{Name: "namespace1"},
}),
},
expectedManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
},
expectUpdate: false, // No change needed
},
{
name: "cluster set being deleted should be ignored",
cluster: &clusterv1.ManagedCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
Labels: map[string]string{"cluster.open-cluster-management.io/clusterset": "set1"},
},
Status: clusterv1.ManagedClusterStatus{
ManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
},
},
},
clusterSets: []runtime.Object{
&clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set1",
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
Spec: clusterv1beta2.ManagedClusterSetSpec{
ManagedNamespaces: []clusterv1.ManagedNamespaceConfig{
{Name: "namespace1"},
},
},
},
},
expectedManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{},
expectUpdate: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterObjs := []runtime.Object{c.cluster}
clusterClient := clusterfake.NewSimpleClientset(append(clusterObjs, c.clusterSets...)...)
clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
clusterInformer := clusterInformerFactory.Cluster().V1().ManagedClusters()
clusterSetInformer := clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets()
for _, obj := range clusterObjs {
if err := clusterInformer.Informer().GetStore().Add(obj); err != nil {
t.Fatal(err)
}
}
for _, obj := range c.clusterSets {
if err := clusterSetInformer.Informer().GetStore().Add(obj); err != nil {
t.Fatal(err)
}
}
controller := &managedNamespaceController{
clusterPatcher: patcher.NewPatcher[
*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}
err := controller.syncManagedNamespacesForCluster(context.TODO(), c.cluster)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// Check that the right number of patch actions were performed
actions := clusterClient.Actions()
updateActions := 0
for _, action := range actions {
if action.GetVerb() == "patch" {
updateActions++
}
}
if c.expectUpdate && updateActions == 0 {
t.Errorf("expected cluster status update but none occurred")
}
if !c.expectUpdate && updateActions > 0 {
t.Errorf("expected no cluster status update but %d occurred", updateActions)
}
})
}
}
func TestClusterSetToClusterQueueKeysFunc(t *testing.T) {
cases := []struct {
name string
clusters []runtime.Object
clusterSet *clusterv1beta2.ManagedClusterSet
expectedClusters []string
}{
{
name: "current membership only",
clusters: []runtime.Object{
newManagedCluster("cluster1", map[string]string{"cluster.open-cluster-management.io/clusterset": "set1"}),
newManagedCluster("cluster2", map[string]string{"cluster.open-cluster-management.io/clusterset": "set1"}),
},
clusterSet: newManagedClusterSet("set1", []clusterv1.ManagedNamespaceConfig{{Name: "namespace1"}}),
expectedClusters: []string{"cluster1", "cluster2"},
},
{
name: "previous membership only",
clusters: []runtime.Object{
newManagedClusterWithNamespaces("cluster1", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
}),
newManagedClusterWithNamespaces("cluster2", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
}),
},
clusterSet: newManagedClusterSet("set1", []clusterv1.ManagedNamespaceConfig{{Name: "namespace1"}}),
expectedClusters: []string{"cluster1", "cluster2"},
},
{
name: "both current and previous membership",
clusters: []runtime.Object{
newManagedCluster("cluster1", map[string]string{"cluster.open-cluster-management.io/clusterset": "set1"}),
newManagedClusterWithNamespaces("cluster2", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
}),
newManagedCluster("cluster3", map[string]string{"cluster.open-cluster-management.io/clusterset": "set1"}),
},
clusterSet: newManagedClusterSet("set1", []clusterv1.ManagedNamespaceConfig{{Name: "namespace1"}}),
expectedClusters: []string{"cluster1", "cluster2", "cluster3"},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(append(c.clusters, c.clusterSet)...)
clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
clusterInformer := clusterInformerFactory.Cluster().V1().ManagedClusters()
clusterSetInformer := clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets()
for _, obj := range c.clusters {
if err := clusterInformer.Informer().GetStore().Add(obj); err != nil {
t.Fatal(err)
}
}
if err := clusterSetInformer.Informer().GetStore().Add(c.clusterSet); err != nil {
t.Fatal(err)
}
controller := &managedNamespaceController{
clusterPatcher: patcher.NewPatcher[
*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}
clusterNames := controller.clusterSetToClusterQueueKeysFunc(c.clusterSet)
if len(clusterNames) != len(c.expectedClusters) {
t.Errorf("expected %d cluster names to be returned, got %d", len(c.expectedClusters), len(clusterNames))
}
expectedClusters := make(map[string]bool)
for _, name := range c.expectedClusters {
expectedClusters[name] = false
}
for _, name := range clusterNames {
if _, exists := expectedClusters[name]; exists {
expectedClusters[name] = true
} else {
t.Errorf("unexpected cluster name returned: %s", name)
}
}
for cluster, found := range expectedClusters {
if !found {
t.Errorf("expected cluster %s to be returned but it wasn't", cluster)
}
}
})
}
}
func TestGetClustersPreviouslyInSet(t *testing.T) {
cases := []struct {
name string
clusters []runtime.Object
clusterSetName string
expectedClusters []string
}{
{
name: "no clusters with managed namespaces",
clusters: []runtime.Object{
newManagedCluster("cluster1", map[string]string{}),
newManagedCluster("cluster2", map[string]string{}),
},
clusterSetName: "set1",
expectedClusters: []string{},
},
{
name: "some clusters with managed namespaces from target set",
clusters: []runtime.Object{
newManagedClusterWithNamespaces("cluster1", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
}),
newManagedClusterWithNamespaces("cluster2", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace2"}, ClusterSet: "set2"},
}),
newManagedClusterWithNamespaces("cluster3", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace3"}, ClusterSet: "set1"},
}),
},
clusterSetName: "set1",
expectedClusters: []string{"cluster1", "cluster3"},
},
{
name: "clusters with multiple managed namespaces",
clusters: []runtime.Object{
newManagedClusterWithNamespaces("cluster1", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace2"}, ClusterSet: "set2"},
}),
newManagedClusterWithNamespaces("cluster2", map[string]string{}, []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace3"}, ClusterSet: "set3"},
}),
},
clusterSetName: "set1",
expectedClusters: []string{"cluster1"},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.clusters...)
clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
clusterInformer := clusterInformerFactory.Cluster().V1().ManagedClusters()
for _, obj := range c.clusters {
if err := clusterInformer.Informer().GetStore().Add(obj); err != nil {
t.Fatal(err)
}
}
controller := &managedNamespaceController{
clusterPatcher: patcher.NewPatcher[
*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}
clusters, err := controller.getClustersPreviouslyInSet(c.clusterSetName)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(clusters) != len(c.expectedClusters) {
t.Errorf("expected %d clusters to be returned, got %d", len(c.expectedClusters), len(clusters))
}
expectedClusters := make(map[string]bool)
for _, name := range c.expectedClusters {
expectedClusters[name] = false
}
for _, cluster := range clusters {
if _, exists := expectedClusters[cluster.Name]; exists {
expectedClusters[cluster.Name] = true
} else {
t.Errorf("unexpected cluster returned: %s", cluster.Name)
}
}
for cluster, found := range expectedClusters {
if !found {
t.Errorf("expected cluster %s to be returned but it wasn't", cluster)
}
}
})
}
}
func TestSync(t *testing.T) {
cases := []struct {
name string
cluster *clusterv1.ManagedCluster
clusterSets []runtime.Object
expectError bool
queueKey string
}{
{
name: "empty queue key should return nil",
cluster: nil,
clusterSets: []runtime.Object{},
expectError: false,
queueKey: "",
},
{
name: "cluster not found should return nil",
cluster: nil,
clusterSets: []runtime.Object{},
expectError: false,
queueKey: "nonexistent-cluster",
},
{
name: "normal cluster should be reconciled",
cluster: newManagedCluster("cluster1", map[string]string{
"cluster.open-cluster-management.io/clusterset": "set1",
}),
clusterSets: []runtime.Object{
newManagedClusterSet("set1", []clusterv1.ManagedNamespaceConfig{
{Name: "namespace1"},
}),
},
expectError: false,
queueKey: "cluster1",
},
{
name: "terminating cluster should be skipped",
cluster: &clusterv1.ManagedCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
Status: clusterv1.ManagedClusterStatus{
ManagedNamespaces: []clusterv1.ClusterSetManagedNamespaceConfig{
{ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{Name: "namespace1"}, ClusterSet: "set1"},
},
},
},
clusterSets: []runtime.Object{},
expectError: false,
queueKey: "cluster1",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var objects []runtime.Object
if c.cluster != nil {
objects = append(objects, c.cluster)
}
objects = append(objects, c.clusterSets...)
clusterClient := clusterfake.NewSimpleClientset(objects...)
clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
clusterInformer := clusterInformerFactory.Cluster().V1().ManagedClusters()
clusterSetInformer := clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets()
if c.cluster != nil {
if err := clusterInformer.Informer().GetStore().Add(c.cluster); err != nil {
t.Fatal(err)
}
}
for _, obj := range c.clusterSets {
if err := clusterSetInformer.Informer().GetStore().Add(obj); err != nil {
t.Fatal(err)
}
}
controller := &managedNamespaceController{
clusterPatcher: patcher.NewPatcher[
*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}
// Create a fake sync context
syncCtx := testingcommon.NewFakeSyncContext(t, c.queueKey)
err := controller.sync(context.TODO(), syncCtx)
if c.expectError && err == nil {
t.Errorf("expected error but got none")
}
if !c.expectError && err != nil {
t.Errorf("unexpected error: %v", err)
}
})
}
}
func newManagedCluster(name string, labels map[string]string) *clusterv1.ManagedCluster {
return &clusterv1.ManagedCluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
}
}
func newManagedClusterWithNamespaces(name string, labels map[string]string, managedNS []clusterv1.ClusterSetManagedNamespaceConfig) *clusterv1.ManagedCluster {
return &clusterv1.ManagedCluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Status: clusterv1.ManagedClusterStatus{
ManagedNamespaces: managedNS,
},
}
}
func newManagedClusterSet(name string, namespaces []clusterv1.ManagedNamespaceConfig) *clusterv1beta2.ManagedClusterSet {
return &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: clusterv1beta2.ManagedClusterSetSpec{
ClusterSelector: clusterv1beta2.ManagedClusterSelector{
SelectorType: clusterv1beta2.ExclusiveClusterSetLabel,
},
ManagedNamespaces: namespaces,
},
}
}
func newManagedClusterSetWithLabelSelector(name string, namespaces []clusterv1.ManagedNamespaceConfig, labelSelector map[string]string) *clusterv1beta2.ManagedClusterSet {
return &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: clusterv1beta2.ManagedClusterSetSpec{
ClusterSelector: clusterv1beta2.ManagedClusterSelector{
SelectorType: clusterv1beta2.LabelSelector,
LabelSelector: &metav1.LabelSelector{
MatchLabels: labelSelector,
},
},
ManagedNamespaces: namespaces,
},
}
}


@@ -102,9 +102,10 @@ func (c *defaultManagedClusterSetController) syncDefaultClusterSet(ctx context.C
return nil
}
- // if defaultClusterSet.Spec is changed, rollback the change by update it to the original value.
- if !equality.Semantic.DeepEqual(defaultClusterSet.Spec, DefaultManagedClusterSet.Spec) {
- defaultClusterSet.Spec = DefaultManagedClusterSet.Spec
+ // if defaultClusterSet.Spec.ClusterSelector is changed, rollback the change.
+ // All fields except Spec.ClusterSelector are editable.
+ if !equality.Semantic.DeepEqual(defaultClusterSet.Spec.ClusterSelector, DefaultManagedClusterSet.Spec.ClusterSelector) {
+ defaultClusterSet.Spec.ClusterSelector = DefaultManagedClusterSet.Spec.ClusterSelector
_, err := c.clusterSetClient.ManagedClusterSets().Update(ctx, defaultClusterSet, metav1.UpdateOptions{})
if err != nil {


@@ -13,6 +13,7 @@ import (
clusterfake "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1 "open-cluster-management.io/api/cluster/v1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
@@ -40,15 +41,15 @@ func TestSyncDefaultClusterSet(t *testing.T) {
},
},
{
name: "sync edited default cluster set",
name: "sync edited cluster selector in default cluster set",
existingClusterSet: newDefaultManagedClusterSet(DefaultManagedClusterSetName, editedDefaultManagedClusterSetSpec, false),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testingcommon.AssertActions(t, actions, "update")
clusterset := actions[0].(clienttesting.UpdateAction).GetObject().(*clusterv1beta2.ManagedClusterSet)
- // if spec not rollbacked, error
- if !equality.Semantic.DeepEqual(clusterset.Spec, DefaultManagedClusterSet.Spec) {
- t.Errorf("Failed to rollback default managed cluster set spec after it is edited")
+ // if the cluster selector is not rolled back, error
+ if !equality.Semantic.DeepEqual(clusterset.Spec.ClusterSelector, DefaultManagedClusterSet.Spec.ClusterSelector) {
+ t.Errorf("Failed to rollback default managed cluster set cluster selector after it is edited")
}
},
},
@@ -78,6 +79,30 @@ func TestSyncDefaultClusterSet(t *testing.T) {
testingcommon.AssertNoActions(t, actions)
},
},
{
name: "sync default cluster set with edited managed namespaces - no rollback",
existingClusterSet: newDefaultManagedClusterSetWithManagedNamespaces(DefaultManagedClusterSetName, DefaultManagedClusterSet.Spec.ClusterSelector, false),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// Since only ClusterSelector is protected, editing ManagedNamespaces should not trigger rollback
testingcommon.AssertNoActions(t, actions)
},
},
{
name: "sync default cluster set with edited cluster selector and managed namespaces - only cluster selector rollback",
existingClusterSet: newDefaultManagedClusterSetWithManagedNamespacesAndEditedSelector(DefaultManagedClusterSetName, false),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testingcommon.AssertActions(t, actions, "update")
clusterset := actions[0].(clienttesting.UpdateAction).GetObject().(*clusterv1beta2.ManagedClusterSet)
// Only the cluster selector should be rolled back; managed namespaces should remain
if !equality.Semantic.DeepEqual(clusterset.Spec.ClusterSelector, DefaultManagedClusterSet.Spec.ClusterSelector) {
t.Errorf("Failed to rollback default managed cluster set cluster selector")
}
// Managed namespaces should be preserved
if len(clusterset.Spec.ManagedNamespaces) == 0 {
t.Errorf("ManagedNamespaces should be preserved during cluster selector rollback")
}
},
},
}
for _, c := range cases {
@@ -147,3 +172,60 @@ func newDefaultManagedClusterSetWithAnnotation(
return clusterSet
}
func newDefaultManagedClusterSetWithManagedNamespaces(
name string, clusterSelector clusterv1beta2.ManagedClusterSelector, terminating bool) *clusterv1beta2.ManagedClusterSet {
spec := clusterv1beta2.ManagedClusterSetSpec{
ClusterSelector: clusterSelector,
ManagedNamespaces: []clusterv1.ManagedNamespaceConfig{
{
Name: "test-namespace-1",
},
{
Name: "test-namespace-2",
},
},
}
clusterSet := &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: spec,
}
if terminating {
now := metav1.Now()
clusterSet.DeletionTimestamp = &now
}
return clusterSet
}
func newDefaultManagedClusterSetWithManagedNamespacesAndEditedSelector(
name string, terminating bool) *clusterv1beta2.ManagedClusterSet {
editedSelector := clusterv1beta2.ManagedClusterSelector{
SelectorType: "non-LegacyClusterSetLabel",
}
spec := clusterv1beta2.ManagedClusterSetSpec{
ClusterSelector: editedSelector,
ManagedNamespaces: []clusterv1.ManagedNamespaceConfig{
{
Name: "test-namespace-1",
},
{
Name: "test-namespace-2",
},
},
}
clusterSet := &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: spec,
}
if terminating {
now := metav1.Now()
clusterSet.DeletionTimestamp = &now
}
return clusterSet
}


@@ -102,9 +102,10 @@ func (c *globalManagedClusterSetController) applyGlobalClusterSet(ctx context.Co
return nil
}
- // if globalClusterSet.Spec is changed, rollback the change by update it to the original value.
- if !equality.Semantic.DeepEqual(globalClusterSet.Spec, GlobalManagedClusterSet.Spec) {
- globalClusterSet.Spec = GlobalManagedClusterSet.Spec
+ // if globalClusterSet.Spec.ClusterSelector is changed, rollback the change.
+ // Fields except Spec.ClusterSelector are editable.
+ if !equality.Semantic.DeepEqual(globalClusterSet.Spec.ClusterSelector, GlobalManagedClusterSet.Spec.ClusterSelector) {
+ globalClusterSet.Spec.ClusterSelector = GlobalManagedClusterSet.Spec.ClusterSelector
_, err := c.clusterSetClient.ManagedClusterSets().Update(ctx, globalClusterSet, metav1.UpdateOptions{})
if err != nil {


@@ -13,6 +13,7 @@ import (
clusterfake "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1 "open-cluster-management.io/api/cluster/v1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
@@ -40,15 +41,15 @@ func TestSyncGlobalClusterSet(t *testing.T) {
},
},
{
name: "sync edited global cluster set",
name: "sync edited cluster selector in global cluster set",
existingClusterSet: newGlobalManagedClusterSet(GlobalManagedClusterSetName, editedGlobalManagedClusterSetSpec, false),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testingcommon.AssertActions(t, actions, "update")
clusterset := actions[0].(clienttesting.UpdateAction).GetObject().(*clusterv1beta2.ManagedClusterSet)
- // if spec not rollbacked, error
- if !equality.Semantic.DeepEqual(clusterset.Spec, GlobalManagedClusterSet.Spec) {
- t.Errorf("Failed to rollback global managed cluster set spec after it is edited")
+ // if the cluster selector is not rolled back, error
+ if !equality.Semantic.DeepEqual(clusterset.Spec.ClusterSelector, GlobalManagedClusterSet.Spec.ClusterSelector) {
+ t.Errorf("Failed to rollback global managed cluster set cluster selector after it is edited")
}
},
},
@@ -71,6 +72,30 @@ func TestSyncGlobalClusterSet(t *testing.T) {
testingcommon.AssertNoActions(t, actions)
},
},
{
name: "sync global cluster set with edited managed namespaces - no rollback",
existingClusterSet: newGlobalManagedClusterSetWithManagedNamespaces(GlobalManagedClusterSetName, GlobalManagedClusterSet.Spec.ClusterSelector, false),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// Since only ClusterSelector is protected, editing ManagedNamespaces should not trigger rollback
testingcommon.AssertNoActions(t, actions)
},
},
{
name: "sync global cluster set with edited cluster selector and managed namespaces - only cluster selector rollback",
existingClusterSet: newGlobalManagedClusterSetWithManagedNamespacesAndEditedSelector(GlobalManagedClusterSetName, false),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testingcommon.AssertActions(t, actions, "update")
clusterset := actions[0].(clienttesting.UpdateAction).GetObject().(*clusterv1beta2.ManagedClusterSet)
// Only the cluster selector should be rolled back; managed namespaces should remain
if !equality.Semantic.DeepEqual(clusterset.Spec.ClusterSelector, GlobalManagedClusterSet.Spec.ClusterSelector) {
t.Errorf("Failed to rollback global managed cluster set cluster selector")
}
// Managed namespaces should be preserved
if len(clusterset.Spec.ManagedNamespaces) == 0 {
t.Errorf("ManagedNamespaces should be preserved during cluster selector rollback")
}
},
},
}
for _, c := range cases {
@@ -139,3 +164,60 @@ func newGlobalManagedClusterSetWithAnnotation(
return clusterSet
}
func newGlobalManagedClusterSetWithManagedNamespaces(
name string, clusterSelector clusterv1beta2.ManagedClusterSelector, terminating bool) *clusterv1beta2.ManagedClusterSet {
spec := clusterv1beta2.ManagedClusterSetSpec{
ClusterSelector: clusterSelector,
ManagedNamespaces: []clusterv1.ManagedNamespaceConfig{
{
Name: "test-namespace-1",
},
{
Name: "test-namespace-2",
},
},
}
clusterSet := &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: spec,
}
if terminating {
now := metav1.Now()
clusterSet.DeletionTimestamp = &now
}
return clusterSet
}
func newGlobalManagedClusterSetWithManagedNamespacesAndEditedSelector(
name string, terminating bool) *clusterv1beta2.ManagedClusterSet {
editedSelector := clusterv1beta2.ManagedClusterSelector{
SelectorType: "non-LegacyClusterSetLabel",
}
spec := clusterv1beta2.ManagedClusterSetSpec{
ClusterSelector: editedSelector,
ManagedNamespaces: []clusterv1.ManagedNamespaceConfig{
{
Name: "test-namespace-1",
},
{
Name: "test-namespace-2",
},
},
}
clusterSet := &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: spec,
}
if terminating {
now := metav1.Now()
clusterSet.DeletionTimestamp = &now
}
return clusterSet
}


@@ -276,6 +276,13 @@ func (m *HubManagerOptions) RunControllerManagerWithInformers(
controllerContext.EventRecorder,
)
managedNamespaceController := managedcluster.NewManagedNamespaceController(
clusterClient,
clusterInformers.Cluster().V1().ManagedClusters(),
clusterInformers.Cluster().V1beta2().ManagedClusterSets(),
controllerContext.EventRecorder,
)
managedClusterSetBindingController := managedclustersetbinding.NewManagedClusterSetBindingController(
clusterClient,
clusterInformers.Cluster().V1beta2().ManagedClusterSets(),
@@ -373,6 +380,7 @@ func (m *HubManagerOptions) RunControllerManagerWithInformers(
go leaseController.Run(ctx, 1)
go clockSyncController.Run(ctx, 1)
go managedClusterSetController.Run(ctx, 1)
go managedNamespaceController.Run(ctx, 1)
go managedClusterSetBindingController.Run(ctx, 1)
go clusterroleController.Run(ctx, 1)
go addOnHealthCheckController.Run(ctx, 1)


@@ -206,8 +206,11 @@ func TestSync(t *testing.T) {
}
ctrl := newManagedClusterStatusController(
testinghelpers.TestManagedClusterName,
"test-hub-hash",
clusterClient,
kubefake.NewSimpleClientset(),
clusterInformerFactory.Cluster().V1().ManagedClusters(),
kubeInformerFactory.Core().V1().Namespaces(),
discoveryClient,
clusterInformerFactory.Cluster().V1alpha1().ClusterClaims(),
clusterPropertyInformerFactory.About().V1alpha1().ClusterProperties(),
@@ -582,8 +585,11 @@ func TestExposeClaims(t *testing.T) {
}
ctrl := newManagedClusterStatusController(
testinghelpers.TestManagedClusterName,
"test-hub-hash",
clusterClient,
kubefake.NewSimpleClientset(),
clusterInformerFactory.Cluster().V1().ManagedClusters(),
kubeInformerFactory.Core().V1().Namespaces(),
discoveryClient,
clusterInformerFactory.Cluster().V1alpha1().ClusterClaims(),
clusterPropertyInformerFactory.About().V1alpha1().ClusterProperties(),


@@ -10,7 +10,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
fakekube "k8s.io/client-go/kubernetes/fake"
kubefake "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
aboutclusterfake "sigs.k8s.io/about-api/pkg/generated/clientset/versioned/fake"
@@ -85,7 +84,7 @@ func TestSyncManagedCluster(t *testing.T) {
}
}
- fakeHubClient := fakekube.NewSimpleClientset()
+ fakeHubClient := kubefake.NewSimpleClientset()
ctx := context.TODO()
hubEventRecorder, err := helpers.NewEventRecorder(ctx,
clusterscheme.Scheme, fakeHubClient.EventsV1(), "test")
@@ -94,8 +93,11 @@ func TestSyncManagedCluster(t *testing.T) {
}
ctrl := newManagedClusterStatusController(
testinghelpers.TestManagedClusterName,
"test-hub-hash",
clusterClient,
kubefake.NewSimpleClientset(),
clusterInformerFactory.Cluster().V1().ManagedClusters(),
kubeInformerFactory.Core().V1().Namespaces(),
discoveryClient,
clusterInformerFactory.Cluster().V1alpha1().ClusterClaims(),
clusterPropertyInformerFactory.About().V1alpha1().ClusterProperties(),


@@ -0,0 +1,221 @@
package managedcluster
import (
"context"
"fmt"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
clusterv1 "open-cluster-management.io/api/cluster/v1"
)
const (
// ClusterSetLabelPrefix for hub-specific clusterset labels using hub hash
ClusterSetLabelPrefix = "clusterset.open-cluster-management.io/"
// MaxLabelNameLength is the maximum length for the name part of Kubernetes labels (after prefix/)
// This ensures compliance with Kubernetes label naming rules for prefixed labels
MaxLabelNameLength = 63
// Condition types for managed namespaces
ConditionNamespaceAvailable = "NamespaceAvailable"
// Condition reasons
ReasonNamespaceApplied = "NamespaceApplied"
ReasonNamespaceApplyFail = "NamespaceApplyFailed"
)
// GetHubClusterSetLabel returns the hub-specific cluster set label for the given hub hash.
// It handles truncation if the hub hash exceeds the maximum label name length.
func GetHubClusterSetLabel(hubHash string) string {
truncatedHubHash := hubHash
if len(hubHash) > MaxLabelNameLength {
truncatedHubHash = hubHash[:MaxLabelNameLength]
}
return ClusterSetLabelPrefix + truncatedHubHash
}
// managedNamespaceReconcile manages namespaces on the spoke cluster based on
// the managed namespace configuration from a hub cluster
type managedNamespaceReconcile struct {
// This label stores the hub hash and is added to a managed namespace with value 'true'.
// When the namespace becomes unmanaged or the cluster is detached from the hub, the value
// is set to 'false'. In some edge cases (e.g., when the agent switches to a new hub via
// bootstrap kubeconfig update), the value may remain 'true', which could lead to cleanup
// issues if managed namespace deletion is supported in the future.
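// Illustrative label lifecycle on a spoke namespace (the hub hash value here is an assumption):
//   managed:            clusterset.open-cluster-management.io/<hub-hash>: "true"
//   no longer managed:  clusterset.open-cluster-management.io/<hub-hash>: "false"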
hubClusterSetLabel string
spokeKubeClient kubernetes.Interface
spokeNamespaceLister corev1listers.NamespaceLister
eventRecorder events.Recorder
}
// reconcile implements the statusReconcile interface for managed namespace management
func (r *managedNamespaceReconcile) reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster) (*clusterv1.ManagedCluster, reconcileState, error) {
logger := klog.FromContext(ctx)
logger.V(4).Info("Reconciling managed namespaces", "clusterName", cluster.Name)
// Skip if cluster is being deleted
if !cluster.DeletionTimestamp.IsZero() {
logger.V(4).Info("ManagedCluster is being deleted, cleanup all managed namespaces")
if err := r.cleanupPreviouslyManagedNamespaces(ctx, sets.Set[string]{}); err != nil {
logger.Error(err, "Failed to cleanup previously managed namespaces")
return cluster, reconcileContinue, err
}
return cluster, reconcileContinue, nil
}
// Skip if no managed namespaces are configured
if len(cluster.Status.ManagedNamespaces) == 0 {
logger.V(4).Info("No managed namespaces configured")
// Still need to cleanup previously managed namespaces
if err := r.cleanupPreviouslyManagedNamespaces(ctx, sets.Set[string]{}); err != nil {
logger.Error(err, "Failed to cleanup previously managed namespaces")
return cluster, reconcileContinue, err
}
return cluster, reconcileContinue, nil
}
updatedCluster := cluster.DeepCopy()
var allErrors []error
// Get current managed namespace names for quick lookup
currentManagedNS := sets.Set[string]{}
for _, managedNS := range updatedCluster.Status.ManagedNamespaces {
currentManagedNS.Insert(managedNS.Name)
}
// Process each managed namespace from cluster status
for i, managedNS := range updatedCluster.Status.ManagedNamespaces {
nsName := managedNS.Name
clusterSetName := managedNS.ClusterSet
logger.V(4).Info("Processing managed namespace", "namespace", nsName, "clusterSet", clusterSetName)
// Create or update the namespace
if err := r.createOrUpdateNamespace(ctx, nsName, clusterSetName); err != nil {
// Update condition: Failed
condition := metav1.Condition{
Type: ConditionNamespaceAvailable,
Status: metav1.ConditionFalse,
Reason: ReasonNamespaceApplyFail,
Message: fmt.Sprintf("Failed to apply namespace: %v", err),
LastTransitionTime: metav1.Now(),
}
meta.SetStatusCondition(&updatedCluster.Status.ManagedNamespaces[i].Conditions, condition)
logger.Error(err, "Failed to create managed namespace", "namespace", nsName, "clusterSet", clusterSetName)
allErrors = append(allErrors, fmt.Errorf("failed to create namespace %q: %w", nsName, err))
continue
}
// Update condition: Success
condition := metav1.Condition{
Type: ConditionNamespaceAvailable,
Status: metav1.ConditionTrue,
Reason: ReasonNamespaceApplied,
Message: "Namespace successfully applied and managed",
LastTransitionTime: metav1.Now(),
}
meta.SetStatusCondition(&updatedCluster.Status.ManagedNamespaces[i].Conditions, condition)
logger.V(4).Info("Successfully processed managed namespace", "namespace", nsName, "clusterSet", clusterSetName)
}
// Clean up previously managed namespaces by setting label to 'false'
// Keeps a record of which namespaces were previously managed by this hub
if err := r.cleanupPreviouslyManagedNamespaces(ctx, currentManagedNS); err != nil {
logger.Error(err, "Failed to cleanup previously managed namespaces")
allErrors = append(allErrors, fmt.Errorf("failed to cleanup previously managed namespaces: %w", err))
}
// Return aggregated errors from all operations
aggregatedErr := utilerrors.NewAggregate(allErrors)
return updatedCluster, reconcileContinue, aggregatedErr
}
// createOrUpdateNamespace creates or updates a namespace
func (r *managedNamespaceReconcile) createOrUpdateNamespace(ctx context.Context, nsName, clusterSetName string) error {
logger := klog.FromContext(ctx)
// Add hub-specific clusterset label using hub hash
labels := map[string]string{
r.hubClusterSetLabel: "true",
}
// Create the namespace object
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: nsName,
Labels: labels,
},
}
_, changed, err := resourceapply.ApplyNamespace(ctx, r.spokeKubeClient.CoreV1(), r.eventRecorder, namespace)
if err != nil {
return fmt.Errorf("failed to apply namespace %q: %w", nsName, err)
}
if changed {
r.eventRecorder.Eventf("NamespaceApplied", "Applied managed namespace %q for cluster set %q", nsName, clusterSetName)
logger.V(4).Info("Applied namespace", "namespace", nsName, "clusterSet", clusterSetName)
}
return nil
}
// cleanupPreviouslyManagedNamespaces sets the hub-specific label to 'false' for namespaces
// that are no longer in the current managed namespace list
func (r *managedNamespaceReconcile) cleanupPreviouslyManagedNamespaces(ctx context.Context, currentManagedNS sets.Set[string]) error {
logger := klog.FromContext(ctx)
// Get all namespaces with our hub-specific label
selector := labels.SelectorFromSet(labels.Set{r.hubClusterSetLabel: "true"})
namespaces, err := r.spokeNamespaceLister.List(selector)
if err != nil {
return fmt.Errorf("failed to list namespaces with label %s: %w", r.hubClusterSetLabel, err)
}
var allErrors []error
for _, ns := range namespaces {
// Skip if this namespace is still managed
if currentManagedNS.Has(ns.Name) {
continue
}
// Set label to 'false' for previously managed namespace
labels := map[string]string{
r.hubClusterSetLabel: "false",
}
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns.Name,
Labels: labels,
},
}
_, changed, err := resourceapply.ApplyNamespace(ctx, r.spokeKubeClient.CoreV1(), r.eventRecorder, namespace)
if err != nil {
logger.Error(err, "Failed to update previously managed namespace", "namespace", ns.Name)
allErrors = append(allErrors, fmt.Errorf("failed to update namespace %q: %w", ns.Name, err))
continue
}
if changed {
r.eventRecorder.Eventf("NamespaceUnmanaged", "Set namespace %q as no longer managed", ns.Name)
logger.V(4).Info("Updated previously managed namespace", "namespace", ns.Name)
}
}
return utilerrors.NewAggregate(allErrors)
}


@@ -0,0 +1,522 @@
package managedcluster
import (
"context"
"testing"
"time"
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
kubeinformers "k8s.io/client-go/informers"
kubefake "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
clusterv1 "open-cluster-management.io/api/cluster/v1"
testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing"
)
func TestGetHubClusterSetLabel(t *testing.T) {
cases := []struct {
name string
hubHash string
expected string
}{
{
name: "normal hub hash",
hubHash: "abcd1234",
expected: "clusterset.open-cluster-management.io/abcd1234",
},
{
name: "empty hub hash",
hubHash: "",
expected: "clusterset.open-cluster-management.io/",
},
{
name: "long hub hash gets truncated",
hubHash: "this-is-a-very-long-hub-hash-that-exceeds-the-maximum-label-name-length-limit",
expected: "clusterset.open-cluster-management.io/this-is-a-very-long-hub-hash-that-exceeds-the-maximum-label-nam",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
result := GetHubClusterSetLabel(c.hubHash)
if result != c.expected {
t.Errorf("expected %q, got %q", c.expected, result)
}
})
}
}
func TestManagedNamespaceReconcile_reconcile(t *testing.T) {
testHubHash := "test-hub-hash"
testClusterSetLabel := GetHubClusterSetLabel(testHubHash)
cases := []struct {
name string
cluster *clusterv1.ManagedCluster
existingNamespaces []runtime.Object
validateActions func(t *testing.T, actions []clienttesting.Action)
validateClusterStatus func(t *testing.T, cluster *clusterv1.ManagedCluster)
expectedReconcileState reconcileState
expectedErr string
}{
{
name: "cluster being deleted should cleanup all namespaces",
cluster: testinghelpers.NewDeletingManagedCluster(),
existingNamespaces: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-namespace",
Labels: map[string]string{
testClusterSetLabel: "true",
},
},
},
},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// resourceapply.ApplyNamespace performs GET then CREATE/UPDATE for each namespace
if len(actions) != 2 {
t.Errorf("expected 2 actions (get + update), got %d", len(actions))
return
}
if actions[0].GetVerb() != "get" {
t.Errorf("expected first action to be get, got %s", actions[0].GetVerb())
}
if actions[1].GetVerb() != "update" {
t.Errorf("expected second action to be update, got %s", actions[1].GetVerb())
}
},
expectedReconcileState: reconcileContinue,
},
{
name: "no managed namespaces should cleanup previously managed ones",
cluster: testinghelpers.NewJoinedManagedCluster(),
existingNamespaces: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "old-managed-namespace",
Labels: map[string]string{
testClusterSetLabel: "true",
},
},
},
},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// resourceapply.ApplyNamespace performs GET then CREATE/UPDATE for each namespace
if len(actions) != 2 {
t.Errorf("expected 2 actions (get + update), got %d", len(actions))
return
}
if actions[0].GetVerb() != "get" {
t.Errorf("expected first action to be get, got %s", actions[0].GetVerb())
}
if actions[1].GetVerb() != "update" {
t.Errorf("expected second action to be update, got %s", actions[1].GetVerb())
}
},
expectedReconcileState: reconcileContinue,
},
{
name: "create new managed namespaces",
cluster: newManagedClusterWithManagedNamespaces([]clusterv1.ClusterSetManagedNamespaceConfig{
{
ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{
Name: "test-namespace-1",
},
ClusterSet: "clusterset-1",
},
{
ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{
Name: "test-namespace-2",
},
ClusterSet: "clusterset-2",
},
}),
existingNamespaces: []runtime.Object{},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// resourceapply.ApplyNamespace performs GET then CREATE for each new namespace (2 namespaces = 4 actions)
if len(actions) != 4 {
t.Errorf("expected 4 actions (2 * (get + create)), got %d", len(actions))
return
}
// Check that we have alternating get and create actions
for i := 0; i < len(actions); i += 2 {
if actions[i].GetVerb() != "get" {
t.Errorf("expected action %d to be get, got %s", i, actions[i].GetVerb())
}
if i+1 < len(actions) && actions[i+1].GetVerb() != "create" {
t.Errorf("expected action %d to be create, got %s", i+1, actions[i+1].GetVerb())
}
}
},
validateClusterStatus: func(t *testing.T, cluster *clusterv1.ManagedCluster) {
if len(cluster.Status.ManagedNamespaces) != 2 {
t.Errorf("expected 2 managed namespaces, got %d", len(cluster.Status.ManagedNamespaces))
return
}
for _, managedNS := range cluster.Status.ManagedNamespaces {
if len(managedNS.Conditions) != 1 {
t.Errorf("expected 1 condition for namespace %s, got %d", managedNS.Name, len(managedNS.Conditions))
continue
}
condition := managedNS.Conditions[0]
if condition.Type != ConditionNamespaceAvailable {
t.Errorf("expected condition type %s, got %s", ConditionNamespaceAvailable, condition.Type)
}
if condition.Status != metav1.ConditionTrue {
t.Errorf("expected condition status True, got %s", condition.Status)
}
if condition.Reason != ReasonNamespaceApplied {
t.Errorf("expected reason %s, got %s", ReasonNamespaceApplied, condition.Reason)
}
}
},
expectedReconcileState: reconcileContinue,
},
{
name: "update existing managed namespaces and cleanup old ones",
cluster: newManagedClusterWithManagedNamespaces([]clusterv1.ClusterSetManagedNamespaceConfig{
{
ManagedNamespaceConfig: clusterv1.ManagedNamespaceConfig{
Name: "test-namespace-1",
},
ClusterSet: "clusterset-1",
},
}),
existingNamespaces: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-namespace-1",
Labels: map[string]string{
"other-label": "value", // Different labels to ensure update is needed
},
},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "old-namespace",
Labels: map[string]string{
testClusterSetLabel: "true",
},
},
},
},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// Should have 4 actions: (get + update) for existing namespace, (get + update) for cleaning up old namespace
if len(actions) != 4 {
t.Errorf("expected 4 actions (2 * (get + update)), got %d", len(actions))
return
}
// Check that we have alternating get and update actions
for i := 0; i < len(actions); i += 2 {
if actions[i].GetVerb() != "get" {
t.Errorf("expected action %d to be get, got %s", i, actions[i].GetVerb())
}
if i+1 < len(actions) && actions[i+1].GetVerb() != "update" {
t.Errorf("expected action %d to be update, got %s", i+1, actions[i+1].GetVerb())
}
}
},
expectedReconcileState: reconcileContinue,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
// Create fake clients
spokeKubeClient := kubefake.NewSimpleClientset(c.existingNamespaces...)
spokeKubeInformerFactory := kubeinformers.NewSharedInformerFactory(spokeKubeClient, time.Minute*10)
// Add existing namespaces to informer store
namespaceStore := spokeKubeInformerFactory.Core().V1().Namespaces().Informer().GetStore()
for _, obj := range c.existingNamespaces {
if ns, ok := obj.(*corev1.Namespace); ok {
if err := namespaceStore.Add(ns); err != nil {
t.Fatal(err)
}
}
}
// Create reconciler
reconciler := &managedNamespaceReconcile{
hubClusterSetLabel: GetHubClusterSetLabel(testHubHash),
spokeKubeClient: spokeKubeClient,
spokeNamespaceLister: spokeKubeInformerFactory.Core().V1().Namespaces().Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}
// Run reconcile
ctx := context.TODO()
updatedCluster, state, err := reconciler.reconcile(ctx, c.cluster)
// Validate error
if c.expectedErr == "" && err != nil {
t.Errorf("unexpected error: %v", err)
}
if c.expectedErr != "" && (err == nil || err.Error() != c.expectedErr) {
t.Errorf("expected error %q, got %v", c.expectedErr, err)
}
// Validate reconcile state
if state != c.expectedReconcileState {
t.Errorf("expected reconcile state %v, got %v", c.expectedReconcileState, state)
}
// Validate actions
if c.validateActions != nil {
c.validateActions(t, spokeKubeClient.Actions())
}
// Validate cluster status
if c.validateClusterStatus != nil {
c.validateClusterStatus(t, updatedCluster)
}
})
}
}
func TestManagedNamespaceReconcile_createOrUpdateNamespace(t *testing.T) {
testHubHash := "test-hub-hash"
testClusterSetLabel := GetHubClusterSetLabel(testHubHash)
cases := []struct {
name string
nsName string
clusterSetName string
existingNamespaces []runtime.Object
validateActions func(t *testing.T, actions []clienttesting.Action)
expectedErr string
}{
{
name: "create new namespace",
nsName: "test-namespace",
clusterSetName: "test-clusterset",
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// resourceapply.ApplyNamespace performs GET then CREATE for a new namespace
if len(actions) != 2 {
t.Errorf("expected 2 actions (get + create), got %d", len(actions))
return
}
if actions[0].GetVerb() != "get" {
t.Errorf("expected first action to be get, got %s", actions[0].GetVerb())
}
if actions[1].GetVerb() != "create" {
t.Errorf("expected second action to be create, got %s", actions[1].GetVerb())
}
if actions[0].GetResource().Resource != "namespaces" {
t.Errorf("expected namespaces resource, got %s", actions[0].GetResource().Resource)
}
},
},
{
name: "update existing namespace",
nsName: "existing-namespace",
clusterSetName: "test-clusterset",
existingNamespaces: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "existing-namespace",
Labels: map[string]string{
"other-label": "value",
},
},
},
},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// resourceapply.ApplyNamespace performs GET then UPDATE for an existing namespace
if len(actions) != 2 {
t.Errorf("expected 2 actions (get + update), got %d", len(actions))
return
}
if actions[0].GetVerb() != "get" {
t.Errorf("expected first action to be get, got %s", actions[0].GetVerb())
}
if actions[1].GetVerb() != "update" {
t.Errorf("expected second action to be update, got %s", actions[1].GetVerb())
}
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
// Create fake client
spokeKubeClient := kubefake.NewSimpleClientset(c.existingNamespaces...)
// Create reconciler
reconciler := &managedNamespaceReconcile{
hubClusterSetLabel: testClusterSetLabel,
spokeKubeClient: spokeKubeClient,
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}
// Run createOrUpdateNamespace
ctx := context.TODO()
err := reconciler.createOrUpdateNamespace(ctx, c.nsName, c.clusterSetName)
// Validate error
if c.expectedErr == "" && err != nil {
t.Errorf("unexpected error: %v", err)
}
if c.expectedErr != "" && (err == nil || err.Error() != c.expectedErr) {
t.Errorf("expected error %q, got %v", c.expectedErr, err)
}
// Validate actions
if c.validateActions != nil {
c.validateActions(t, spokeKubeClient.Actions())
}
})
}
}
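// Minimal sketch (an assumption, not the actual implementation) of the pattern
// the test above exercises: createOrUpdateNamespace builds the desired namespace
// with the hub-specific cluster set label set to "true" and hands it to
// resourceapply.ApplyNamespace, which issues the get+create (new namespace) or
// get+update (existing namespace) sequence the assertions check for.
//
//	ns := &corev1.Namespace{
//		ObjectMeta: metav1.ObjectMeta{
//			Name:   nsName,
//			Labels: map[string]string{r.hubClusterSetLabel: "true"},
//		},
//	}
//	_, _, err := resourceapply.ApplyNamespace(ctx, r.spokeKubeClient.CoreV1(), r.eventRecorder, ns)
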
func TestManagedNamespaceReconcile_cleanupPreviouslyManagedNamespaces(t *testing.T) {
testHubHash := "test-hub-hash"
testClusterSetLabel := GetHubClusterSetLabel(testHubHash)
cases := []struct {
name string
currentManagedNS sets.Set[string]
existingNamespaces []runtime.Object
validateActions func(t *testing.T, actions []clienttesting.Action)
expectedErr string
}{
{
name: "no existing namespaces",
currentManagedNS: sets.Set[string]{},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 0 {
t.Errorf("expected 0 actions, got %d", len(actions))
}
},
},
{
name: "cleanup unmanaged namespaces",
currentManagedNS: sets.New("keep-namespace"),
existingNamespaces: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "keep-namespace",
Labels: map[string]string{
testClusterSetLabel: "true",
},
},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "cleanup-namespace",
Labels: map[string]string{
testClusterSetLabel: "true",
},
},
},
},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// Should only clean up the namespace that's no longer managed (get + update)
if len(actions) != 2 {
t.Errorf("expected 2 actions (get + update), got %d", len(actions))
return
}
if actions[0].GetVerb() != "get" {
t.Errorf("expected first action to be get, got %s", actions[0].GetVerb())
}
if actions[1].GetVerb() != "update" {
t.Errorf("expected second action to be update, got %s", actions[1].GetVerb())
}
},
},
{
name: "cleanup all namespaces when none are managed",
currentManagedNS: sets.Set[string]{},
existingNamespaces: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "cleanup-namespace-1",
Labels: map[string]string{
testClusterSetLabel: "true",
},
},
},
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "cleanup-namespace-2",
Labels: map[string]string{
testClusterSetLabel: "true",
},
},
},
},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// Should clean up both namespaces (2 * (get + update) = 4 actions)
if len(actions) != 4 {
t.Errorf("expected 4 actions (2 * (get + update)), got %d", len(actions))
return
}
// Check that we have alternating get and update actions
for i := 0; i < len(actions); i += 2 {
if actions[i].GetVerb() != "get" {
t.Errorf("expected action %d to be get, got %s", i, actions[i].GetVerb())
}
if i+1 < len(actions) && actions[i+1].GetVerb() != "update" {
t.Errorf("expected action %d to be update, got %s", i+1, actions[i+1].GetVerb())
}
}
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
// Create fake client and informer
spokeKubeClient := kubefake.NewSimpleClientset(c.existingNamespaces...)
spokeKubeInformerFactory := kubeinformers.NewSharedInformerFactory(spokeKubeClient, time.Minute*10)
// Add existing namespaces to informer store
namespaceStore := spokeKubeInformerFactory.Core().V1().Namespaces().Informer().GetStore()
for _, obj := range c.existingNamespaces {
if ns, ok := obj.(*corev1.Namespace); ok {
if err := namespaceStore.Add(ns); err != nil {
t.Fatal(err)
}
}
}
// Create reconciler
reconciler := &managedNamespaceReconcile{
hubClusterSetLabel: testClusterSetLabel,
spokeKubeClient: spokeKubeClient,
spokeNamespaceLister: spokeKubeInformerFactory.Core().V1().Namespaces().Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}
// Run cleanupPreviouslyManagedNamespaces
ctx := context.TODO()
err := reconciler.cleanupPreviouslyManagedNamespaces(ctx, c.currentManagedNS)
// Validate error
if c.expectedErr == "" && err != nil {
t.Errorf("unexpected error: %v", err)
}
if c.expectedErr != "" && (err == nil || err.Error() != c.expectedErr) {
t.Errorf("expected error %q, got %v", c.expectedErr, err)
}
// Validate actions
if c.validateActions != nil {
c.validateActions(t, spokeKubeClient.Actions())
}
})
}
}
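// Minimal sketch (an assumption, not the actual implementation) of the cleanup
// behaviour the test above asserts on: list the namespaces labelled
// "<hubClusterSetLabel>=true", and for any that are no longer in the current
// managed set, flip the label to "false" with an update rather than deleting
// the namespace.
//
//	selector := labels.SelectorFromSet(labels.Set{r.hubClusterSetLabel: "true"})
//	namespaces, _ := r.spokeNamespaceLister.List(selector)
//	for _, ns := range namespaces {
//		if currentManagedNS.Has(ns.Name) {
//			continue
//		}
//		nsCopy := ns.DeepCopy()
//		nsCopy.Labels[r.hubClusterSetLabel] = "false"
//		_, _, _ = resourceapply.ApplyNamespace(ctx, r.spokeKubeClient.CoreV1(), r.eventRecorder, nsCopy)
//	}
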
// Helper function to create a ManagedCluster with managed namespaces
func newManagedClusterWithManagedNamespaces(managedNamespaces []clusterv1.ClusterSetManagedNamespaceConfig) *clusterv1.ManagedCluster {
cluster := testinghelpers.NewJoinedManagedCluster()
cluster.Status.ManagedNamespaces = managedNamespaces
return cluster
}

View File

@@ -15,7 +15,6 @@ import (
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/discovery"
kubeinformers "k8s.io/client-go/informers"
fakekube "k8s.io/client-go/kubernetes/fake"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
clienttesting "k8s.io/client-go/testing"
@@ -318,7 +317,7 @@ func TestHealthCheck(t *testing.T) {
serverResponse.httpStatus = c.httpStatus
serverResponse.responseMsg = c.responseMsg
fakeHubClient := fakekube.NewSimpleClientset()
fakeHubClient := kubefake.NewSimpleClientset()
ctx := context.TODO()
hubEventRecorder, err := helpers.NewEventRecorder(ctx,
@@ -328,8 +327,11 @@ func TestHealthCheck(t *testing.T) {
}
ctrl := newManagedClusterStatusController(
testinghelpers.TestManagedClusterName,
"test-hub-hash",
clusterClient,
kubefake.NewSimpleClientset(),
clusterInformerFactory.Cluster().V1().ManagedClusters(),
kubeInformerFactory.Core().V1().Namespaces(),
discoveryClient,
clusterInformerFactory.Cluster().V1alpha1().ClusterClaims(),
clusterPropertyInformerFactory.About().V1alpha1().ClusterProperties(),

View File

@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/discovery"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
kevents "k8s.io/client-go/tools/events"
aboutv1alpha1informer "sigs.k8s.io/about-api/pkg/generated/informers/externalversions/apis/v1alpha1"
@@ -52,8 +53,11 @@ const (
// NewManagedClusterStatusController creates a managed cluster status controller on managed cluster.
func NewManagedClusterStatusController(
clusterName string,
hubHash string,
hubClusterClient clientset.Interface,
spokeKubeClient kubernetes.Interface,
hubClusterInformer clusterv1informer.ManagedClusterInformer,
spokeNamespaceInformer corev1informers.NamespaceInformer,
managedClusterDiscoveryClient discovery.DiscoveryInterface,
claimInformer clusterv1alpha1informer.ClusterClaimInformer,
propertyInformer aboutv1alpha1informer.ClusterPropertyInformer,
@@ -65,8 +69,11 @@ func NewManagedClusterStatusController(
hubEventRecorder kevents.EventRecorder) factory.Controller {
c := newManagedClusterStatusController(
clusterName,
hubHash,
hubClusterClient,
spokeKubeClient,
hubClusterInformer,
spokeNamespaceInformer,
managedClusterDiscoveryClient,
claimInformer,
propertyInformer,
@@ -78,7 +85,7 @@ func NewManagedClusterStatusController(
)
controllerFactory := factory.New().
WithInformers(hubClusterInformer.Informer(), nodeInformer.Informer()).
WithInformers(hubClusterInformer.Informer(), nodeInformer.Informer(), spokeNamespaceInformer.Informer()).
WithSync(c.sync).ResyncEvery(resyncInterval)
if features.SpokeMutableFeatureGate.Enabled(ocmfeature.ClusterClaim) {
@@ -93,8 +100,11 @@ func NewManagedClusterStatusController(
func newManagedClusterStatusController(
clusterName string,
hubHash string,
hubClusterClient clientset.Interface,
spokeKubeClient kubernetes.Interface,
hubClusterInformer clusterv1informer.ManagedClusterInformer,
spokeNamespaceInformer corev1informers.NamespaceInformer,
managedClusterDiscoveryClient discovery.DiscoveryInterface,
claimInformer clusterv1alpha1informer.ClusterClaimInformer,
propertyInformer aboutv1alpha1informer.ClusterPropertyInformer,
@@ -116,6 +126,12 @@ func newManagedClusterStatusController(
reservedClusterClaimSuffixes: reservedClusterClaimSuffixes,
aboutLister: propertyInformer.Lister(),
},
&managedNamespaceReconcile{
hubClusterSetLabel: GetHubClusterSetLabel(hubHash),
spokeKubeClient: spokeKubeClient,
spokeNamespaceLister: spokeNamespaceInformer.Lister(),
eventRecorder: recorder,
},
},
hubClusterLister: hubClusterInformer.Lister(),
hubEventRecorder: hubEventRecorder,

View File

@@ -2,6 +2,7 @@ package spoke
import (
"context"
"crypto/sha256"
"fmt"
"os"
"time"
@@ -9,11 +10,14 @@ import (
"github.com/openshift/library-go/pkg/controller/controllercmd"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
aboutclient "sigs.k8s.io/about-api/pkg/generated/clientset/versioned"
aboutinformers "sigs.k8s.io/about-api/pkg/generated/informers/externalversions"
@@ -358,11 +362,33 @@ func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context,
if err != nil {
return fmt.Errorf("failed to create event recorder: %w", err)
}
// get hub hash for namespace management
hubHash, err := o.getHubHash()
if err != nil {
return fmt.Errorf("failed to get hub hash: %w", err)
}
// create filtered namespace informer for hub-specific namespaces
hubClusterSetLabel := managedcluster.GetHubClusterSetLabel(hubHash)
labelSelector := labels.SelectorFromSet(labels.Set{hubClusterSetLabel: "true"})
filteredNamespaceInformerFactory := informers.NewSharedInformerFactoryWithOptions(
spokeKubeClient,
10*time.Minute,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.LabelSelector = labelSelector.String()
}),
)
// create NewManagedClusterStatusController to update the spoke cluster status
// now includes managed namespace reconciler
managedClusterHealthCheckController := managedcluster.NewManagedClusterStatusController(
o.agentOptions.SpokeClusterName,
hubHash,
hubClient.ClusterClient,
spokeKubeClient,
hubClient.ClusterInformer,
filteredNamespaceInformerFactory.Core().V1().Namespaces(),
spokeKubeClient.Discovery(),
spokeClusterInformerFactory.Cluster().V1alpha1().ClusterClaims(),
aboutInformers.About().V1alpha1().ClusterProperties(),
@@ -437,6 +463,7 @@ func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context,
go hubClient.AddonInformer.Informer().Run(ctx.Done())
go spokeKubeInformerFactory.Start(ctx.Done())
go filteredNamespaceInformerFactory.Start(ctx.Done())
if features.SpokeMutableFeatureGate.Enabled(ocmfeature.ClusterClaim) {
go spokeClusterInformerFactory.Start(ctx.Done())
}
@@ -491,3 +518,11 @@ func (o *SpokeAgentConfig) getSpokeClusterCABundle(kubeConfig *rest.Config) ([]b
}
return data, nil
}
func (o *SpokeAgentConfig) getHubHash() (string, error) {
kubeConfig, err := clientcmd.BuildConfigFromFlags("", o.currentBootstrapKubeConfig)
if err != nil {
return "", fmt.Errorf("unable to load bootstrap kubeconfig: %w", err)
}
return fmt.Sprintf("%x", sha256.Sum256([]byte(kubeConfig.Host))), nil
}
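// Illustration only (the hosts below are made up, not part of this change):
// because the hash is the SHA-256 of the bootstrap kubeconfig's API server URL,
// agents registered to different hubs derive different namespace label keys via
// managedcluster.GetHubClusterSetLabel, so one hub's agent never cleans up
// namespaces managed on behalf of another hub.
//
//	hashA := fmt.Sprintf("%x", sha256.Sum256([]byte("https://hub-a.example.com:6443")))
//	hashB := fmt.Sprintf("%x", sha256.Sum256([]byte("https://hub-b.example.com:6443")))
//	// hashA != hashB, so the derived label keys differ as well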

View File

@@ -2,6 +2,7 @@ package e2e
import (
"context"
"crypto/sha256"
"flag"
"fmt"
"os"
@@ -14,6 +15,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
@@ -40,6 +42,9 @@ var (
// expected image tag for validation
expectedImageTag string
// hub hash
hubHash string
// bootstrap-hub-kubeconfig
// It's a secret named 'bootstrap-hub-kubeconfig' under the namespace 'open-cluster-management-agent',
// the content of the secret is a kubeconfig file.
@@ -127,6 +132,13 @@ var _ = BeforeSuite(func() {
bootstrapHubKubeConfigSecret.ObjectMeta.ResourceVersion = ""
bootstrapHubKubeConfigSecret.ObjectMeta.Namespace = ""
By("Calculate Hub Hash")
kubeconfigData, err := clientcmd.Load(bootstrapHubKubeConfigSecret.Data["kubeconfig"])
Expect(err).NotTo(HaveOccurred())
kubeconfig, err := clientcmd.NewDefaultClientConfig(*kubeconfigData, nil).ClientConfig()
Expect(err).NotTo(HaveOccurred())
hubHash = fmt.Sprintf("%x", sha256.Sum256([]byte(kubeconfig.Host)))
By("Check Hub Ready")
Eventually(func() error {
return hub.CheckHubReady()

View File

@@ -0,0 +1,370 @@
package e2e
import (
"context"
"fmt"
"time"
ginkgo "github.com/onsi/ginkgo/v2"
gomega "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
clusterv1 "open-cluster-management.io/api/cluster/v1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
"open-cluster-management.io/ocm/pkg/registration/spoke/managedcluster"
)
var _ = ginkgo.Describe("ManagedNamespace", func() {
ginkgo.Context("ManagedClusterSet with ManagedNamespaces", func() {
var expectedNamespaces []string
var originalManagedNamespaces []clusterv1.ManagedNamespaceConfig
ginkgo.BeforeEach(func() {
suffix := rand.String(6)
expectedNamespaces = []string{
fmt.Sprintf("test-ns-1-%s", suffix),
fmt.Sprintf("test-ns-2-%s", suffix),
}
// Update the existing universal cluster set with managed namespaces
gomega.Eventually(func() error {
clusterSet, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Get(
context.TODO(), universalClusterSetName, metav1.GetOptions{})
if err != nil {
return err
}
// Store original managed namespaces for restoration
originalManagedNamespaces = clusterSet.Spec.ManagedNamespaces
// Add test managed namespaces
clusterSet.Spec.ManagedNamespaces = []clusterv1.ManagedNamespaceConfig{
{Name: expectedNamespaces[0]},
{Name: expectedNamespaces[1]},
}
_, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Update(
context.TODO(), clusterSet, metav1.UpdateOptions{})
return err
}, 30*time.Second, 2*time.Second).Should(gomega.Succeed())
})
ginkgo.AfterEach(func() {
// Restore original managed namespaces in the universal cluster set
gomega.Eventually(func() error {
clusterSet, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Get(
context.TODO(), universalClusterSetName, metav1.GetOptions{})
if err != nil {
return err
}
// Restore original managed namespaces
clusterSet.Spec.ManagedNamespaces = originalManagedNamespaces
_, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Update(
context.TODO(), clusterSet, metav1.UpdateOptions{})
return err
}, 30*time.Second, 2*time.Second).Should(gomega.Succeed())
// Clean up any test namespaces that might have been created on the spoke cluster
for _, nsName := range expectedNamespaces {
err := spoke.KubeClient.CoreV1().Namespaces().Delete(
context.TODO(), nsName, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
ginkgo.GinkgoLogr.Error(err, "failed to delete test namespace", "namespace", nsName)
}
}
})
ginkgo.It("should update ManagedCluster status with managed namespaces from ManagedClusterSet", func() {
ginkgo.By("Waiting for the hub-side managed namespace controller to update the ManagedCluster status")
gomega.Eventually(func() bool {
cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(
context.TODO(), universalClusterName, metav1.GetOptions{})
if err != nil {
return false
}
// Check if managed namespaces are populated in status
if len(cluster.Status.ManagedNamespaces) != len(expectedNamespaces) {
return false
}
// Verify all expected namespaces are present
managedNSNames := make(map[string]bool)
for _, managedNS := range cluster.Status.ManagedNamespaces {
managedNSNames[managedNS.Name] = true
if managedNS.ClusterSet != universalClusterSetName {
return false
}
}
for _, expectedNS := range expectedNamespaces {
if !managedNSNames[expectedNS] {
return false
}
}
return true
}, 60*time.Second, 2*time.Second).Should(gomega.BeTrue(),
"ManagedCluster status should be updated with managed namespaces")
})
ginkgo.It("should create managed namespaces on spoke cluster with correct labels", func() {
// Get expected hub cluster set label
expectedLabel := managedcluster.GetHubClusterSetLabel(hubHash)
ginkgo.By("Waiting for the spoke-side managed namespace controller to create namespaces")
gomega.Eventually(func() bool {
// List all namespaces with the expected label set to "true"
namespaceList, err := spoke.KubeClient.CoreV1().Namespaces().List(
context.TODO(), metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=true", expectedLabel),
})
if err != nil {
return false
}
// Create a set of found namespaces for comparison
foundNamespaces := sets.Set[string]{}
for _, ns := range namespaceList.Items {
foundNamespaces.Insert(ns.Name)
}
// Check if all expected namespaces are found with the correct label
return foundNamespaces.HasAll(expectedNamespaces...)
}, 120*time.Second, 3*time.Second).Should(gomega.BeTrue(),
"All expected namespaces should be created with correct hub-specific label")
})
ginkgo.It("should update managed namespace conditions when namespace creation succeeds", func() {
ginkgo.By("Waiting for successful namespace creation conditions")
gomega.Eventually(func() bool {
cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(
context.TODO(), universalClusterName, metav1.GetOptions{})
if err != nil {
return false
}
// Check conditions on all managed namespaces
for _, managedNS := range cluster.Status.ManagedNamespaces {
condition := meta.FindStatusCondition(managedNS.Conditions, "NamespaceAvailable")
if condition == nil || condition.Status != metav1.ConditionTrue {
return false
}
if condition.Reason != "NamespaceApplied" {
return false
}
}
return len(cluster.Status.ManagedNamespaces) == len(expectedNamespaces)
}, 120*time.Second, 3*time.Second).Should(gomega.BeTrue(),
"All managed namespaces should have successful conditions")
})
ginkgo.It("should cleanup previously managed namespaces when removed from cluster set", func() {
// Get expected hub cluster set label
expectedLabel := managedcluster.GetHubClusterSetLabel(hubHash)
ginkgo.By("Waiting for initial namespaces to be created")
for _, expectedNS := range expectedNamespaces {
nsName := expectedNS
gomega.Eventually(func() error {
_, err := spoke.KubeClient.CoreV1().Namespaces().Get(
context.TODO(), nsName, metav1.GetOptions{})
return err
}, 120*time.Second, 3*time.Second).Should(gomega.Succeed())
}
ginkgo.By("Removing one namespace from the ManagedClusterSet")
// Update the cluster set to remove the first namespace
gomega.Eventually(func() error {
clusterSet, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Get(
context.TODO(), universalClusterSetName, metav1.GetOptions{})
if err != nil {
return err
}
// Remove the first namespace
clusterSet.Spec.ManagedNamespaces = []clusterv1.ManagedNamespaceConfig{
{Name: expectedNamespaces[1]}, // Keep only the second namespace
}
_, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Update(
context.TODO(), clusterSet, metav1.UpdateOptions{})
return err
}, 30*time.Second, 2*time.Second).Should(gomega.Succeed())
ginkgo.By("Verifying the removed namespace label is set to 'false'")
removedNS := expectedNamespaces[0]
gomega.Eventually(func() bool {
ns, err := spoke.KubeClient.CoreV1().Namespaces().Get(
context.TODO(), removedNS, metav1.GetOptions{})
if err != nil {
return false
}
// Check if the label is now set to 'false'
if labelValue, exists := ns.Labels[expectedLabel]; !exists || labelValue != "false" {
return false
}
return true
}, 120*time.Second, 3*time.Second).Should(gomega.BeTrue(),
fmt.Sprintf("Removed namespace %s should have label set to 'false'", removedNS))
ginkgo.By("Verifying the remaining namespace still has label set to 'true'")
remainingNS := expectedNamespaces[1]
gomega.Consistently(func() bool {
ns, err := spoke.KubeClient.CoreV1().Namespaces().Get(
context.TODO(), remainingNS, metav1.GetOptions{})
if err != nil {
return false
}
// Check if the label is still 'true'
if labelValue, exists := ns.Labels[expectedLabel]; !exists || labelValue != "true" {
return false
}
return true
}, 30*time.Second, 2*time.Second).Should(gomega.BeTrue(),
fmt.Sprintf("Remaining namespace %s should still have label set to 'true'", remainingNS))
})
})
ginkgo.Context("ManagedNamespace with multiple cluster sets", func() {
var additionalClusterSetName string
var namespace1, namespace2 string
var originalUniversalManagedNamespaces []clusterv1.ManagedNamespaceConfig
ginkgo.BeforeEach(func() {
suffix := rand.String(6)
additionalClusterSetName = fmt.Sprintf("test-clusterset-%s", suffix)
namespace1 = fmt.Sprintf("test-ns-1-%s", suffix)
namespace2 = fmt.Sprintf("test-ns-2-%s", suffix)
// Store original managed namespaces from universal cluster set
gomega.Eventually(func() error {
clusterSet, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Get(
context.TODO(), universalClusterSetName, metav1.GetOptions{})
if err != nil {
return err
}
originalUniversalManagedNamespaces = clusterSet.Spec.ManagedNamespaces
return nil
}, 30*time.Second, 2*time.Second).Should(gomega.Succeed())
// Add managed namespace to universal cluster set
gomega.Eventually(func() error {
clusterSet, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Get(
context.TODO(), universalClusterSetName, metav1.GetOptions{})
if err != nil {
return err
}
clusterSet.Spec.ManagedNamespaces = append(originalUniversalManagedNamespaces, clusterv1.ManagedNamespaceConfig{Name: namespace1})
_, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Update(
context.TODO(), clusterSet, metav1.UpdateOptions{})
return err
}, 30*time.Second, 2*time.Second).Should(gomega.Succeed())
// Create additional ManagedClusterSet with LabelSelector
additionalClusterSet := &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: additionalClusterSetName,
},
Spec: clusterv1beta2.ManagedClusterSetSpec{
ClusterSelector: clusterv1beta2.ManagedClusterSelector{
SelectorType: clusterv1beta2.LabelSelector,
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster.open-cluster-management.io/clusterset": universalClusterSetName,
},
},
},
ManagedNamespaces: []clusterv1.ManagedNamespaceConfig{
{Name: namespace2},
},
},
}
_, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(
context.TODO(), additionalClusterSet, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
ginkgo.AfterEach(func() {
// Restore original managed namespaces in the universal cluster set
gomega.Eventually(func() error {
clusterSet, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Get(
context.TODO(), universalClusterSetName, metav1.GetOptions{})
if err != nil {
return err
}
clusterSet.Spec.ManagedNamespaces = originalUniversalManagedNamespaces
_, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Update(
context.TODO(), clusterSet, metav1.UpdateOptions{})
return err
}, 30*time.Second, 2*time.Second).Should(gomega.Succeed())
// Clean up additional cluster set
hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Delete(
context.TODO(), additionalClusterSetName, metav1.DeleteOptions{})
// Clean up test namespaces
spoke.KubeClient.CoreV1().Namespaces().Delete(
context.TODO(), namespace1, metav1.DeleteOptions{})
spoke.KubeClient.CoreV1().Namespaces().Delete(
context.TODO(), namespace2, metav1.DeleteOptions{})
})
ginkgo.It("should manage namespaces from multiple cluster sets", func() {
ginkgo.By("Waiting for ManagedCluster status to include namespaces from both cluster sets")
gomega.Eventually(func() bool {
cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(
context.TODO(), universalClusterName, metav1.GetOptions{})
if err != nil {
return false
}
if len(cluster.Status.ManagedNamespaces) != 2 {
return false
}
foundNamespaces := make(map[string]string) // namespace -> cluster set
for _, managedNS := range cluster.Status.ManagedNamespaces {
foundNamespaces[managedNS.Name] = managedNS.ClusterSet
}
return foundNamespaces[namespace1] == universalClusterSetName &&
foundNamespaces[namespace2] == additionalClusterSetName
}, 60*time.Second, 2*time.Second).Should(gomega.BeTrue(),
"ManagedCluster should have namespaces from both cluster sets")
ginkgo.By("Verifying both namespaces are created on the spoke cluster")
gomega.Eventually(func() bool {
expectedNamespaces := []string{namespace1, namespace2}
for _, nsName := range expectedNamespaces {
_, err := spoke.KubeClient.CoreV1().Namespaces().Get(
context.TODO(), nsName, metav1.GetOptions{})
if err != nil {
return false
}
}
return true
}, 120*time.Second, 3*time.Second).Should(gomega.BeTrue(),
"Both namespaces should be created on the spoke cluster")
})
})
})