mirror of
https://github.com/open-cluster-management-io/ocm.git
synced 2026-05-06 01:07:03 +00:00
🌱 Copy TLS ConfigMap to addon namespaces in klusterlet operator (#1480)
* 🌱 Copy TLS ConfigMap to addon namespaces in klusterlet operator Add AddonTLSConfigController that copies the ocm-tls-profile ConfigMap from the klusterlet operator namespace to addon namespaces (labeled with addon.open-cluster-management.io/namespace: "true"). This allows addon agents to read TLS profile settings without cross-namespace RBAC. The controller mirrors the existing addonsecretcontroller pattern: - Watches namespaces with the addon label via filtered informer - Copies ConfigMap data on namespace creation/update - Deletes target ConfigMap when source is removed - Skips update when target is already up-to-date Assisted by Claude Signed-off-by: zhujian <jiazhu@redhat.com> * 🌱 Fix ConfigMap update to preserve ResourceVersion and add stale-target test - Reuse existing ConfigMap object on update to preserve ResourceVersion, preventing optimistic concurrency conflicts - Add test case for stale target ConfigMap being updated Assisted by Claude Signed-off-by: zhujian <jiazhu@redhat.com> --------- Signed-off-by: zhujian <jiazhu@redhat.com>
This commit is contained in:
@@ -0,0 +1,111 @@
|
||||
package addontlsconfigcontroller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
coreinformer "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
|
||||
tlslib "open-cluster-management.io/sdk-go/pkg/tls"
|
||||
|
||||
"open-cluster-management.io/ocm/pkg/common/queue"
|
||||
)
|
||||
|
||||
const (
	// addonInstallNamespaceLabelKey marks namespaces hosting addon agents.
	// Namespaces carrying this label with value "true" receive a copy of the
	// TLS profile ConfigMap from the operator namespace.
	addonInstallNamespaceLabelKey = "addon.open-cluster-management.io/namespace"
)
|
||||
|
||||
// addonTLSConfigController copies the ocm-tls-profile ConfigMap from the operator namespace
// to addon namespaces (labeled with "addon.open-cluster-management.io/namespace":"true").
// This allows addon agents to read TLS profile settings without needing cross-namespace access.
type addonTLSConfigController struct {
	// operatorNamespace is the namespace holding the source ConfigMap.
	operatorNamespace string
	// kubeClient reads namespaces and reads/writes ConfigMaps.
	kubeClient kubernetes.Interface
}
|
||||
|
||||
func NewAddonTLSConfigController(kubeClient kubernetes.Interface, operatorNamespace string,
|
||||
namespaceInformer coreinformer.NamespaceInformer) factory.Controller {
|
||||
c := &addonTLSConfigController{
|
||||
operatorNamespace: operatorNamespace,
|
||||
kubeClient: kubeClient,
|
||||
}
|
||||
return factory.New().WithFilteredEventsInformersQueueKeysFunc(
|
||||
queue.QueueKeyByMetaName,
|
||||
queue.FileterByLabelKeyValue(addonInstallNamespaceLabelKey, "true"),
|
||||
namespaceInformer.Informer()).WithSync(c.sync).ToController("AddonTLSConfigController")
|
||||
}
|
||||
|
||||
func (c *addonTLSConfigController) sync(ctx context.Context, _ factory.SyncContext, namespace string) error {
|
||||
if namespace == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
ns, err := c.kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ns.DeletionTimestamp.IsZero() {
|
||||
return nil
|
||||
}
|
||||
if ns.Labels[addonInstallNamespaceLabelKey] != "true" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.syncConfigMap(ctx, namespace)
|
||||
}
|
||||
|
||||
func (c *addonTLSConfigController) syncConfigMap(ctx context.Context, targetNamespace string) error {
|
||||
name := tlslib.ConfigMapName
|
||||
|
||||
source, err := c.kubeClient.CoreV1().ConfigMaps(c.operatorNamespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
// Source doesn't exist — clean up target if it exists
|
||||
if delErr := c.kubeClient.CoreV1().ConfigMaps(targetNamespace).Delete(
|
||||
ctx, name, metav1.DeleteOptions{}); delErr != nil && !errors.IsNotFound(delErr) {
|
||||
return delErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
existing, err := c.kubeClient.CoreV1().ConfigMaps(targetNamespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// Target exists — update only if data changed
|
||||
if equality.Semantic.DeepEqual(existing.Data, source.Data) {
|
||||
return nil
|
||||
}
|
||||
existing.Data = source.Data
|
||||
_, err = c.kubeClient.CoreV1().ConfigMaps(targetNamespace).Update(ctx, existing, metav1.UpdateOptions{})
|
||||
} else {
|
||||
// Target doesn't exist — create it
|
||||
target := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: targetNamespace,
|
||||
},
|
||||
Data: source.Data,
|
||||
}
|
||||
_, err = c.kubeClient.CoreV1().ConfigMaps(targetNamespace).Create(ctx, target, metav1.CreateOptions{})
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
klog.Infof("Synced ConfigMap %s from %s to %s", name, c.operatorNamespace, targetNamespace)
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,225 @@
|
||||
package addontlsconfigcontroller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
tlslib "open-cluster-management.io/sdk-go/pkg/tls"
|
||||
|
||||
testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
|
||||
)
|
||||
|
||||
// TestSync exercises addonTLSConfigController.sync against a fake clientset,
// covering: an empty queue key, an unlabeled namespace, the initial copy,
// cleanup when the source disappears, a stale-target update, and the
// up-to-date no-op path.
func TestSync(t *testing.T) {
	testcases := []struct {
		name       string                                          // case description
		queueKey   string                                          // namespace name fed to sync; "" simulates an empty key
		objects    []runtime.Object                                // pre-existing ConfigMaps (source and/or target)
		namespaces []runtime.Object                                // pre-existing Namespace objects
		verify     func(t *testing.T, client *kubefake.Clientset) // post-sync assertions
	}{
		{
			name: "empty queue key — no actions",
			verify: func(t *testing.T, client *kubefake.Clientset) {
				if len(client.Actions()) != 0 {
					t.Errorf("expected no actions, got: %v", client.Actions())
				}
			},
		},
		{
			name:     "namespace without addon label — no ConfigMap operations",
			queueKey: "ns1",
			namespaces: []runtime.Object{
				&corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: "ns1",
					},
				},
			},
			verify: func(t *testing.T, client *kubefake.Clientset) {
				// Only the namespace Get action
				if len(client.Actions()) != 1 {
					t.Errorf("expected 1 action (namespace get), got %d: %v",
						len(client.Actions()), client.Actions())
				}
			},
		},
		{
			name:     "namespace with addon label, source ConfigMap exists — copied",
			queueKey: "ns1",
			objects: []runtime.Object{
				// Source ConfigMap in the operator namespace
				&corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      tlslib.ConfigMapName,
						Namespace: "open-cluster-management",
					},
					Data: map[string]string{
						"minTLSVersion": "VersionTLS13",
					},
				},
			},
			namespaces: []runtime.Object{
				&corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "ns1",
						Labels: map[string]string{addonInstallNamespaceLabelKey: "true"},
					},
				},
			},
			verify: func(t *testing.T, client *kubefake.Clientset) {
				cm, err := client.CoreV1().ConfigMaps("ns1").Get(
					context.TODO(), tlslib.ConfigMapName, metav1.GetOptions{})
				if err != nil {
					t.Fatalf("expected ConfigMap to be copied, got error: %v", err)
				}
				if cm.Data["minTLSVersion"] != "VersionTLS13" {
					t.Errorf("expected minTLSVersion=VersionTLS13, got %v", cm.Data)
				}
			},
		},
		{
			name:     "namespace with addon label, source ConfigMap missing — target deleted",
			queueKey: "ns1",
			objects: []runtime.Object{
				// Target ConfigMap exists but source does not
				&corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      tlslib.ConfigMapName,
						Namespace: "ns1",
					},
					Data: map[string]string{
						"minTLSVersion": "VersionTLS12",
					},
				},
			},
			namespaces: []runtime.Object{
				&corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "ns1",
						Labels: map[string]string{addonInstallNamespaceLabelKey: "true"},
					},
				},
			},
			verify: func(t *testing.T, client *kubefake.Clientset) {
				_, err := client.CoreV1().ConfigMaps("ns1").Get(
					context.TODO(), tlslib.ConfigMapName, metav1.GetOptions{})
				if err == nil {
					t.Error("expected target ConfigMap to be deleted, but it still exists")
				}
			},
		},
		{
			name:     "namespace with addon label, target stale — updated",
			queueKey: "ns1",
			objects: []runtime.Object{
				// Source ConfigMap with the desired (newer) data
				&corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      tlslib.ConfigMapName,
						Namespace: "open-cluster-management",
					},
					Data: map[string]string{
						"minTLSVersion": "VersionTLS13",
					},
				},
				// Target ConfigMap holding stale data that must be overwritten
				&corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      tlslib.ConfigMapName,
						Namespace: "ns1",
					},
					Data: map[string]string{
						"minTLSVersion": "VersionTLS12",
					},
				},
			},
			namespaces: []runtime.Object{
				&corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "ns1",
						Labels: map[string]string{addonInstallNamespaceLabelKey: "true"},
					},
				},
			},
			verify: func(t *testing.T, client *kubefake.Clientset) {
				cm, err := client.CoreV1().ConfigMaps("ns1").Get(
					context.TODO(), tlslib.ConfigMapName, metav1.GetOptions{})
				if err != nil {
					t.Fatalf("expected ConfigMap to exist, got error: %v", err)
				}
				if cm.Data["minTLSVersion"] != "VersionTLS13" {
					t.Errorf("expected minTLSVersion=VersionTLS13 after update, got %v", cm.Data)
				}
			},
		},
		{
			name:     "namespace with addon label, target already up-to-date — no update",
			queueKey: "ns1",
			objects: []runtime.Object{
				// Source and target already hold identical data
				&corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      tlslib.ConfigMapName,
						Namespace: "open-cluster-management",
					},
					Data: map[string]string{
						"minTLSVersion": "VersionTLS13",
					},
				},
				&corev1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{
						Name:      tlslib.ConfigMapName,
						Namespace: "ns1",
					},
					Data: map[string]string{
						"minTLSVersion": "VersionTLS13",
					},
				},
			},
			namespaces: []runtime.Object{
				&corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "ns1",
						Labels: map[string]string{addonInstallNamespaceLabelKey: "true"},
					},
				},
			},
			verify: func(t *testing.T, client *kubefake.Clientset) {
				// Should have: namespace Get, source ConfigMap Get, target ConfigMap Get — no Create/Update
				for _, action := range client.Actions() {
					if action.GetVerb() == "create" || action.GetVerb() == "update" {
						t.Errorf("expected no create/update, got: %s %s",
							action.GetVerb(), action.GetResource().Resource)
					}
				}
			},
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// Seed the fake clientset with both ConfigMaps and Namespaces.
			objs := append(tc.objects, tc.namespaces...) //nolint:gocritic
			kubeClient := kubefake.NewSimpleClientset(objs...)
			kubeInformer := informers.NewSharedInformerFactory(kubeClient, 5*time.Minute)
			// Prime the informer store directly; the informer is never started.
			namespaceStore := kubeInformer.Core().V1().Namespaces().Informer().GetStore()
			for _, ns := range tc.namespaces {
				_ = namespaceStore.Add(ns)
			}

			controller := &addonTLSConfigController{
				operatorNamespace: "open-cluster-management",
				kubeClient:        kubeClient,
			}

			err := controller.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, tc.queueKey), tc.queueKey)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			tc.verify(t, kubeClient)
		})
	}
}
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
|
||||
"open-cluster-management.io/ocm/pkg/operator/helpers"
|
||||
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller"
|
||||
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/addontlsconfigcontroller"
|
||||
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/klusterletcontroller"
|
||||
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/ssarcontroller"
|
||||
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/statuscontroller"
|
||||
@@ -157,6 +158,12 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
|
||||
kubeInformer.Core().V1().Namespaces(),
|
||||
)
|
||||
|
||||
addonTLSConfigController := addontlsconfigcontroller.NewAddonTLSConfigController(
|
||||
kubeClient,
|
||||
helpers.GetOperatorNamespace(),
|
||||
kubeInformer.Core().V1().Namespaces(),
|
||||
)
|
||||
|
||||
go operatorInformer.Start(ctx.Done())
|
||||
go kubeInformer.Start(ctx.Done())
|
||||
go hubConfigSecretInformer.Start(ctx.Done())
|
||||
@@ -168,6 +175,7 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
|
||||
go statusController.Run(ctx, 1)
|
||||
go ssarController.Run(ctx, 1)
|
||||
go addonController.Run(ctx, 1)
|
||||
go addonTLSConfigController.Run(ctx, 1)
|
||||
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
|
||||
Reference in New Issue
Block a user