Mirror of https://github.com/open-cluster-management-io/ocm.git (synced 2026-02-14 10:00:11 +00:00)
🌱 use SDK basecontroller for better logging. (#1269)
* Use basecontroller in sdk-go instead for better logging
Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Rename to fakeSyncContext
Signed-off-by: Jian Qiu <jqiu@redhat.com>

---------

Signed-off-by: Jian Qiu <jqiu@redhat.com>
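The pattern repeated across every hunk below: controllers move from library-go's controller factory to the sdk-go basecontroller factory, constructors drop their events.Recorder parameter, and sync receives the queue key as an explicit argument instead of reading it from the sync context. A compilable sketch of the new shape, assuming only the sdk-go factory package that the hunks import (the controller type and log key here are illustrative, not from the repo):

// Sketch of the signature change this commit applies across the controllers.
package example

import (
    "context"

    "k8s.io/klog/v2"

    "open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
)

type exampleController struct{}

// Before (library-go factory), sync looked like:
//   func (c *exampleController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
//       key := syncCtx.QueueKey()
//       ...
//   }

// After (sdk-go basecontroller factory): the key arrives as an argument and is
// attached to the logger, so every subsequent log line carries it automatically.
func (c *exampleController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
    logger := klog.FromContext(ctx).WithValues("addonName", key)
    logger.V(4).Info("Reconciling addon")
    return nil
}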
go.mod (1 change)
@@ -132,7 +132,6 @@ require (
 	github.com/prometheus/client_model v0.6.2 // indirect
 	github.com/prometheus/common v0.65.0 // indirect
 	github.com/prometheus/procfs v0.16.1 // indirect
-	github.com/robfig/cron v1.2.0 // indirect
 	github.com/rs/xid v1.4.0 // indirect
 	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
go.sum (2 changes)
@@ -312,8 +312,6 @@ github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2
 github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
 github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
 github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
-github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
-github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
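(The robfig/cron drop is plausibly a knock-on effect of no longer importing library-go's controller factory, which, as far as I know, uses robfig/cron for its cron-style resync schedules. The commit itself does not say why, so treat this as an inference.)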
@@ -5,8 +5,6 @@ import (
 	"errors"
 	"time"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
-	"github.com/openshift/library-go/pkg/operator/events"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/tools/cache"
@@ -18,6 +16,7 @@ import (
 	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
 	clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1"
 	clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 	"open-cluster-management.io/sdk-go/pkg/patcher"
 
 	addonindex "open-cluster-management.io/ocm/pkg/addon/index"
@@ -64,7 +63,6 @@ func NewAddonConfigurationController(
 	placementInformer clusterinformersv1beta1.PlacementInformer,
 	placementDecisionInformer clusterinformersv1beta1.PlacementDecisionInformer,
 	addonFilterFunc factory.EventFilterFunc,
-	recorder events.Recorder,
 ) factory.Controller {
 	c := &addonConfigurationController{
 		addonClient: addonClient,
@@ -96,13 +94,12 @@ func NewAddonConfigurationController(
 		WithInformersQueueKeysFunc(
 			addonindex.ClusterManagementAddonByPlacementQueueKey(clusterManagementAddonInformers), placementInformer.Informer())
 
-	return controllerFactory.WithSync(c.sync).ToController("addon-configuration-controller", recorder)
+	return controllerFactory.WithSync(c.sync).ToController("addon-configuration-controller")
 }
 
-func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
-	logger := klog.FromContext(ctx)
-	addonName := syncCtx.QueueKey()
-	logger.V(4).Info("Reconciling addon", "addonName", addonName)
+func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory.SyncContext, addonName string) error {
+	logger := klog.FromContext(ctx).WithValues("addonName", addonName)
+	logger.V(4).Info("Reconciling addon")
 
 	cma, err := c.clusterManagementAddonLister.Get(addonName)
 	switch {
@@ -5,8 +5,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/cache"
@@ -20,6 +18,7 @@ import (
 	clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
 	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
 	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 
 	addonindex "open-cluster-management.io/ocm/pkg/addon/index"
 	"open-cluster-management.io/ocm/pkg/common/helpers"
@@ -46,8 +45,6 @@ func TestNewAddonConfigurationController(t *testing.T) {
 		return true
 	}
 
-	recorder := eventstesting.NewTestingEventRecorder(t)
-
 	controller := NewAddonConfigurationController(
 		fakeAddonClient,
 		addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
@@ -55,7 +52,6 @@ func TestNewAddonConfigurationController(t *testing.T) {
 		clusterInformers.Cluster().V1beta1().Placements(),
 		clusterInformers.Cluster().V1beta1().PlacementDecisions(),
 		addonFilterFunc,
-		recorder,
 	)
 
 	if controller == nil {
@@ -242,7 +238,7 @@ func TestAddonConfigurationControllerSync(t *testing.T) {
 
 			// Test sync method
 			ctx := context.TODO()
-			err = controller.sync(ctx, syncCtx)
+			err = controller.sync(ctx, syncCtx, c.queueKey)
 
 			if c.expectError && err == nil {
 				t.Errorf("Expected error but got none")
@@ -3,7 +3,6 @@ package addonmanagement
 import (
 	"context"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -16,6 +15,7 @@ import (
 	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
 	clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
 	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 
 	addonindex "open-cluster-management.io/ocm/pkg/addon/index"
 )
@@ -3,8 +3,6 @@ package addonmanagement
 import (
 	"context"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
-	"github.com/openshift/library-go/pkg/operator/events"
 	"k8s.io/apimachinery/pkg/api/errors"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/client-go/tools/cache"
@@ -15,6 +13,7 @@ import (
 	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
 	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
 	clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 
 	addonindex "open-cluster-management.io/ocm/pkg/addon/index"
 	"open-cluster-management.io/ocm/pkg/common/queue"
@@ -47,7 +46,6 @@ func NewAddonManagementController(
 	placementInformer clusterinformersv1beta1.PlacementInformer,
 	placementDecisionInformer clusterinformersv1beta1.PlacementDecisionInformer,
 	addonFilterFunc factory.EventFilterFunc,
-	recorder events.Recorder,
 ) factory.Controller {
 	c := &addonManagementController{
 		addonClient: addonClient,
@@ -76,13 +74,12 @@ func NewAddonManagementController(
 			addonindex.ClusterManagementAddonByPlacementQueueKey(
 				clusterManagementAddonInformers),
 			placementInformer.Informer()).
-		WithSync(c.sync).ToController("addon-management-controller", recorder)
+		WithSync(c.sync).ToController("addon-management-controller")
 }
 
-func (c *addonManagementController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
-	logger := klog.FromContext(ctx)
-	addonName := syncCtx.QueueKey()
-	logger.V(4).Info("Reconciling addon", "addonName", addonName)
+func (c *addonManagementController) sync(ctx context.Context, syncCtx factory.SyncContext, addonName string) error {
+	logger := klog.FromContext(ctx).WithValues("addonName", addonName)
+	logger.V(4).Info("Reconciling addon")
 
 	cma, err := c.clusterManagementAddonLister.Get(addonName)
 	switch {
@@ -5,8 +5,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clienttesting "k8s.io/client-go/testing"
@@ -21,6 +19,7 @@ import (
 	fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
 	clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
 	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 
 	addonindex "open-cluster-management.io/ocm/pkg/addon/index"
 	testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
@@ -297,8 +296,6 @@ func TestNewAddonManagementController(t *testing.T) {
 		return true
 	}
 
-	recorder := eventstesting.NewTestingEventRecorder(t)
-
 	controller := NewAddonManagementController(
 		fakeAddonClient,
 		addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
@@ -306,7 +303,6 @@ func TestNewAddonManagementController(t *testing.T) {
 		clusterInformers.Cluster().V1beta1().Placements(),
 		clusterInformers.Cluster().V1beta1().PlacementDecisions(),
 		addonFilterFunc,
-		recorder,
 	)
 
 	if controller == nil {
@@ -471,7 +467,7 @@ func TestAddonManagementControllerSync(t *testing.T) {
 
 			// Test sync method
 			ctx := context.TODO()
-			err = controller.sync(ctx, syncCtx)
+			err = controller.sync(ctx, syncCtx, c.queueKey)
 
 			if c.expectError && err == nil {
 				t.Errorf("Expected error but got none")
@@ -3,8 +3,6 @@ package addonowner
 import (
 	"context"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
-	"github.com/openshift/library-go/pkg/operator/events"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/cache"
@@ -15,6 +13,7 @@ import (
 	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
 	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
 	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 
 	addonindex "open-cluster-management.io/ocm/pkg/addon/index"
 	"open-cluster-management.io/ocm/pkg/common/queue"
@@ -37,7 +36,6 @@ func NewAddonOwnerController(
 	addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer,
 	clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer,
 	addonFilterFunc factory.EventFilterFunc,
-	recorder events.Recorder,
 ) factory.Controller {
 	c := &addonOwnerController{
 		addonClient: addonClient,
@@ -59,13 +57,12 @@ func NewAddonOwnerController(
 			queue.QueueKeyByMetaNamespaceName,
 			addonInformers.Informer()).
 		WithSync(c.sync).
-		ToController("addon-owner-controller", recorder)
+		ToController("addon-owner-controller")
 }
 
-func (c *addonOwnerController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
-	logger := klog.FromContext(ctx)
-	key := syncCtx.QueueKey()
-	logger.V(4).Info("Reconciling addon", "addon", key)
+func (c *addonOwnerController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
+	logger := klog.FromContext(ctx).WithValues("addon", key)
+	logger.V(4).Info("Reconciling addon")
 
 	namespace, addonName, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
@@ -75,16 +75,14 @@ func TestReconcile(t *testing.T) {
 			}
 
 			syncContext := testingcommon.NewFakeSyncContext(t, c.syncKey)
-			recorder := syncContext.Recorder()
 
 			controller := NewAddonOwnerController(
 				fakeAddonClient,
 				addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
 				addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
-				utils.ManagedByAddonManager,
-				recorder)
+				utils.ManagedByAddonManager)
 
-			err := controller.Sync(context.TODO(), syncContext)
+			err := controller.Sync(context.TODO(), syncContext, c.syncKey)
 			if err != nil {
 				t.Errorf("expected no error when sync: %v", err)
 			}
@@ -6,8 +6,6 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
-	"github.com/openshift/library-go/pkg/operator/events"
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -27,6 +25,7 @@ import (
 	workinformers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1"
 	worklister "open-cluster-management.io/api/client/work/listers/work/v1"
 	workapiv1 "open-cluster-management.io/api/work/v1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 	"open-cluster-management.io/sdk-go/pkg/patcher"
 
 	addonindex "open-cluster-management.io/ocm/pkg/addon/index"
@@ -49,7 +48,6 @@ func NewAddonProgressingController(
 	clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer,
 	workInformers workinformers.ManifestWorkInformer,
 	addonFilterFunc factory.EventFilterFunc,
-	recorder events.Recorder,
 ) factory.Controller {
 	c := &addonProgressingController{
 		addonClient: addonClient,
@@ -79,13 +77,12 @@ func NewAddonProgressingController(
 				return len(accessor.GetLabels()) > 0 && len(accessor.GetLabels()[addonapiv1alpha1.AddonLabelKey]) > 0
 			},
 			workInformers.Informer()).
-		WithSync(c.sync).ToController("addon-progressing-controller", recorder)
+		WithSync(c.sync).ToController("addon-progressing-controller")
 }
 
-func (c *addonProgressingController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
-	logger := klog.FromContext(ctx)
-	key := syncCtx.QueueKey()
-	logger.V(4).Info("Reconciling addon", "addon", key)
+func (c *addonProgressingController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
+	logger := klog.FromContext(ctx).WithValues("addonName", key)
+	logger.V(4).Info("Reconciling addon")
 
 	namespace, addonName, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
@@ -736,7 +736,6 @@ func TestReconcile(t *testing.T) {
 			}
 
 			syncContext := testingcommon.NewFakeSyncContext(t, c.syncKey)
-			recorder := syncContext.Recorder()
 
 			controller := NewAddonProgressingController(
 				fakeAddonClient,
@@ -744,10 +743,9 @@ func TestReconcile(t *testing.T) {
 				addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
 				workInformers.Work().V1().ManifestWorks(),
 				utils.ManagedByAddonManager,
-				recorder,
 			)
 
-			err := controller.Sync(context.TODO(), syncContext)
+			err := controller.Sync(context.TODO(), syncContext, c.syncKey)
 			if err != nil {
 				t.Errorf("expected no error when sync: %v", err)
 			}
@@ -1464,7 +1462,6 @@ func TestReconcileHostedAddons(t *testing.T) {
 			}
 
 			syncContext := testingcommon.NewFakeSyncContext(t, c.syncKey)
-			recorder := syncContext.Recorder()
 
 			controller := NewAddonProgressingController(
 				fakeAddonClient,
@@ -1472,10 +1469,9 @@ func TestReconcileHostedAddons(t *testing.T) {
 				addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
 				workInformers.Work().V1().ManifestWorks(),
 				utils.ManagedByAddonManager,
-				recorder,
 			)
 
-			err := controller.Sync(context.TODO(), syncContext)
+			err := controller.Sync(context.TODO(), syncContext, c.syncKey)
 			if err != nil {
 				t.Errorf("expected no error when sync: %v", err)
 			}
@@ -4,8 +4,6 @@ import (
 	"context"
 	"time"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
-	"github.com/openshift/library-go/pkg/operator/events"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -27,6 +25,7 @@ import (
 	workv1client "open-cluster-management.io/api/client/work/clientset/versioned"
 	workv1informers "open-cluster-management.io/api/client/work/informers/externalversions"
 	clusterv1 "open-cluster-management.io/api/cluster/v1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 
 	addonindex "open-cluster-management.io/ocm/pkg/addon/index"
 	"open-cluster-management.io/ocm/pkg/addon/templateagent"
@@ -51,7 +50,6 @@ type addonTemplateController struct {
 	dynamicInformers  dynamicinformer.DynamicSharedInformerFactory
 	workInformers     workv1informers.SharedInformerFactory
 	runControllerFunc runController
-	eventRecorder     events.Recorder
 }
 
 type runController func(ctx context.Context, addonName string) error
@@ -66,7 +64,6 @@ func NewAddonTemplateController(
 	clusterInformers clusterv1informers.SharedInformerFactory,
 	dynamicInformers dynamicinformer.DynamicSharedInformerFactory,
 	workInformers workv1informers.SharedInformerFactory,
-	recorder events.Recorder,
 	runController ...runController,
 ) factory.Controller {
 	c := &addonTemplateController{
@@ -81,7 +78,6 @@ func NewAddonTemplateController(
 		clusterInformers: clusterInformers,
 		dynamicInformers: dynamicInformers,
 		workInformers:    workInformers,
-		eventRecorder:    recorder,
 	}
 
 	if len(runController) > 0 {
@@ -118,7 +114,7 @@ func NewAddonTemplateController(
 		// addonTemplate lister to get the template object
 		addonInformers.Addon().V1alpha1().AddOnTemplates().Informer()).
 		WithSync(c.sync).
-		ToController("addon-template-controller", recorder)
+		ToController("addon-template-controller")
 }
 
 func (c *addonTemplateController) stopUnusedManagers(
@@ -150,9 +146,9 @@ func (c *addonTemplateController) stopUnusedManagers(
 	return nil
 }
 
-func (c *addonTemplateController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
-	logger := klog.FromContext(ctx)
-	addonName := syncCtx.QueueKey()
+func (c *addonTemplateController) sync(ctx context.Context, syncCtx factory.SyncContext, addonName string) error {
+	logger := klog.FromContext(ctx).WithValues("addonName", addonName)
+	ctx = klog.NewContext(ctx, logger)
 
 	cma, err := c.cmaLister.Get(addonName)
 	if err != nil {
@@ -168,11 +164,11 @@ func (c *addonTemplateController) sync(ctx context.Context, syncCtx factory.SyncContext, addonName string) error {
 
 	_, exist := c.addonManagers[addonName]
 	if exist {
-		logger.V(4).Info("There already is a manager started for addon, skipping", "addonName", addonName)
+		logger.V(4).Info("There already is a manager started for addon, skipping")
 		return nil
 	}
 
-	logger.Info("Starting an addon manager for addon", "addonName", addonName)
+	logger.Info("Starting an addon manager for addon")
 
 	stopFunc := c.startManager(ctx, addonName)
 	c.addonManagers[addonName] = stopFunc
@@ -187,7 +183,7 @@ func (c *addonTemplateController) startManager(
 	go func() {
 		err := c.runControllerFunc(ctx, addonName)
 		if err != nil {
-			logger.Error(err, "Error running controller for addon", "addonName", addonName)
+			logger.Error(err, "Error running controller for addon")
 			utilruntime.HandleError(err)
 		}
@@ -199,7 +195,7 @@ func (c *addonTemplateController) startManager(
 		c.dynamicInformers.Start(pctx.Done())
 
 		<-ctx.Done()
-		logger.Info("Addon Manager stopped", "addonName", addonName)
+		logger.Info("Addon Manager stopped")
 	}()
 	return stopFunc
 }
@@ -235,7 +231,6 @@ func (c *addonTemplateController) runController(ctx context.Context, addonName string) error {
 		c.addonClient,
 		c.addonInformers, // use the shared informers, whose cache is synced already
 		kubeInformers.Rbac().V1().RoleBindings().Lister(),
-		c.eventRecorder,
 		// image overrides from cluster annotation has lower priority than from the addonDeploymentConfig
 		getValuesClosure,
 		addonfactory.GetAddOnDeploymentConfigValues(
@@ -6,7 +6,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/dynamic/dynamicinformer"
@@ -193,13 +192,12 @@ func TestReconcile(t *testing.T) {
 		clusterInformers,
 		dynamicInformerFactory,
 		workInformers,
-		eventstesting.NewTestingEventRecorder(t),
 		runController,
 	)
 	ctx := context.TODO()
 	for _, syncKey := range c.syncKeys {
 		syncContext := testingcommon.NewFakeSyncContext(t, syncKey)
-		err := controller.Sync(ctx, syncContext)
+		err := controller.Sync(ctx, syncContext, syncKey)
 		if err != nil {
 			t.Errorf("expected no error when sync: %v", err)
 		}
@@ -383,7 +381,6 @@ func TestStopUnusedManagers(t *testing.T) {
 		clusterInformers: clusterInformers,
 		dynamicInformers: dynamicInformerFactory,
 		workInformers:    workInformers,
-		eventRecorder:    eventstesting.NewTestingEventRecorder(t),
 	}
 
 	// Start informers and wait for cache sync
@@ -4,8 +4,6 @@ import (
 	"context"
 	"sort"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
-	"github.com/openshift/library-go/pkg/operator/events"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
@@ -14,6 +12,7 @@ import (
 	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
 	addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
 	addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 	"open-cluster-management.io/sdk-go/pkg/patcher"
 
 	"open-cluster-management.io/ocm/pkg/common/queue"
@@ -33,7 +32,6 @@ func NewCMAInstallProgressionController(
 	addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer,
 	clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer,
 	addonFilterFunc factory.EventFilterFunc,
-	recorder events.Recorder,
 ) factory.Controller {
 	c := &cmaInstallProgressionController{
 		patcher: patcher.NewPatcher[
@@ -46,14 +44,13 @@ func NewCMAInstallProgressionController(
 	return factory.New().WithInformersQueueKeysFunc(
 		queue.QueueKeyByMetaName,
 		addonInformers.Informer(), clusterManagementAddonInformers.Informer()).
-		WithSync(c.sync).ToController("cma-install-progression-controller", recorder)
+		WithSync(c.sync).ToController("cma-install-progression-controller")
 
 }
 
-func (c *cmaInstallProgressionController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
-	addonName := syncCtx.QueueKey()
-	logger := klog.FromContext(ctx)
-	logger.V(4).Info("Reconciling addon", "addonName", addonName)
+func (c *cmaInstallProgressionController) sync(ctx context.Context, syncCtx factory.SyncContext, addonName string) error {
+	logger := klog.FromContext(ctx).WithValues("addonName", addonName)
+	logger.V(4).Info("Reconciling addon")
 	mgmtAddon, err := c.clusterManagementAddonLister.Get(addonName)
 	switch {
 	case errors.IsNotFound(err):
@@ -340,17 +340,15 @@ func TestReconcile(t *testing.T) {
 			}
 
 			syncContext := testingcommon.NewFakeSyncContext(t, c.syncKey)
-			recorder := syncContext.Recorder()
 
 			controller := NewCMAInstallProgressionController(
 				fakeAddonClient,
 				addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
 				addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
 				utils.ManagedByAddonManager,
-				recorder,
 			)
 
-			err := controller.Sync(context.TODO(), syncContext)
+			err := controller.Sync(context.TODO(), syncContext, c.syncKey)
 			if err != nil {
 				t.Errorf("expected no error when sync: %v", err)
 			}
@@ -138,7 +138,6 @@ func RunControllerManagerWithInformers(
 		clusterInformers.Cluster().V1beta1().Placements(),
 		clusterInformers.Cluster().V1beta1().PlacementDecisions(),
 		utils.ManagedByAddonManager,
-		controllerContext.EventRecorder,
 	)
 
 	addonConfigurationController := addonconfiguration.NewAddonConfigurationController(
@@ -148,7 +147,6 @@ func RunControllerManagerWithInformers(
 		clusterInformers.Cluster().V1beta1().Placements(),
 		clusterInformers.Cluster().V1beta1().PlacementDecisions(),
 		utils.ManagedByAddonManager,
-		controllerContext.EventRecorder,
 	)
 
 	addonOwnerController := addonowner.NewAddonOwnerController(
@@ -156,7 +154,6 @@ func RunControllerManagerWithInformers(
 		addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
 		addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
 		utils.ManagedByAddonManager,
-		controllerContext.EventRecorder,
 	)
 
 	addonProgressingController := addonprogressing.NewAddonProgressingController(
@@ -165,7 +162,6 @@ func RunControllerManagerWithInformers(
 		addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
 		workinformers.Work().V1().ManifestWorks(),
 		utils.ManagedByAddonManager,
-		controllerContext.EventRecorder,
 	)
 
 	mgmtAddonInstallProgressionController := cmainstallprogression.NewCMAInstallProgressionController(
@@ -173,7 +169,6 @@ func RunControllerManagerWithInformers(
 		addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
 		addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
 		utils.ManagedByAddonManager,
-		controllerContext.EventRecorder,
 	)
 
 	addonTemplateController := addontemplate.NewAddonTemplateController(
@@ -187,7 +182,6 @@ func RunControllerManagerWithInformers(
 		// these addons only support addontemplate and addondeploymentconfig
 		dynamicInformers,
 		workinformers,
-		controllerContext.EventRecorder,
 	)
 
 	go addonManagementController.Run(ctx, 2)
@@ -23,6 +23,9 @@ import (
 	"open-cluster-management.io/addon-framework/pkg/utils"
 	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
 	clusterv1 "open-cluster-management.io/api/cluster/v1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
+
+	commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
 )
 
 const (
@@ -471,8 +474,14 @@ func (a *CRDTemplateAgentAddon) createPermissionBinding(clusterName, addonName,
 		binding.OwnerReferences = []metav1.OwnerReference{*owner}
 	}
 
+	// TODO(qiujian16) this should have ctx passed to build the wrapper
+	recorderWrapper := commonrecorder.NewEventsRecorderWrapper(
+		context.Background(),
+		events.NewContextualLoggingEventRecorder(fmt.Sprintf("addontemplate-%s-%s", clusterName, addonName)),
+	)
+
 	_, modified, err := resourceapply.ApplyRoleBinding(context.TODO(),
-		a.hubKubeClient.RbacV1(), a.eventRecorder, binding)
+		a.hubKubeClient.RbacV1(), recorderWrapper, binding)
 	if err == nil && modified {
 		a.logger.Info("Rolebinding for addon updated", "namespace", binding.Namespace, "name", binding.Name,
 			"clusterName", clusterName, "addonName", addonName)
@@ -10,7 +10,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	"github.com/stretchr/testify/assert"
 	certificatesv1 "k8s.io/api/certificates/v1"
 	certificates "k8s.io/api/certificates/v1beta1"
@@ -739,7 +738,7 @@ func TestTemplatePermissionConfigFunc(t *testing.T) {
 			}
 
 			agent := NewCRDTemplateAgentAddon(ctx, c.addon.Name, hubKubeClient, addonClient, addonInformerFactory,
-				kubeInformers.Rbac().V1().RoleBindings().Lister(), eventstesting.NewTestingEventRecorder(t))
+				kubeInformers.Rbac().V1().RoleBindings().Lister())
 			f := agent.TemplatePermissionConfigFunc()
 			err := f(c.cluster, c.addon)
 			if err != c.expectedErr {
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/valyala/fasttemplate"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -73,7 +72,6 @@ type CRDTemplateAgentAddon struct {
 	rolebindingLister rbacv1lister.RoleBindingLister
 	addonName         string
 	agentName         string
-	eventRecorder     events.Recorder
 }
 
 // NewCRDTemplateAgentAddon creates a CRDTemplateAgentAddon instance
@@ -84,7 +82,6 @@ func NewCRDTemplateAgentAddon(
 	addonClient addonv1alpha1client.Interface,
 	addonInformers addoninformers.SharedInformerFactory,
 	rolebindingLister rbacv1lister.RoleBindingLister,
-	recorder events.Recorder,
 	getValuesFuncs ...addonfactory.GetValuesFunc,
 ) *CRDTemplateAgentAddon {
 
@@ -101,7 +98,6 @@ func NewCRDTemplateAgentAddon(
 		rolebindingLister: rolebindingLister,
 		addonName:         addonName,
 		agentName:         fmt.Sprintf("%s-agent", addonName),
-		eventRecorder:     recorder,
 	}
 
 	return a
@@ -6,7 +6,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
@@ -683,7 +682,6 @@ func TestAddonTemplateAgentManifests(t *testing.T) {
 		addonClient,
 		addonInformerFactory,
 		kubeInformers.Rbac().V1().RoleBindings().Lister(),
-		eventstesting.NewTestingEventRecorder(t),
 		addonfactory.GetAddOnDeploymentConfigValues(
 			utils.NewAddOnDeploymentConfigGetter(addonClient),
 			addonfactory.ToAddOnCustomizedVariableValues,
@@ -952,7 +950,6 @@ func TestAgentInstallNamespace(t *testing.T) {
 		addonClient,
 		addonInformerFactory,
 		kubeInformers.Rbac().V1().RoleBindings().Lister(),
-		eventstesting.NewTestingEventRecorder(t),
 		addonfactory.GetAddOnDeploymentConfigValues(
 			utils.NewAddOnDeploymentConfigGetter(addonClient),
 			addonfactory.ToAddOnCustomizedVariableValues,
@@ -1122,7 +1119,6 @@ func TestAgentManifestConfigs(t *testing.T) {
 		addonClient,
 		addonInformerFactory,
 		kubeInformers.Rbac().V1().RoleBindings().Lister(),
-		eventstesting.NewTestingEventRecorder(t),
 		addonfactory.GetAddOnDeploymentConfigValues(
 			utils.NewAddOnDeploymentConfigGetter(addonClient),
 			addonfactory.ToAddOnCustomizedVariableValues,
@@ -4,12 +4,15 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/openshift/library-go/pkg/operator/resource/resourcehelper"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
+
+	commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
 )
 
 // Getter is a wrapper interface of lister
@@ -43,6 +46,8 @@ func Apply[T runtime.Object](
 	}
 	gvk := resourcehelper.GuessObjectGroupVersionKind(required)
 	existing, err := getter.Get(requiredAccessor.GetName())
+	recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
+
 	if errors.IsNotFound(err) {
 		actual, createErr := client.Create(ctx, required, metav1.CreateOptions{})
 		switch {
@@ -56,12 +61,12 @@ func Apply[T runtime.Object](
 			}
 			existing = actual
 		case createErr == nil:
-			recorderWrapper.Eventf(
+			recorderWrapper.Eventf(
 				fmt.Sprintf("%sCreated", gvk.Kind),
 				"Created %s because it was missing", resourcehelper.FormatResourceForCLIWithNamespace(actual))
 			return actual, true, nil
 		default:
-			recorder.Warningf(
+			recorderWrapper.Warningf(
 				fmt.Sprintf("%sCreateFailed", gvk.Kind),
 				"Failed to create %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(required), createErr)
 			return actual, true, createErr
@@ -76,11 +81,11 @@ func Apply[T runtime.Object](
 	updated, err = client.Update(ctx, updated, metav1.UpdateOptions{})
 	switch {
 	case err != nil:
-		recorder.Warningf(
+		recorderWrapper.Warningf(
 			fmt.Sprintf("%sUpdateFailed", gvk.Kind),
 			"Failed to update %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(required), err)
 	default:
-		recorder.Eventf(
+		recorderWrapper.Eventf(
 			fmt.Sprintf("%sUpdated", gvk.Kind),
 			"Updated %s", resourcehelper.FormatResourceForCLIWithNamespace(updated))
 	}
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
 	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
 	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
@@ -12,6 +11,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/client-go/kubernetes"
 	rbacv1listers "k8s.io/client-go/listers/rbac/v1"
+
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
 )
 
 type PermissionApplier struct {
@@ -5,13 +5,14 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
 	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/client-go/informers"
 	kubefake "k8s.io/client-go/kubernetes/fake"
 	clienttesting "k8s.io/client-go/testing"
 
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
+
 	testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
 )
@@ -406,7 +407,7 @@ subjects:
 				informerFactory.Rbac().V1().ClusterRoles().Lister(),
 				informerFactory.Rbac().V1().ClusterRoleBindings().Lister(),
 			)
-			results := applier.Apply(context.TODO(), eventstesting.NewTestingEventRecorder(t),
+			results := applier.Apply(context.TODO(), events.NewContextualLoggingEventRecorder(t.Name()),
 				func(name string) ([]byte, error) {
 					return []byte(c.manifest), nil
 				}, "test")
@@ -8,13 +8,14 @@ import (
 	"time"
 
 	"github.com/openshift/library-go/pkg/controller/controllercmd"
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	"github.com/spf13/cobra"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	kubefake "k8s.io/client-go/kubernetes/fake"
 	clocktesting "k8s.io/utils/clock/testing"
 
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
+
 	testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing"
 	"open-cluster-management.io/ocm/pkg/registration/register"
 	"open-cluster-management.io/ocm/pkg/registration/spoke/registration"
@@ -90,7 +91,7 @@ func TestComplete(t *testing.T) {
 
 	err = registration.DumpSecret(
 		kubeClient.CoreV1(), componentNamespace, "hub-kubeconfig-secret",
-		options.HubKubeconfigDir, context.TODO(), eventstesting.NewTestingEventRecorder(t))
+		options.HubKubeconfigDir, context.TODO(), events.NewContextualLoggingEventRecorder(t.Name()))
 	if err != nil {
 		t.Error(err)
 	}
@@ -1,10 +1,11 @@
 package queue
 
 import (
-	"github.com/openshift/library-go/pkg/controller/factory"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/cache"
+
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 )
 
 func FileterByLabel(key string) func(obj interface{}) bool {
@@ -4,10 +4,11 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 )
 
 func TestFileter(t *testing.T) {
@@ -1,22 +0,0 @@
-package recorder
-
-import (
-	"context"
-
-	"k8s.io/apimachinery/pkg/runtime"
-	eventsv1 "k8s.io/client-go/kubernetes/typed/events/v1"
-	kevents "k8s.io/client-go/tools/events"
-)
-
-// NewEventRecorder creates a new event recorder for the given controller, it will also log the events
-func NewEventRecorder(ctx context.Context, scheme *runtime.Scheme,
-	eventsClient eventsv1.EventsV1Interface, controllerName string) (kevents.EventRecorder, error) {
-	broadcaster := kevents.NewBroadcaster(&kevents.EventSinkImpl{Interface: eventsClient})
-	err := broadcaster.StartRecordingToSinkWithContext(ctx)
-	if err != nil {
-		return nil, err
-	}
-	broadcaster.StartStructuredLogging(0)
-	recorder := broadcaster.NewRecorder(scheme, controllerName)
-	return recorder, nil
-}
@@ -1,72 +0,0 @@
-package recorder
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	fakekube "k8s.io/client-go/kubernetes/fake"
-	clienttesting "k8s.io/client-go/testing"
-
-	clusterscheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme"
-	workscheme "open-cluster-management.io/api/client/work/clientset/versioned/scheme"
-	clusterv1 "open-cluster-management.io/api/cluster/v1"
-
-	testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
-)
-
-func TestNewEventRecorder(t *testing.T) {
-	tests := []struct {
-		name            string
-		scheme          *runtime.Scheme
-		wait            time.Duration
-		validateActions func(t *testing.T, actions []clienttesting.Action)
-	}{
-		{
-			name:   "test new event recorder, scheme not match ",
-			scheme: workscheme.Scheme,
-			wait:   100 * time.Millisecond,
-			validateActions: func(t *testing.T, actions []clienttesting.Action) {
-				testingcommon.AssertNoActions(t, actions)
-			},
-		},
-		{
-			name:   "test new event recorder, scheme match",
-			scheme: clusterscheme.Scheme,
-			wait:   100 * time.Millisecond,
-			validateActions: func(t *testing.T, actions []clienttesting.Action) {
-				testingcommon.AssertActions(t, actions, "create")
-			},
-		},
-		{
-			name:   "test new event recorder, scheme match, no wait",
-			scheme: clusterscheme.Scheme,
-			validateActions: func(t *testing.T, actions []clienttesting.Action) {
-				testingcommon.AssertNoActions(t, actions)
-			},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			ctx := context.TODO()
-			kubeClient := fakekube.NewClientset()
-			recorder, err := NewEventRecorder(ctx, tt.scheme, kubeClient.EventsV1(), "test")
-			if err != nil {
-				t.Errorf("NewEventRecorder() error = %v", err)
-				return
-			}
-
-			object := &clusterv1.ManagedCluster{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "test",
-				},
-			}
-			recorder.Eventf(object, nil, corev1.EventTypeNormal, "test", "test", "")
-			time.Sleep(tt.wait)
-			tt.validateActions(t, kubeClient.Actions())
-		})
-	}
-}
pkg/common/recorder/event_recorder_wrapper.go (new file, 61 lines)
@@ -0,0 +1,61 @@
+package recorder
+
+import (
+	"context"
+	"fmt"
+
+	librarygoevents "github.com/openshift/library-go/pkg/operator/events"
+
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
+)
+
+// eventsRecorderWrapper wraps events recorder to a library-go recorder
+type EventsRecorderWrapper struct {
+	recorder events.Recorder
+	ctx      context.Context
+}
+
+func NewEventsRecorderWrapper(ctx context.Context, recorder events.Recorder) librarygoevents.Recorder {
+	return &EventsRecorderWrapper{
+		recorder: recorder,
+		ctx:      ctx,
+	}
+}
+
+func (e *EventsRecorderWrapper) Event(reason, message string) {
+	e.recorder.Event(e.ctx, reason, message)
+}
+
+func (e *EventsRecorderWrapper) Shutdown() {}
+
+func (e *EventsRecorderWrapper) Eventf(reason, messageFmt string, args ...interface{}) {
+	e.recorder.Eventf(e.ctx, reason, messageFmt, args...)
+}
+
+func (e *EventsRecorderWrapper) Warning(reason, message string) {
+	e.recorder.Warning(e.ctx, reason, message)
+}
+
+func (e *EventsRecorderWrapper) Warningf(reason, messageFmt string, args ...interface{}) {
+	e.recorder.Warningf(e.ctx, reason, messageFmt, args...)
+}
+
+func (e *EventsRecorderWrapper) ForComponent(componentName string) librarygoevents.Recorder {
+	newRecorder := *e
+	newRecorder.recorder = e.recorder.ForComponent(componentName)
+	return &newRecorder
+}
+
+func (e *EventsRecorderWrapper) WithComponentSuffix(suffix string) librarygoevents.Recorder {
+	return e.ForComponent(fmt.Sprintf("%s-%s", e.ComponentName(), suffix))
+}
+
+func (e *EventsRecorderWrapper) WithContext(ctx context.Context) librarygoevents.Recorder {
+	eCopy := *e
+	eCopy.ctx = ctx
+	return &eCopy
+}
+
+func (e *EventsRecorderWrapper) ComponentName() string {
+	return e.recorder.ComponentName()
+}
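Call sites elsewhere in this commit adapt back to library-go helpers through this wrapper. A self-contained sketch of that pattern; the function and component name are illustrative, while the two constructors and resourceapply.ApplySecret are exactly the calls used in the hunks:

package example

import (
    "context"

    "github.com/openshift/library-go/pkg/operator/resource/resourceapply"
    corev1 "k8s.io/api/core/v1"
    coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1"

    "open-cluster-management.io/sdk-go/pkg/basecontroller/events"

    commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
)

// applySecret wraps an sdk-go contextual recorder into a library-go
// events.Recorder so library-go's resourceapply helpers keep working unchanged.
func applySecret(ctx context.Context, client coreclientv1.SecretsGetter, secret *corev1.Secret) error {
    recorder := events.NewContextualLoggingEventRecorder("example-component") // component name is illustrative
    recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
    _, _, err := resourceapply.ApplySecret(ctx, client, recorderWrapper, secret)
    return err
}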
@@ -1,63 +0,0 @@
-package recorder
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/openshift/library-go/pkg/operator/events"
-	"k8s.io/klog/v2"
-)
-
-// ContextualLoggingEventRecorder implements a recorder with contextual logging
-type ContextualLoggingEventRecorder struct {
-	component string
-	ctx       context.Context
-}
-
-func (r *ContextualLoggingEventRecorder) WithContext(ctx context.Context) events.Recorder {
-	newRecorder := *r
-	newRecorder.ctx = ctx
-	return &newRecorder
-}
-
-// NewContextualLoggingEventRecorder provides event recorder that will log all recorded events via klog.
-func NewContextualLoggingEventRecorder(component string) events.Recorder {
-	return &ContextualLoggingEventRecorder{
-		component: component,
-		ctx:       context.Background(),
-	}
-}
-
-func (r *ContextualLoggingEventRecorder) ComponentName() string {
-	return r.component
-}
-
-func (r *ContextualLoggingEventRecorder) ForComponent(component string) events.Recorder {
-	newRecorder := *r
-	newRecorder.component = component
-	return &newRecorder
-}
-
-func (r *ContextualLoggingEventRecorder) Shutdown() {}
-
-func (r *ContextualLoggingEventRecorder) WithComponentSuffix(suffix string) events.Recorder {
-	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
-}
-
-func (r *ContextualLoggingEventRecorder) Event(reason, message string) {
-	logger := klog.FromContext(r.ctx)
-	logger.Info(fmt.Sprintf("INFO: %s", message), "component", r.component, "reason", reason)
-}
-
-func (r *ContextualLoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
-	r.Event(reason, fmt.Sprintf(messageFmt, args...))
-}
-
-func (r *ContextualLoggingEventRecorder) Warning(reason, message string) {
-	logger := klog.FromContext(r.ctx)
-	logger.Info(fmt.Sprintf("WARNING: %s", message), "component", r.component, "reason", reason)
-}
-
-func (r *ContextualLoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
-	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
-}
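Note: the deletion above does not remove the capability. Throughout this commit the same NewContextualLoggingEventRecorder is imported from open-cluster-management.io/sdk-go/pkg/basecontroller/events, so the contextual logging recorder has evidently moved into sdk-go rather than living in this repo.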
@@ -3,44 +3,24 @@ package testing
 import (
 	"testing"
 
-	openshiftevents "github.com/openshift/library-go/pkg/operator/events"
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	"k8s.io/client-go/util/workqueue"
 
 	sdkevents "open-cluster-management.io/sdk-go/pkg/basecontroller/events"
 )
 
 type FakeSyncContext struct {
-	spokeName string
-	recorder  openshiftevents.Recorder
-	queue     workqueue.RateLimitingInterface
-}
-
-func (f FakeSyncContext) Queue() workqueue.RateLimitingInterface { return f.queue }
-func (f FakeSyncContext) QueueKey() string                       { return f.spokeName }
-func (f FakeSyncContext) Recorder() openshiftevents.Recorder     { return f.recorder }
-
-func NewFakeSyncContext(t *testing.T, clusterName string) *FakeSyncContext {
-	return &FakeSyncContext{
-		spokeName: clusterName,
-		recorder:  eventstesting.NewTestingEventRecorder(t),
-		queue:     workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
-	}
-}
-
-type FakeSDKSyncContext struct {
 	spokeName string
 	recorder  sdkevents.Recorder
 	queue     workqueue.TypedRateLimitingInterface[string]
 }
 
-func (f FakeSDKSyncContext) Queue() workqueue.TypedRateLimitingInterface[string] { return f.queue }
-func (f FakeSDKSyncContext) Recorder() sdkevents.Recorder {
+func (f FakeSyncContext) Queue() workqueue.TypedRateLimitingInterface[string] { return f.queue }
+func (f FakeSyncContext) Recorder() sdkevents.Recorder {
 	return f.recorder
 }
 
-func NewFakeSDKSyncContext(t *testing.T, clusterName string) *FakeSDKSyncContext {
-	return &FakeSDKSyncContext{
+func NewFakeSyncContext(t *testing.T, clusterName string) *FakeSyncContext {
+	return &FakeSyncContext{
 		spokeName: clusterName,
 		recorder:  sdkevents.NewContextualLoggingEventRecorder(t.Name()),
 		queue:     workqueue.NewTypedRateLimitingQueue[string](workqueue.DefaultTypedControllerRateLimiter[string]()),
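Test call sites pair the renamed fake with the new three-argument Sync, as the test hunks in this commit show. A minimal sketch, assuming (as the diff implies) that the fake satisfies the sdk-go factory.SyncContext interface and that Controller exposes Sync(ctx, syncCtx, key); the helper name is illustrative:

package example

import (
    "context"
    "testing"

    "open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

    testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
)

// runSync drives one reconcile of an sdk-go factory controller in a test,
// mirroring the pattern used by the updated tests in this commit.
func runSync(t *testing.T, controller factory.Controller, key string) {
    syncContext := testingcommon.NewFakeSyncContext(t, key)
    if err := controller.Sync(context.TODO(), syncContext, key); err != nil {
        t.Errorf("expected no error when sync: %v", err)
    }
}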
@@ -12,7 +12,6 @@ import (
 
 	"github.com/ghodss/yaml"
 	"github.com/openshift/api"
-	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
 	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
 	admissionv1 "k8s.io/api/admissionregistration/v1"
@@ -42,6 +41,9 @@ import (
 	apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
 
 	operatorapiv1 "open-cluster-management.io/api/operator/v1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
+
+	commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
 )
 
 const (
@@ -233,10 +235,11 @@ func ApplyDeployment(
 	deployment.(*appsv1.Deployment).Spec.Template.Spec.NodeSelector = nodePlacement.NodeSelector
 	deployment.(*appsv1.Deployment).Spec.Template.Spec.Tolerations = nodePlacement.Tolerations
 
+	recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
 	updatedDeployment, updated, err := resourceapply.ApplyDeployment(
 		ctx,
 		client.AppsV1(),
-		recorder,
+		recorderWrapper,
 		deployment.(*appsv1.Deployment), generationStatus.LastGeneration)
 	if err != nil {
 		return updatedDeployment, generationStatus, fmt.Errorf("%q (%T): %v", file, deployment, err)
@@ -282,6 +285,7 @@ func ApplyDirectly(
 	manifests resourceapply.AssetFunc,
 	files ...string) []resourceapply.ApplyResult {
 	var ret []resourceapply.ApplyResult
+	recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
 
 	var genericApplyFiles []string
 	for _, file := range files {
@@ -321,7 +325,7 @@ func ApplyDirectly(
 	applyResults := resourceapply.ApplyDirectly(
 		ctx,
 		clientHolder,
-		recorder,
+		recorderWrapper,
 		cache,
 		manifests,
 		genericApplyFiles...,
@@ -676,7 +680,7 @@ func SyncSecret(ctx context.Context, client, targetClient coreclientv1.SecretsGetter,
 			return nil, false, nil
 		}
 		if deleteErr == nil {
-			recorder.Eventf("TargetSecretDeleted", "Deleted target secret %s/%s because source config does not exist", targetNamespace, targetName)
+			recorder.Eventf(ctx, "TargetSecretDeleted", "Deleted target secret %s/%s because source config does not exist", targetNamespace, targetName)
			return nil, true, nil
 		}
 		return nil, false, deleteErr
@@ -706,7 +710,8 @@ func SyncSecret(ctx context.Context, client, targetClient coreclientv1.SecretsGetter,
 		source.ResourceVersion = ""
 		source.OwnerReferences = ownerRefs
 		source.Labels = labels
-		return resourceapply.ApplySecret(ctx, targetClient, recorder, source)
+		recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
+		return resourceapply.ApplySecret(ctx, targetClient, recorderWrapper, source)
 	}
 }
@@ -7,13 +7,10 @@ import (
 	"fmt"
 	"reflect"
 	"testing"
-	"time"
 
 	"github.com/ghodss/yaml"
 	"github.com/google/go-cmp/cmp"
 	"github.com/openshift/library-go/pkg/assets"
-	"github.com/openshift/library-go/pkg/operator/events"
-	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
 	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
 	operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
 	admissionv1 "k8s.io/api/admissionregistration/v1"
@@ -32,10 +29,10 @@ import (
 	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
 	"k8s.io/component-base/featuregate"
 	fakeapiregistration "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake"
-	clocktesting "k8s.io/utils/clock/testing"
 
 	ocmfeature "open-cluster-management.io/api/feature"
 	operatorapiv1 "open-cluster-management.io/api/operator/v1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
 
 	"open-cluster-management.io/ocm/manifests"
 )
@@ -260,7 +257,7 @@ func TestApplyDirectly(t *testing.T) {
 			results = ApplyDirectly(
 				context.TODO(),
 				fakeKubeClient, nil,
-				eventstesting.NewTestingEventRecorder(t),
+				events.NewContextualLoggingEventRecorder(t.Name()),
 				cache,
 				fakeApplyFunc,
 				c.applyFileNames...,
@@ -269,7 +266,7 @@ func TestApplyDirectly(t *testing.T) {
 			results = ApplyDirectly(
 				context.TODO(),
 				fakeKubeClient, fakeExtensionClient,
-				eventstesting.NewTestingEventRecorder(t),
+				events.NewContextualLoggingEventRecorder(t.Name()),
 				cache,
 				fakeApplyFunc,
 				c.applyFileNames...,
@@ -683,7 +680,7 @@ func TestApplyDeployment(t *testing.T) {
 			func(name string) ([]byte, error) {
 				return json.Marshal(newDeploymentUnstructured(c.deploymentName, c.deploymentNamespace))
 			},
-			eventstesting.NewTestingEventRecorder(t),
+			events.NewContextualLoggingEventRecorder(t.Name()),
 			c.deploymentName,
 		)
 		if err != nil && !c.expectErr {
@@ -1480,7 +1477,7 @@ func TestSyncSecret(t *testing.T) {
 		clientTarget := fakekube.NewSimpleClientset()
 		secret, changed, err := SyncSecret(
 			context.TODO(), client.CoreV1(), clientTarget.CoreV1(),
-			events.NewInMemoryRecorder("test", clocktesting.NewFakePassiveClock(time.Now())), tc.sourceNamespace, tc.sourceName,
+			events.NewContextualLoggingEventRecorder(t.Name()), tc.sourceNamespace, tc.sourceName,
 			tc.targetNamespace, tc.targetName, tc.ownerRefs, nil)
 
 		if (err == nil && len(tc.expectedErr) != 0) || (err != nil && err.Error() != tc.expectedErr) {
@@ -4,13 +4,13 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 
 	operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
 	operatorapiv1 "open-cluster-management.io/api/operator/v1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 )
 
 const (
@@ -5,7 +5,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/openshift/library-go/pkg/controller/factory"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -14,6 +13,7 @@ import (
 	operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
 	operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
 	operatorapiv1 "open-cluster-management.io/api/operator/v1"
+	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
 )
 
 func newSecret(name, namespace string) *corev1.Secret {
@@ -7,7 +7,6 @@ import (
"path/filepath"
"time"

"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
authv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
@@ -20,6 +19,10 @@ import (
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"

"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
)

type TokenGetterFunc func() (token []byte, expiration []byte, additionalData map[string][]byte, err error)
@@ -233,7 +236,8 @@ func applyKubeconfigSecret(ctx context.Context, templateKubeconfig *rest.Config,
secret.Data[k] = v
}

_, _, err = resourceapply.ApplySecret(ctx, secretClient, recorder, secret)
recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
_, _, err = resourceapply.ApplySecret(ctx, secretClient, recorderWrapper, secret)
return err
}
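The wrapper used above bridges the two recorder worlds: library-go's resourceapply helpers still expect a context-free events.Recorder, while the sdk-go recorder takes a context on every call. A condensed sketch of the idea, with the struct layout and the sdk-go Recorder type name assumed; only Eventf is shown, and a real adapter must implement the rest of library-go's events.Recorder interface the same way:

package recorder

import (
	"context"

	sdkevents "open-cluster-management.io/sdk-go/pkg/basecontroller/events"
)

// eventsRecorderWrapper captures a context so the context-aware sdk-go
// recorder can stand in where a context-free recorder is required.
type eventsRecorderWrapper struct {
	ctx      context.Context
	recorder sdkevents.Recorder
}

func NewEventsRecorderWrapper(ctx context.Context, recorder sdkevents.Recorder) *eventsRecorderWrapper {
	return &eventsRecorderWrapper{ctx: ctx, recorder: recorder}
}

// Eventf satisfies the library-go signature by forwarding the captured context.
func (w *eventsRecorderWrapper) Eventf(reason, messageFmt string, args ...interface{}) {
	w.recorder.Eventf(w.ctx, reason, messageFmt, args...)
}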
@@ -6,7 +6,6 @@ import (
"reflect"
"testing"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
authv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -15,6 +14,8 @@ import (
testclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
clienttesting "k8s.io/client-go/testing"

"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
)

func TestTokenGetter(t *testing.T) {
@@ -276,7 +277,7 @@ func TestApplyKubeconfigSecret(t *testing.T) {
err := SyncKubeConfigSecret(
context.TODO(), secretName, secretNamespace,
"/tmp/kubeconfig", tkc, client.CoreV1(), tokenGetter,
eventstesting.NewTestingEventRecorder(t), nil)
events.NewContextualLoggingEventRecorder(t.Name()), nil)
if err != nil && !tt.wantErr {
t.Error(err)
}
@@ -6,8 +6,6 @@ import (
"slices"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
errorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
"k8s.io/apimachinery/pkg/api/errors"
@@ -20,6 +18,7 @@ import (
operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
operatorv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/certrotation"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -51,7 +50,6 @@ type certRotationController struct {
kubeClient kubernetes.Interface
secretInformers map[string]corev1informers.SecretInformer
configMapInformer corev1informers.ConfigMapInformer
recorder events.Recorder
clusterManagerLister operatorlister.ClusterManagerLister
}

@@ -66,14 +64,12 @@ func NewCertRotationController(
secretInformers map[string]corev1informers.SecretInformer,
configMapInformer corev1informers.ConfigMapInformer,
clusterManagerInformer operatorinformer.ClusterManagerInformer,
recorder events.Recorder,
) factory.Controller {
c := &certRotationController{
rotationMap: make(map[string]rotations),
kubeClient: kubeClient,
secretInformers: secretInformers,
configMapInformer: configMapInformer,
recorder: recorder,
clusterManagerLister: clusterManagerInformer.Lister(),
}
return factory.New().
@@ -86,11 +82,11 @@ func NewCertRotationController(
secretInformers[helpers.RegistrationWebhookSecret].Informer(),
secretInformers[helpers.WorkWebhookSecret].Informer(),
secretInformers[helpers.GRPCServerSecret].Informer()).
ToController("CertRotationController", recorder)
ToController("CertRotationController")
}

func (c certRotationController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
key := syncCtx.QueueKey()
func (c certRotationController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
logger := klog.FromContext(ctx).WithValues("key", key)
switch {
case key == "":
return nil
@@ -103,7 +99,7 @@ func (c certRotationController) sync(ctx context.Context, syncCtx factory.SyncCo

// do nothing if there is no cluster manager
if len(clustermanagers) == 0 {
klog.V(4).Infof("No ClusterManager found")
logger.V(4).Info("No ClusterManager found")
return nil
}

@@ -120,7 +116,8 @@ func (c certRotationController) sync(ctx context.Context, syncCtx factory.SyncCo
clustermanager, err := c.clusterManagerLister.Get(clustermanagerName)
// ClusterManager not found, could have been deleted, do nothing.
if errors.IsNotFound(err) {
klog.V(4).Infof("ClusterManager %q not found; it may have been deleted", clustermanagerName)
logger.V(4).Info("ClusterManager not found; it may have been deleted",
"clustermanager", clustermanagerName)
return nil
}
err = c.syncOne(ctx, clustermanager)
@@ -135,9 +132,11 @@ func (c certRotationController) syncOne(ctx context.Context, clustermanager *ope
clustermanagerName := clustermanager.Name
clustermanagerNamespace := helpers.ClusterManagerNamespace(clustermanager.Name, clustermanager.Spec.DeployOption.Mode)

logger := klog.FromContext(ctx).WithValues("clustermanager", clustermanagerName)

var err error

klog.Infof("Reconciling ClusterManager %q", clustermanagerName)
logger.Info("Reconciling ClusterManager")
// if the cluster manager is deleting, delete the rotation in map as well.
if !clustermanager.DeletionTimestamp.IsZero() {
// clean up all resources related with this clustermanager
@@ -253,7 +252,7 @@ func (c certRotationController) syncOne(ctx context.Context, clustermanager *ope
if !slices.Equal(targetRotation.HostNames, hostNames) {
targetRotation.HostNames = hostNames
cmRotations.targetRotations[helpers.GRPCServerSecret] = targetRotation
klog.Warningf("the hosts of grpc server are changed, will update the grpc serving cert")
logger.Info("the hosts of grpc server are changed, will update the grpc serving cert")
}

} else {
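This controller shows the sync shape every controller in the commit adopts; a minimal self-contained sketch of that shape (the controller type and log message are hypothetical, the three-argument signature and contextual logger come straight from the hunks above):

package example

import (
	"context"

	"k8s.io/klog/v2"

	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
)

type exampleController struct{}

// The sdk-go factory hands the queue key to sync directly, so controllers
// no longer read it from the sync context; logging goes through the
// contextual logger derived from ctx.
func (c *exampleController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
	if key == "" {
		return nil
	}
	logger := klog.FromContext(ctx).WithValues("key", key)
	logger.V(4).Info("Reconciling")
	return nil
}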
@@ -7,7 +7,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -22,6 +21,7 @@ import (
fakeoperatorclient "open-cluster-management.io/api/client/operator/clientset/versioned/fake"
operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/certrotation"

testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
@@ -194,11 +194,10 @@ func TestCertRotation(t *testing.T) {
}

syncContext := testingcommon.NewFakeSyncContext(t, c.queueKey)
recorder := syncContext.Recorder()

controller := NewCertRotationController(kubeClient, secretInformers, configmapInformer, operatorInformers.Operator().V1().ClusterManagers(), recorder)
controller := NewCertRotationController(kubeClient, secretInformers, configmapInformer, operatorInformers.Operator().V1().ClusterManagers())

err := controller.Sync(context.TODO(), syncContext)
err := controller.Sync(context.TODO(), syncContext, c.queueKey)
c.validate(t, kubeClient, err)
})
}
@@ -332,7 +331,6 @@ func TestCertRotationGRPCAuth(t *testing.T) {
}

syncContext := testingcommon.NewFakeSyncContext(t, testClusterManagerNameDefault)
recorder := syncContext.Recorder()

// Create the controller to check the rotation map
controller := &certRotationController{
@@ -340,12 +338,11 @@ func TestCertRotationGRPCAuth(t *testing.T) {
kubeClient: kubeClient,
secretInformers: secretInformers,
configMapInformer: configmapInformer,
recorder: recorder,
clusterManagerLister: operatorInformers.Operator().V1().ClusterManagers().Lister(),
}

// First sync with initial configuration
if err := controller.sync(context.TODO(), syncContext); err != nil {
if err := controller.sync(context.TODO(), syncContext, testClusterManagerNameDefault); err != nil {
t.Fatal(err)
}

@@ -353,7 +350,7 @@ func TestCertRotationGRPCAuth(t *testing.T) {
if err := clusterManagerStore.Update(c.updatedClusterManager); err != nil {
t.Fatal(err)
}
if err := controller.sync(context.TODO(), syncContext); err != nil {
if err := controller.sync(context.TODO(), syncContext, testClusterManagerNameDefault); err != nil {
t.Fatal(err)
}

@@ -689,19 +686,17 @@ func TestCertRotationGRPCServerHostNames(t *testing.T) {
}

syncContext := testingcommon.NewFakeSyncContext(t, testClusterManagerNameDefault)
recorder := syncContext.Recorder()

controller := &certRotationController{
rotationMap: make(map[string]rotations),
kubeClient: kubeClient,
secretInformers: secretInformers,
configMapInformer: configmapInformer,
recorder: recorder,
clusterManagerLister: operatorInformers.Operator().V1().ClusterManagers().Lister(),
}

// Sync the controller
err := controller.sync(context.TODO(), syncContext)
err := controller.sync(context.TODO(), syncContext, testClusterManagerNameDefault)

// Check if we expect an error
if c.expectedErrorSubstr != "" {
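The test hunks above also show the new way a built controller is driven: the exported Sync takes the queue key explicitly. A sketch under those assumptions (the fake controller and key are illustrative; NewFakeSyncContext is the repo's own test helper as used above):

package example

import (
	"context"
	"testing"

	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

	testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
)

// fakeController mimics the exported Sync of a factory-built controller,
// whose signature is inferred from the controller.Sync call sites above.
type fakeController struct{}

func (fakeController) Sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
	return nil
}

func TestControllerSync(t *testing.T) {
	var controller fakeController
	syncContext := testingcommon.NewFakeSyncContext(t, "example-key")
	if err := controller.Sync(context.TODO(), syncContext, "example-key"); err != nil {
		t.Fatal(err)
	}
}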
@@ -9,8 +9,6 @@ import (
"time"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
@@ -30,6 +28,8 @@ import (
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
ocmfeature "open-cluster-management.io/api/feature"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/manifests"
@@ -51,7 +51,6 @@ type clusterManagerController struct {
operatorKubeClient kubernetes.Interface
operatorKubeconfig *rest.Config
configMapLister corev1listers.ConfigMapLister
recorder events.Recorder
cache resourceapply.ResourceCache
// For testcases which don't need these functions, we could set fake funcs
ensureSAKubeconfigs func(ctx context.Context, clusterManagerName, clusterManagerNamespace string,
@@ -86,7 +85,6 @@ func NewClusterManagerController(
clusterManagerInformer operatorinformer.ClusterManagerInformer,
deploymentInformer appsinformer.DeploymentInformer,
configMapInformer corev1informers.ConfigMapInformer,
recorder events.Recorder,
skipRemoveCRDs bool,
controlPlaneNodeLabelSelector string,
deploymentReplicas int32,
@@ -101,7 +99,6 @@ func NewClusterManagerController(
clusterManagerClient),
clusterManagerLister: clusterManagerInformer.Lister(),
configMapLister: configMapInformer.Lister(),
recorder: recorder,
generateHubClusterClients: generateHubClients,
ensureSAKubeconfigs: ensureSAKubeconfigs,
cache: resourceapply.NewResourceCache(),
@@ -120,12 +117,12 @@ func NewClusterManagerController(
queue.FilterByNames(helpers.CaBundleConfigmap),
configMapInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterManagerInformer.Informer()).
ToController("ClusterManagerController", recorder)
ToController("ClusterManagerController")
}

func (n *clusterManagerController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
clusterManagerName := controllerContext.QueueKey()
klog.V(4).Infof("Reconciling ClusterManager %q", clusterManagerName)
func (n *clusterManagerController) sync(ctx context.Context, controllerContext factory.SyncContext, clusterManagerName string) error {
logger := klog.FromContext(ctx).WithValues("clusterManager", clusterManagerName)
logger.V(4).Info("Reconciling ClusterManager")

originalClusterManager, err := n.clusterManagerLister.Get(clusterManagerName)
if errors.IsNotFound(err) {
@@ -141,7 +138,7 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f

resourceRequirements, err := helpers.ResourceRequirements(clusterManager)
if err != nil {
klog.Errorf("failed to parse resource requirements for cluster manager %s: %v", clusterManager.Name, err)
logger.Error(err, "failed to parse resource requirements for cluster manager")
return err
}

@@ -259,14 +256,14 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f

var errs []error
reconcilers := []clusterManagerReconcile{
&crdReconcile{cache: n.cache, recorder: n.recorder, hubAPIExtensionClient: hubApiExtensionClient,
&crdReconcile{cache: n.cache, recorder: controllerContext.Recorder(), hubAPIExtensionClient: hubApiExtensionClient,
hubMigrationClient: hubMigrationClient, skipRemoveCRDs: n.skipRemoveCRDs},
&secretReconcile{cache: n.cache, recorder: n.recorder, operatorKubeClient: n.operatorKubeClient,
&secretReconcile{cache: n.cache, recorder: controllerContext.Recorder(), operatorKubeClient: n.operatorKubeClient,
hubKubeClient: hubClient, operatorNamespace: n.operatorNamespace, enableSyncLabels: n.enableSyncLabels},
&hubReconcile{cache: n.cache, recorder: n.recorder, hubKubeClient: hubClient},
&runtimeReconcile{cache: n.cache, recorder: n.recorder, hubKubeConfig: hubKubeConfig, hubKubeClient: hubClient,
&hubReconcile{cache: n.cache, recorder: controllerContext.Recorder(), hubKubeClient: hubClient},
&runtimeReconcile{cache: n.cache, recorder: controllerContext.Recorder(), hubKubeConfig: hubKubeConfig, hubKubeClient: hubClient,
kubeClient: managementClient, ensureSAKubeconfigs: n.ensureSAKubeconfigs},
&webhookReconcile{cache: n.cache, recorder: n.recorder, hubKubeClient: hubClient, kubeClient: managementClient},
&webhookReconcile{cache: n.cache, recorder: controllerContext.Recorder(), hubKubeClient: hubClient, kubeClient: managementClient},
}

// If the ClusterManager is deleting, we remove its related resources on hub
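The reconciler hunk above captures the broader shift: the recorder is no longer a field on the controller struct but is taken from the sync context on every pass. A condensed sketch, with the reconciler type and event reason hypothetical and the sdk-go events.Recorder type name assumed:

package example

import (
	"context"

	"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
)

// exampleReconcile receives the per-sync recorder instead of one stored on
// the controller, mirroring crdReconcile and friends above.
type exampleReconcile struct {
	recorder events.Recorder
}

func (r *exampleReconcile) reconcile(ctx context.Context) error {
	r.recorder.Eventf(ctx, "ExampleApplied", "example resource applied")
	return nil
}

func sync(ctx context.Context, controllerContext factory.SyncContext, key string) error {
	r := &exampleReconcile{recorder: controllerContext.Recorder()}
	return r.reconcile(ctx)
}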
@@ -8,8 +8,6 @@ import (

"github.com/ghodss/yaml"
"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -33,6 +31,7 @@ import (
operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
ocmfeature "open-cluster-management.io/api/feature"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/manifests"
@@ -288,7 +287,6 @@ func setup(t *testing.T, tc *testController, cd []runtime.Object, crds ...runtim
tc.managementKubeClient = fakeManagementKubeClient

// set clients in clustermanager controller
tc.clusterManagerController.recorder = eventstesting.NewTestingEventRecorder(t)
tc.clusterManagerController.operatorKubeClient = fakeManagementKubeClient
tc.clusterManagerController.generateHubClusterClients = func(hubKubeConfig *rest.Config) (
kubernetes.Interface, apiextensionsclient.Interface, migrationclient.StorageVersionMigrationsGetter, error) {
@@ -341,7 +339,7 @@ func assertDeployments(t *testing.T, clusterManager *operatorapiv1.ClusterManage

syncContext := testingcommon.NewFakeSyncContext(t, "testhub")

err := tc.clusterManagerController.sync(ctx, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -381,7 +379,7 @@ func assertDeletion(t *testing.T, clusterManager *operatorapiv1.ClusterManager,
syncContext := testingcommon.NewFakeSyncContext(t, "testhub")
clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManager.Name, clusterManager.Spec.DeployOption.Mode)

err := tc.clusterManagerController.sync(ctx, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext, "testhub")
if err != nil {
t.Fatalf("Expected non error when sync, %v", err)
}
@@ -494,7 +492,7 @@ func TestSyncSecret(t *testing.T) {
}
}

err := tc.clusterManagerController.sync(ctx, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -575,7 +573,7 @@ func TestSyncDeployNoWebhook(t *testing.T) {

syncContext := testingcommon.NewFakeSyncContext(t, "testhub")

err := tc.clusterManagerController.sync(ctx, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -660,12 +658,12 @@ func TestDeleteCRD(t *testing.T) {

})
syncContext := testingcommon.NewFakeSyncContext(t, "testhub")
err := tc.clusterManagerController.sync(ctx, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext, "testhub")
if err == nil {
t.Fatalf("Expected error when sync at first time")
}

err = tc.clusterManagerController.sync(ctx, syncContext)
err = tc.clusterManagerController.sync(ctx, syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync at second time: %v", err)
}
@@ -917,7 +915,7 @@ func TestGRPCServiceLoadBalancerType(t *testing.T) {
syncContext := testingcommon.NewFakeSyncContext(t, test.clusterManager.Name)

// Call sync to create resources
err := tc.clusterManagerController.sync(ctx, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -1076,7 +1074,7 @@ func TestWorkControllerEnabledByFeatureGates(t *testing.T) {
syncContext := testingcommon.NewFakeSyncContext(t, "test-cluster-manager")

// Call sync to trigger the feature gate processing
err := tc.clusterManagerController.sync(ctx, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext, "test-cluster-manager")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -9,7 +9,6 @@ import (
"fmt"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@@ -18,6 +17,7 @@ import (
migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/manifests"
"open-cluster-management.io/ocm/pkg/operator/helpers"
@@ -124,7 +124,7 @@ func (c *crdReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManag
if err := crdManager.CleanOne(ctx, name, c.skipRemoveCRDs); err != nil {
return cm, reconcileStop, err
}
c.recorder.Eventf("CRDDeleted", "crd %s is deleted", name)
c.recorder.Eventf(ctx, "CRDDeleted", "crd %s is deleted", name)
}
if c.skipRemoveCRDs {
return cm, reconcileContinue, nil

@@ -9,7 +9,6 @@ import (
"fmt"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -17,6 +16,7 @@ import (
"k8s.io/client-go/kubernetes"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/manifests"
"open-cluster-management.io/ocm/pkg/operator/helpers"

@@ -10,7 +10,6 @@ import (
"strings"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -19,6 +18,7 @@ import (
"k8s.io/client-go/rest"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/manifests"
"open-cluster-management.io/ocm/pkg/operator/helpers"

@@ -4,7 +4,6 @@ import (
"context"
"fmt"

"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -13,6 +12,7 @@ import (
"k8s.io/client-go/kubernetes"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/manifests"
"open-cluster-management.io/ocm/pkg/operator/helpers"

@@ -9,7 +9,6 @@ import (
"fmt"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -17,6 +16,7 @@ import (
"k8s.io/client-go/kubernetes"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/manifests"
commonhelpers "open-cluster-management.io/ocm/pkg/common/helpers"
@@ -8,8 +8,6 @@ import (
"context"
"reflect"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -21,6 +19,7 @@ import (
operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

"open-cluster-management.io/ocm/pkg/common/queue"
"open-cluster-management.io/ocm/pkg/operator/helpers"
@@ -46,8 +45,7 @@ type crdStatusController struct {
func NewCRDStatusController(
kubeconfig *rest.Config,
kubeClient kubernetes.Interface,
clusterManagerInformer operatorinformer.ClusterManagerInformer,
recorder events.Recorder) factory.Controller {
clusterManagerInformer operatorinformer.ClusterManagerInformer) factory.Controller {
controller := &crdStatusController{
kubeconfig: kubeconfig,
kubeClient: kubeClient,
@@ -57,12 +55,12 @@ func NewCRDStatusController(

return factory.New().WithSync(controller.sync).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterManagerInformer.Informer()).
ToController("CRDStatusController", recorder)
ToController("CRDStatusController")
}

func (c *crdStatusController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
clusterManagerName := controllerContext.QueueKey()
klog.V(4).Infof("Reconciling ClusterManager %q", clusterManagerName)
func (c *crdStatusController) sync(ctx context.Context, controllerContext factory.SyncContext, clusterManagerName string) error {
logger := klog.FromContext(ctx).WithValues("clusterManager", clusterManagerName)
logger.V(4).Info("Reconciling ClusterManager")

clusterManager, err := c.clusterManagerLister.Get(clusterManagerName)
if errors.IsNotFound(err) {
@@ -80,7 +78,7 @@ func (c *crdStatusController) sync(ctx context.Context, controllerContext factor
// need to wait storage version migrations succeed.
if succeeded := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, operatorapiv1.ConditionMigrationSucceeded); !succeeded {
controllerContext.Queue().AddRateLimited(clusterManagerName)
klog.V(4).Info("Wait storage version migration succeed.")
logger.V(4).Info("Wait storage version migration succeed.")
return nil
}
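Each constructor in the commit loses its recorder parameter the same way this one does; a condensed sketch of the wiring, with the example controller as a placeholder and the call chain taken from the hunks above (note ToController now takes only a name, since events flow through the sync context rather than a recorder captured at construction time):

package example

import (
	"context"

	operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

	"open-cluster-management.io/ocm/pkg/common/queue"
)

type exampleController struct{}

func (c *exampleController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
	return nil
}

func newExampleController(informer operatorinformer.ClusterManagerInformer) factory.Controller {
	c := &exampleController{}
	return factory.New().
		WithSync(c.sync).
		WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, informer.Informer()).
		ToController("ExampleController")
}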
@@ -29,7 +29,7 @@ func TestSync(t *testing.T) {

syncContext := testingcommon.NewFakeSyncContext(t, "testhub")
// Do not support migration
err := tc.sync(context.Background(), syncContext)
err := tc.sync(context.Background(), syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -43,7 +43,7 @@ func TestSync(t *testing.T) {
}
crds := newCrds()
tc = newTestController(t, clusterManager, crds...)
err = tc.sync(context.Background(), syncContext)
err = tc.sync(context.Background(), syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -5,8 +5,6 @@ import (
"fmt"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/pkg/operator/resource/resourcehelper"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
@@ -29,6 +27,8 @@ import (
operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/manifests"
@@ -60,7 +60,6 @@ type crdMigrationController struct {
kubeClient kubernetes.Interface
patcher patcher.Patcher[*operatorapiv1.ClusterManager, operatorapiv1.ClusterManagerSpec, operatorapiv1.ClusterManagerStatus]
clusterManagerLister operatorlister.ClusterManagerLister
recorder events.Recorder
generateHubClusterClients func(hubConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error)
parseMigrations func() ([]*migrationv1alpha1.StorageVersionMigration, error)
}
@@ -70,8 +69,7 @@ func NewCRDMigrationController(
kubeconfig *rest.Config,
kubeClient kubernetes.Interface,
clusterManagerClient operatorv1client.ClusterManagerInterface,
clusterManagerInformer operatorinformer.ClusterManagerInformer,
recorder events.Recorder) factory.Controller {
clusterManagerInformer operatorinformer.ClusterManagerInformer) factory.Controller {
controller := &crdMigrationController{
kubeconfig: kubeconfig,
kubeClient: kubeClient,
@@ -80,18 +78,18 @@ func NewCRDMigrationController(
clusterManagerClient),
clusterManagerLister: clusterManagerInformer.Lister(),
parseMigrations: parseStorageVersionMigrationFiles,
recorder: recorder,
generateHubClusterClients: generateHubClients,
}

return factory.New().WithSync(controller.sync).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterManagerInformer.Informer()).
ToController("CRDMigrationController", recorder)
ToController("CRDMigrationController")
}

func (c *crdMigrationController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
clusterManagerName := controllerContext.QueueKey()
klog.V(4).Infof("Reconciling ClusterManager %q", clusterManagerName)
func (c *crdMigrationController) sync(ctx context.Context, controllerContext factory.SyncContext, clusterManagerName string) error {
logger := klog.FromContext(ctx).WithValues("clusterManager", clusterManagerName)
logger.V(4).Info("Reconciling ClusterManager")
ctx = klog.NewContext(ctx, logger)

// if no migration files exist, do nothing and exit the reconcile
migrations, err := c.parseMigrations()
@@ -152,23 +150,23 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac

_, err = c.patcher.PatchStatus(ctx, newClusterManager, newClusterManager.Status, clusterManager.Status)
if err != nil {
klog.Errorf("Failed to update ClusterManager status. %v", err)
logger.Error(err, "Failed to update ClusterManager status")
controllerContext.Queue().AddRateLimited(clusterManagerName)
return
}

// If migration not succeed, wait for all StorageVersionMigrations succeed.
if migrationCond.Status != metav1.ConditionTrue {
klog.V(4).Infof("Wait all StorageVersionMigrations succeed. migrationCond: %v. error: %v", migrationCond, err)
logger.V(4).Info("Wait all StorageVersionMigrations succeed", "migrationCond", migrationCond, "error", err)
controllerContext.Queue().AddRateLimited(clusterManagerName)
}
}()

err = checkCRDStorageVersion(ctx, migrations, apiExtensionClient)
if err != nil {
klog.Errorf("Failed to check CRD current storage version. %v", err)
logger.Error(err, "Failed to check CRD current storage version")
controllerContext.Queue().AddRateLimited(clusterManagerName)
c.recorder.Warningf("StorageVersionMigrationFailed", "Failed to check CRD current storage version. %v", err)
controllerContext.Recorder().Warningf(ctx, "StorageVersionMigrationFailed", "Failed to check CRD current storage version. %v", err)

migrationCond = metav1.Condition{
Type: operatorapiv1.ConditionMigrationSucceeded,
@@ -179,9 +177,9 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac
return nil
}

err = createStorageVersionMigrations(ctx, migrations, newClusterManagerOwner(clusterManager), migrationClient, c.recorder)
err = createStorageVersionMigrations(ctx, migrations, newClusterManagerOwner(clusterManager), migrationClient, controllerContext.Recorder())
if err != nil {
klog.Errorf("Failed to apply StorageVersionMigrations. %v", err)
logger.Error(err, "Failed to apply StorageVersionMigrations")

migrationCond = metav1.Condition{
Type: operatorapiv1.ConditionMigrationSucceeded,
@@ -194,7 +192,7 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac

migrationCond, err = syncStorageVersionMigrationsCondition(ctx, migrations, migrationClient)
if err != nil {
klog.Errorf("Failed to sync StorageVersionMigrations condition. %v", err)
logger.Error(err, "Failed to sync StorageVersionMigrations condition")
return err
}

@@ -397,11 +395,11 @@ func createStorageVersionMigration(
migration.ObjectMeta.OwnerReferences = append(migration.ObjectMeta.OwnerReferences, ownerRefs)
actual, err := client.StorageVersionMigrations().Create(context.TODO(), migration, metav1.CreateOptions{})
if err != nil {
recorder.Warningf("StorageVersionMigrationCreateFailed", "Failed to create %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(migration), err)
recorder.Warningf(ctx, "StorageVersionMigrationCreateFailed", "Failed to create %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(migration), err)
return err
}

recorder.Eventf("StorageVersionMigrationCreated", "Created %s because it was missing", resourcehelper.FormatResourceForCLIWithNamespace(actual))
recorder.Eventf(ctx, "StorageVersionMigrationCreated", "Created %s because it was missing", resourcehelper.FormatResourceForCLIWithNamespace(actual))
return err
}
if err != nil {
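One detail worth noting in the sync above is ctx = klog.NewContext(ctx, logger): attaching the enriched logger to the context is what lets downstream helpers such as createStorageVersionMigrations log with the same key/values. A self-contained illustration of that pattern, using only the klog/v2 API (function names are illustrative):

package example

import (
	"context"

	"k8s.io/klog/v2"
)

func sync(ctx context.Context, name string) {
	// Attach the resource name to the logger carried by ctx.
	logger := klog.FromContext(ctx).WithValues("clusterManager", name)
	ctx = klog.NewContext(ctx, logger)
	helper(ctx)
}

func helper(ctx context.Context) {
	// Recovers the logger above, so this line is tagged with the
	// "clusterManager" key/value without plumbing it explicitly.
	klog.FromContext(ctx).V(4).Info("checking storage versions")
}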
@@ -6,7 +6,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
v1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@@ -23,6 +22,7 @@ import (
fakeoperatorlient "open-cluster-management.io/api/client/operator/clientset/versioned/fake"
operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
"open-cluster-management.io/sdk-go/pkg/patcher"

testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
@@ -230,7 +230,7 @@ func TestCreateStorageVersionMigrations(t *testing.T) {
UID: "testhub-uid",
},
}), fakeMigrationClient.MigrationV1alpha1(),
eventstesting.NewTestingEventRecorder(t))
events.NewContextualLoggingEventRecorder(t.Name()))
if c.expectErr && err != nil {
return
}
@@ -465,7 +465,7 @@ func TestSync(t *testing.T) {

syncContext := testingcommon.NewFakeSyncContext(t, "testhub")
// Do not support migration
err := tc.sync(context.Background(), syncContext)
err := tc.sync(context.Background(), syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -490,7 +490,7 @@ func TestSync(t *testing.T) {
newFakeCRD(migrationRequestCRDName, "v1", "v1"),
newFakeCRD("foos.cluster.open-cluster-management.io", "v1beta2", "v1beta1", "v1beta2"),
newFakeCRD("bars.cluster.open-cluster-management.io", "v1beta2", "v1beta1", "v1beta2"))
err = tc.sync(context.Background(), syncContext)
err = tc.sync(context.Background(), syncContext, "testhub")
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
}
@@ -514,7 +514,6 @@ func newTestController(

crdMigrationController := &crdMigrationController{
clusterManagerLister: operatorInformers.Operator().V1().ClusterManagers().Lister(),
recorder: eventstesting.NewTestingEventRecorder(t),
patcher: patcher.NewPatcher[
*operatorapiv1.ClusterManager, operatorapiv1.ClusterManagerSpec, operatorapiv1.ClusterManagerStatus](
fakeOperatorClient.OperatorV1().ClusterManagers()),
@@ -4,8 +4,6 @@ import (
"context"
"fmt"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -17,6 +15,7 @@ import (
operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -33,8 +32,7 @@ type clusterManagerStatusController struct {
func NewClusterManagerStatusController(
clusterManagerClient operatorv1client.ClusterManagerInterface,
clusterManagerInformer operatorinformer.ClusterManagerInformer,
deploymentInformer appsinformer.DeploymentInformer,
recorder events.Recorder) factory.Controller {
deploymentInformer appsinformer.DeploymentInformer) factory.Controller {
controller := &clusterManagerStatusController{
deploymentLister: deploymentInformer.Lister(),
clusterManagerLister: clusterManagerInformer.Lister(),
@@ -47,16 +45,16 @@ func NewClusterManagerStatusController(
WithInformersQueueKeysFunc(
helpers.ClusterManagerDeploymentQueueKeyFunc(controller.clusterManagerLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterManagerInformer.Informer()).
ToController("ClusterManagerStatusController", recorder)
ToController("ClusterManagerStatusController")
}

func (s *clusterManagerStatusController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
clusterManagerName := controllerContext.QueueKey()
func (s *clusterManagerStatusController) sync(ctx context.Context, controllerContext factory.SyncContext, clusterManagerName string) error {
logger := klog.FromContext(ctx).WithValues("clusterManager", clusterManagerName)
if clusterManagerName == "" {
return nil
}

klog.Infof("Reconciling ClusterManager %q", clusterManagerName)
logger.V(4).Info("Reconciling ClusterManager")

clusterManager, err := s.clusterManagerLister.Get(clusterManagerName)
// ClusterManager not found, could have been deleted, do nothing.
@@ -231,7 +231,7 @@ func TestSyncStatus(t *testing.T) {
}

syncContext := testingcommon.NewFakeSyncContext(t, c.queueKey)
err := controller.sync(context.TODO(), syncContext)
err := controller.sync(context.TODO(), syncContext, c.queueKey)
if err != nil {
t.Errorf("Expected no error when update status: %v", err)
}

@@ -84,7 +84,6 @@ func (o *Options) RunClusterManagerOperator(ctx context.Context, controllerConte
operatorInformer.Operator().V1().ClusterManagers(),
deploymentInformer.Apps().V1().Deployments(),
configmapInformer.Core().V1().ConfigMaps(),
controllerContext.EventRecorder,
o.SkipRemoveCRDs,
o.ControlPlaneNodeLabelSelector,
o.DeploymentReplicas,
@@ -95,28 +94,24 @@ func (o *Options) RunClusterManagerOperator(ctx context.Context, controllerConte
statusController := clustermanagerstatuscontroller.NewClusterManagerStatusController(
operatorClient.OperatorV1().ClusterManagers(),
operatorInformer.Operator().V1().ClusterManagers(),
deploymentInformer.Apps().V1().Deployments(),
controllerContext.EventRecorder)
deploymentInformer.Apps().V1().Deployments())

certRotationController := certrotationcontroller.NewCertRotationController(
kubeClient,
secretInformers,
configmapInformer.Core().V1().ConfigMaps(),
operatorInformer.Operator().V1().ClusterManagers(),
controllerContext.EventRecorder)
operatorInformer.Operator().V1().ClusterManagers())

crdMigrationController := migrationcontroller.NewCRDMigrationController(
controllerContext.KubeConfig,
kubeClient,
operatorClient.OperatorV1().ClusterManagers(),
operatorInformer.Operator().V1().ClusterManagers(),
controllerContext.EventRecorder)
operatorInformer.Operator().V1().ClusterManagers())

crdStatusController := crdstatuccontroller.NewCRDStatusController(
controllerContext.KubeConfig,
kubeClient,
operatorInformer.Operator().V1().ClusterManagers(),
controllerContext.EventRecorder)
operatorInformer.Operator().V1().ClusterManagers())

go operatorInformer.Start(ctx.Done())
go deploymentInformer.Start(ctx.Done())
@@ -3,13 +3,13 @@ package addonsecretcontroller
import (
"context"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
coreinformer "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"

"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

"open-cluster-management.io/ocm/pkg/common/queue"
"open-cluster-management.io/ocm/pkg/operator/helpers"
)
@@ -27,28 +27,25 @@ type addonPullImageSecretController struct {
operatorNamespace string
namespaceInformer coreinformer.NamespaceInformer
kubeClient kubernetes.Interface
recorder events.Recorder
}

func NewAddonPullImageSecretController(kubeClient kubernetes.Interface, operatorNamespace string,
namespaceInformer coreinformer.NamespaceInformer, recorder events.Recorder) factory.Controller {
namespaceInformer coreinformer.NamespaceInformer) factory.Controller {
ac := &addonPullImageSecretController{
operatorNamespace: operatorNamespace,
namespaceInformer: namespaceInformer,
kubeClient: kubeClient,
recorder: recorder,
}
return factory.New().WithFilteredEventsInformersQueueKeysFunc(
queue.QueueKeyByMetaName,
queue.FileterByLabelKeyValue(addonInstallNamespaceLabelKey, "true"),
namespaceInformer.Informer()).WithSync(ac.sync).ToController("AddonPullImageSecretController", recorder)
namespaceInformer.Informer()).WithSync(ac.sync).ToController("AddonPullImageSecretController")
}

func (c *addonPullImageSecretController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
func (c *addonPullImageSecretController) sync(ctx context.Context, syncCtx factory.SyncContext, namespace string) error {
var err error

// Sync secret if namespace is created
namespace := controllerContext.QueueKey()
if namespace == "" {
return nil
}
@@ -73,7 +70,7 @@ func (c *addonPullImageSecretController) sync(ctx context.Context, controllerCon
ctx,
c.kubeClient.CoreV1(),
c.kubeClient.CoreV1(),
c.recorder,
syncCtx.Recorder(),
c.operatorNamespace,
helpers.ImagePullSecret,
namespace,
@@ -5,7 +5,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -84,7 +83,6 @@ func TestSync(t *testing.T) {
}

for _, tc := range testcases {
recorder := eventstesting.NewTestingEventRecorder(t)
objs := append(tc.objects, tc.namespaces...) //nolint:gocritic
kubeClient := kubefake.NewSimpleClientset(objs...)
kubeInformer := informers.NewSharedInformerFactory(kubeClient, 5*time.Minute)
@@ -96,11 +94,10 @@ func TestSync(t *testing.T) {
controller := &addonPullImageSecretController{
operatorNamespace: "open-cluster-management",
kubeClient: kubeClient,
recorder: recorder,
namespaceInformer: kubeInformer.Core().V1().Namespaces(),
}

err := controller.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, tc.queueKey))
err := controller.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, tc.queueKey), tc.queueKey)
if err != nil {
t.Errorf("%s: unexpected error: %v", tc.name, err)
}
@@ -7,7 +7,6 @@ package klusterletcontroller
import (
"context"

"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
corev1 "k8s.io/api/core/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@@ -18,14 +17,16 @@ import (
workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
"open-cluster-management.io/ocm/pkg/operator/helpers"
)

type managedClusterClientsBuilderInterface interface {
withMode(mode operatorapiv1.InstallMode) managedClusterClientsBuilderInterface
withKubeConfigSecret(namespace, name string) managedClusterClientsBuilderInterface
build(ctx context.Context) (*managedClusterClients, error)
build(ctx context.Context, syncCtx factory.SyncContext) (*managedClusterClients, error)
}

// managedClusterClients holds variety of kube client for managed cluster
@@ -43,7 +44,6 @@ type managedClusterClientsBuilder struct {
kubeClient kubernetes.Interface
apiExtensionClient apiextensionsclient.Interface
appliedManifestWorkClient workv1client.AppliedManifestWorkInterface
recorder events.Recorder

mode operatorapiv1.InstallMode
secretNamespace string
@@ -54,13 +54,11 @@ func newManagedClusterClientsBuilder(
kubeClient kubernetes.Interface,
apiExtensionClient apiextensionsclient.Interface,
appliedManifestWorkClient workv1client.AppliedManifestWorkInterface,
recorder events.Recorder,
) *managedClusterClientsBuilder {
return &managedClusterClientsBuilder{
kubeClient: kubeClient,
apiExtensionClient: apiExtensionClient,
appliedManifestWorkClient: appliedManifestWorkClient,
recorder: recorder,
}
}

@@ -75,7 +73,7 @@ func (m *managedClusterClientsBuilder) withKubeConfigSecret(namespace, name stri
return m
}

func (m *managedClusterClientsBuilder) build(ctx context.Context) (*managedClusterClients, error) {
func (m *managedClusterClientsBuilder) build(ctx context.Context, syncCtx factory.SyncContext) (*managedClusterClients, error) {
if !helpers.IsHosted(m.mode) {
return &managedClusterClients{
kubeClient: m.kubeClient,
@@ -87,7 +85,8 @@ func (m *managedClusterClientsBuilder) build(ctx context.Context) (*managedClust
// Ensure the agent namespace for users to create the external-managed-kubeconfig secret in this
// namespace, so that in the next reconcile loop the controller can get the secret successfully after
// the secret was created.
_, _, err := resourceapply.ApplyNamespace(ctx, m.kubeClient.CoreV1(), m.recorder, &corev1.Namespace{
recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, syncCtx.Recorder())
_, _, err := resourceapply.ApplyNamespace(ctx, m.kubeClient.CoreV1(), recorderWrapper, &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: m.secretNamespace,
Annotations: map[string]string{
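The builder change above shows how helpers that are not controllers themselves reach the per-sync recorder: the sync context is threaded through as an argument and wrapped wherever a library-go recorder is still required. A condensed sketch under those assumptions (the builder type and its fields are illustrative):

package example

import (
	"context"

	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

	commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
)

type exampleBuilder struct {
	kubeClient      kubernetes.Interface
	secretNamespace string
}

// build no longer needs a recorder field: it wraps the sync context's
// recorder on demand for the library-go apply helper.
func (b *exampleBuilder) build(ctx context.Context, syncCtx factory.SyncContext) error {
	wrapped := commonrecorder.NewEventsRecorderWrapper(ctx, syncCtx.Recorder())
	_, _, err := resourceapply.ApplyNamespace(ctx, b.kubeClient.CoreV1(), wrapped, &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: b.secretNamespace},
	})
	return err
}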
@@ -6,8 +6,6 @@ import (
"time"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -24,6 +22,7 @@ import (
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/manifests"
@@ -57,8 +56,7 @@ func NewKlusterletCleanupController(
operatorNamespace string,
controlPlaneNodeLabelSelector string,
deploymentReplicas int32,
disableAddonNamespace bool,
recorder events.Recorder) factory.Controller {
disableAddonNamespace bool) factory.Controller {
controller := &klusterletCleanupController{
kubeClient: kubeClient,
patcher: patcher.NewPatcher[
@@ -67,7 +65,7 @@ func NewKlusterletCleanupController(
klusterletLister: klusterletInformer.Lister(),
kubeVersion: kubeVersion,
operatorNamespace: operatorNamespace,
managedClusterClientsBuilder: newManagedClusterClientsBuilder(kubeClient, apiExtensionClient, appliedManifestWorkClient, recorder),
managedClusterClientsBuilder: newManagedClusterClientsBuilder(kubeClient, apiExtensionClient, appliedManifestWorkClient),
controlPlaneNodeLabelSelector: controlPlaneNodeLabelSelector,
deploymentReplicas: deploymentReplicas,
disableAddonNamespace: disableAddonNamespace,
@@ -80,12 +78,12 @@ func NewKlusterletCleanupController(
secretInformers[helpers.ExternalManagedKubeConfig].Informer()).
WithInformersQueueKeysFunc(helpers.KlusterletDeploymentQueueKeyFunc(controller.klusterletLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, klusterletInformer.Informer()).
ToController("KlusterletCleanupController", recorder)
ToController("KlusterletCleanupController")
}

func (n *klusterletCleanupController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
klusterletName := controllerContext.QueueKey()
klog.V(4).Infof("Reconciling Klusterlet %q", klusterletName)
func (n *klusterletCleanupController) sync(ctx context.Context, controllerContext factory.SyncContext, klusterletName string) error {
logger := klog.FromContext(ctx).WithValues("klusterlet", klusterletName)
logger.V(4).Info("Reconciling Klusterlet")
originalKlusterlet, err := n.klusterletLister.Get(klusterletName)
if errors.IsNotFound(err) {
// Klusterlet not found, could have been deleted, do nothing.
@@ -148,7 +146,7 @@ func (n *klusterletCleanupController) sync(ctx context.Context, controllerContex
managedClusterClients, err := n.managedClusterClientsBuilder.
withMode(config.InstallMode).
withKubeConfigSecret(config.AgentNamespace, config.ExternalManagedKubeConfigSecret).
build(ctx)
build(ctx, controllerContext)
// stop when hosted kubeconfig is not found. the klustelet controller will monitor the secret and retrigger
// reconciliation of cleanup controller when secret is created again.
if errors.IsNotFound(err) {
@@ -37,7 +37,7 @@ func TestSyncDelete(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), appliedManifestWorks, false,
namespace, bootstrapKubeConfigSecret)

err := controller.cleanupController.sync(context.TODO(), syncContext)
err := controller.cleanupController.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -93,10 +93,10 @@ func TestSyncDeleteHosted(t *testing.T) {
newAppliedManifestWorks("testhost-2", []string{workv1.AppliedManifestWorkFinalizer}, false),
}
syncContext := testingcommon.NewFakeSyncContext(t, klusterlet.Name)
controller := newTestControllerHosted(t, klusterlet, syncContext.Recorder(), appliedManifestWorks,
controller := newTestControllerHosted(t, klusterlet, appliedManifestWorks,
bootstrapKubeConfigSecret, namespace /*externalManagedSecret*/)

err := controller.cleanupController.sync(context.TODO(), syncContext)
err := controller.cleanupController.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -158,9 +158,9 @@ func TestSyncDeleteHostedDeleteAgentNamespace(t *testing.T) {
now := metav1.Now()
klusterlet.ObjectMeta.SetDeletionTimestamp(&now)
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestControllerHosted(t, klusterlet, syncContext.Recorder(), nil).setDefaultManagedClusterClientsBuilder()
controller := newTestControllerHosted(t, klusterlet, nil).setDefaultManagedClusterClientsBuilder()

err := controller.cleanupController.sync(context.TODO(), syncContext)
err := controller.cleanupController.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -175,9 +175,9 @@ func TestSyncDeleteHostedDeleteWaitKubeconfig(t *testing.T) {
now := metav1.Now()
klusterlet.ObjectMeta.SetDeletionTimestamp(&now)
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestControllerHosted(t, klusterlet, syncContext.Recorder(), nil).setDefaultManagedClusterClientsBuilder()
controller := newTestControllerHosted(t, klusterlet, nil).setDefaultManagedClusterClientsBuilder()

err := controller.cleanupController.sync(context.TODO(), syncContext)
err := controller.cleanupController.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -196,9 +196,9 @@ func TestSyncAddHostedFinalizerWhenKubeconfigReady(t *testing.T) {
klusterletHostedFinalizer)

syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
c := newTestControllerHosted(t, klusterlet, syncContext.Recorder(), nil)
c := newTestControllerHosted(t, klusterlet, nil)

err := c.cleanupController.sync(context.TODO(), syncContext)
err := c.cleanupController.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -218,7 +218,7 @@ func TestSyncAddHostedFinalizerWhenKubeconfigReady(t *testing.T) {
if err := c.operatorStore.Update(klusterlet); err != nil {
t.Fatal(err)
}
err = c.cleanupController.sync(context.TODO(), syncContext)
err = c.cleanupController.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -6,8 +6,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/openshift/library-go/pkg/controller/factory"
|
||||
"github.com/openshift/library-go/pkg/operator/events"
|
||||
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
@@ -28,10 +26,13 @@ import (
|
||||
workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1"
|
||||
ocmfeature "open-cluster-management.io/api/feature"
|
||||
operatorapiv1 "open-cluster-management.io/api/operator/v1"
|
||||
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
|
||||
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
|
||||
"open-cluster-management.io/sdk-go/pkg/patcher"
|
||||
|
||||
commonhelpers "open-cluster-management.io/ocm/pkg/common/helpers"
|
||||
"open-cluster-management.io/ocm/pkg/common/queue"
|
||||
commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
|
||||
"open-cluster-management.io/ocm/pkg/operator/helpers"
|
||||
)
|
||||
|
||||
@@ -84,8 +85,7 @@ func NewKlusterletController(
|
||||
controlPlaneNodeLabelSelector string,
|
||||
deploymentReplicas int32,
|
||||
disableAddonNamespace bool,
|
||||
enableSyncLabels bool,
|
||||
recorder events.Recorder) factory.Controller {
|
||||
enableSyncLabels bool) factory.Controller {
|
||||
controller := &klusterletController{
|
||||
kubeClient: kubeClient,
|
||||
patcher: patcher.NewPatcher[
|
||||
@@ -94,7 +94,7 @@ func NewKlusterletController(
|
||||
kubeVersion: kubeVersion,
|
||||
operatorNamespace: operatorNamespace,
|
||||
cache: resourceapply.NewResourceCache(),
|
||||
managedClusterClientsBuilder: newManagedClusterClientsBuilder(kubeClient, apiExtensionClient, appliedManifestWorkClient, recorder),
|
||||
managedClusterClientsBuilder: newManagedClusterClientsBuilder(kubeClient, apiExtensionClient, appliedManifestWorkClient),
|
||||
controlPlaneNodeLabelSelector: controlPlaneNodeLabelSelector,
|
||||
deploymentReplicas: deploymentReplicas,
|
||||
disableAddonNamespace: disableAddonNamespace,
|
||||
@@ -109,7 +109,7 @@ func NewKlusterletController(
|
||||
WithInformersQueueKeysFunc(helpers.KlusterletDeploymentQueueKeyFunc(
|
||||
controller.klusterletLister), deploymentInformer.Informer()).
|
||||
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, klusterletInformer.Informer()).
|
||||
ToController("KlusterletController", recorder)
|
||||
ToController("KlusterletController")
|
||||
}
|
||||
|
||||
type AwsIrsa struct {
|
||||
@@ -235,9 +235,9 @@ func (config *klusterletConfig) populateBootstrap(klusterlet *operatorapiv1.Klus
|
||||
}
|
||||
}
|
||||
|
||||
func (n *klusterletController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
|
||||
klusterletName := controllerContext.QueueKey()
|
||||
klog.V(4).Infof("Reconciling Klusterlet %q", klusterletName)
|
||||
func (n *klusterletController) sync(ctx context.Context, controllerContext factory.SyncContext, klusterletName string) error {
|
||||
logger := klog.FromContext(ctx).WithValues("klusterlet", klusterletName)
|
||||
logger.V(4).Info("Reconciling Klusterlet")
|
||||
originalKlusterlet, err := n.klusterletLister.Get(klusterletName)
|
||||
if errors.IsNotFound(err) {
|
||||
// Klusterlet not found, could have been deleted, do nothing.
|
||||
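Note on the hunk above: under the sdk-go basecontroller factory, the dequeued key is passed to sync as a third argument, so handlers no longer call controllerContext.QueueKey() themselves. A minimal sketch of the new handler shape (hypothetical exampleController; imports and fields elided):

// Sketch only: the factory hands each dequeued key straight to sync.
func (c *exampleController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error {
	// Bind the key to the logger once; every log line below then carries it.
	logger := klog.FromContext(ctx).WithValues("key", key)
	logger.V(4).Info("Reconciling")
	obj, err := c.lister.Get(key) // c.lister is an assumed informer lister
	if errors.IsNotFound(err) {
		return nil // object deleted, nothing to reconcile
	}
	if err != nil {
		return err
	}
	_ = obj // ... reconcile obj here ...
	return nil
}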
@@ -250,7 +250,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto

resourceRequirements, err := helpers.ResourceRequirements(klusterlet)
if err != nil {
klog.Errorf("Failed to parse resource requirements for klusterlet %s: %v", klusterlet.Name, err)
logger.Error(err, "Failed to parse resource requirements for klusterlet")
return err
}

@@ -295,7 +295,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
managedClusterClients, err := n.managedClusterClientsBuilder.
withMode(config.InstallMode).
withKubeConfigSecret(config.AgentNamespace, config.ExternalManagedKubeConfigSecret).
build(ctx)
build(ctx, controllerContext)

// update klusterletReadyToApply condition at first in hosted mode
// this conditions should be updated even when klusterlet is in deleting state.
@@ -543,7 +543,8 @@ func ensureNamespace(
kubeClient kubernetes.Interface,
klusterlet *operatorapiv1.Klusterlet,
namespace string, labels map[string]string, recorder events.Recorder) error {
_, _, err := resourceapply.ApplyNamespace(ctx, kubeClient.CoreV1(), recorder, &corev1.Namespace{
recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
_, _, err := resourceapply.ApplyNamespace(ctx, kubeClient.CoreV1(), recorderWrapper, &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Annotations: map[string]string{

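The recorderWrapper line above bridges two recorder interfaces: the sdk-go recorder's Eventf takes a context, while library-go's resourceapply helpers still expect the context-free events.Recorder. The real NewEventsRecorderWrapper lives in ocm/pkg/common/recorder; the following is only a sketch of the idea, with just one method shown:

// Hypothetical wrapper shape: capture ctx once, forward each call.
type eventsRecorderWrapper struct {
	ctx      context.Context
	delegate events.Recorder // the sdk-go basecontroller/events recorder
}

// Eventf satisfies the context-free signature that library-go expects,
// delegating to the context-aware sdk-go recorder.
func (w *eventsRecorderWrapper) Eventf(reason, messageFmt string, args ...interface{}) {
	w.delegate.Eventf(w.ctx, reason, messageFmt, args...)
}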
@@ -11,7 +11,6 @@ import (

"github.com/ghodss/yaml"
"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
@@ -40,6 +39,8 @@ import (
ocmfeature "open-cluster-management.io/api/feature"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
workapiv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/manifests"
@@ -63,7 +64,6 @@ type testController struct {
operatorClient *fakeoperatorclient.Clientset
workClient *fakeworkclient.Clientset
operatorStore cache.Store
recorder events.Recorder

managedKubeClient *fakekube.Clientset
managedApiExtensionClient *fakeapiextensions.Clientset
@@ -196,7 +196,7 @@ func newTestController(t *testing.T, klusterlet *operatorapiv1.Klusterlet, recor
operatorNamespace: "open-cluster-management",
cache: resourceapply.NewResourceCache(),
managedClusterClientsBuilder: newManagedClusterClientsBuilder(fakeKubeClient, fakeAPIExtensionClient,
fakeWorkClient.WorkV1().AppliedManifestWorks(), recorder),
fakeWorkClient.WorkV1().AppliedManifestWorks()),
enableSyncLabels: enableSyncLabels,
}

@@ -208,7 +208,7 @@ func newTestController(t *testing.T, klusterlet *operatorapiv1.Klusterlet, recor
kubeVersion: kubeVersion,
operatorNamespace: "open-cluster-management",
managedClusterClientsBuilder: newManagedClusterClientsBuilder(fakeKubeClient, fakeAPIExtensionClient,
fakeWorkClient.WorkV1().AppliedManifestWorks(), recorder),
fakeWorkClient.WorkV1().AppliedManifestWorks()),
}

store := operatorInformers.Operator().V1().Klusterlets().Informer().GetStore()
@@ -224,13 +224,11 @@ func newTestController(t *testing.T, klusterlet *operatorapiv1.Klusterlet, recor
operatorClient: fakeOperatorClient,
workClient: fakeWorkClient,
operatorStore: store,
recorder: recorder,
}
}

func newTestControllerHosted(
t *testing.T, klusterlet *operatorapiv1.Klusterlet,
recorder events.Recorder,
appliedManifestWorks []runtime.Object,
objects ...runtime.Object) *testController {
fakeKubeClient := fakekube.NewSimpleClientset(objects...)
@@ -327,7 +325,6 @@ func newTestControllerHosted(
operatorClient: fakeOperatorClient,
workClient: fakeWorkClient,
operatorStore: store,
recorder: recorder,

managedKubeClient: fakeManagedKubeClient,
managedApiExtensionClient: fakeManagedAPIExtensionClient,
@@ -340,13 +337,11 @@ func (c *testController) setDefaultManagedClusterClientsBuilder() *testControlle
c.kubeClient,
c.apiExtensionClient,
c.workClient.WorkV1().AppliedManifestWorks(),
c.recorder,
)
c.cleanupController.managedClusterClientsBuilder = newManagedClusterClientsBuilder(
c.kubeClient,
c.apiExtensionClient,
c.workClient.WorkV1().AppliedManifestWorks(),
c.recorder,
)
return c
}
@@ -619,7 +614,7 @@ func TestSyncDeploy(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, c.enableSyncLabels,
bootStrapSecret, hubKubeConfigSecret, namespace)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -700,7 +695,7 @@ func TestSyncDeploySingleton(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil,
c.enableSyncLabels, bootStrapSecret, hubKubeConfigSecret, namespace)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -771,10 +766,10 @@ func TestSyncDeployHosted(t *testing.T) {
pullSecret := newSecret(helpers.ImagePullSecret, "open-cluster-management")

syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestControllerHosted(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret,
controller := newTestControllerHosted(t, klusterlet, nil, bootStrapSecret,
hubKubeConfigSecret, namespace, pullSecret /*externalManagedSecret*/)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -867,9 +862,9 @@ func TestSyncDeployHostedCreateAgentNamespace(t *testing.T) {
Message: "Failed to build managed cluster clients: secrets \"external-managed-kubeconfig\" not found",
})
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestControllerHosted(t, klusterlet, syncContext.Recorder(), nil).setDefaultManagedClusterClientsBuilder()
controller := newTestControllerHosted(t, klusterlet, nil).setDefaultManagedClusterClientsBuilder()

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if !errors.IsNotFound(err) {
t.Errorf("Expected not found error when sync, but got %v", err)
}
@@ -896,7 +891,7 @@ func TestRemoveOldNamespace(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
bootStrapSecret, hubKubeConfigSecret, namespace, oldNamespace)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -921,7 +916,7 @@ func TestRemoveOldNamespace(t *testing.T) {
if err := controller.operatorStore.Update(klusterlet); err != nil {
t.Fatal(err)
}
err = controller.controller.sync(context.TODO(), syncContext)
err = controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -953,7 +948,7 @@ func TestSyncDisableAddonNamespace(t *testing.T) {
bootStrapSecret, hubKubeConfigSecret, namespace)
controller.controller.disableAddonNamespace = true

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1057,7 +1052,7 @@ func TestAWSIrsaAuthInSingletonModeWithInvalidClusterArns(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
assert.Equal(t, err.Error(), "HubClusterArn arn:aws:bks:us-west-2:123456789012:cluster/hub-cluster1 is not well formed")
}
@@ -1088,7 +1083,7 @@ func TestAWSIrsaAuthInSingletonMode(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1119,7 +1114,7 @@ func TestAWSIrsaAuthInNonSingletonMode(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1142,7 +1137,7 @@ func TestReplica(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1167,7 +1162,7 @@ func TestReplica(t *testing.T) {
controller.kubeClient.ClearActions()
controller.operatorClient.ClearActions()

err = controller.controller.sync(context.TODO(), syncContext)
err = controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1189,7 +1184,7 @@ func TestReplica(t *testing.T) {
controller.kubeClient.ClearActions()
controller.operatorClient.ClearActions()

err = controller.controller.sync(context.TODO(), syncContext)
err = controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1232,7 +1227,7 @@ func TestWorkConfig(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1254,7 +1249,7 @@ func TestClusterNameChange(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
bootStrapSecret, hubSecret, namespace)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1286,7 +1281,7 @@ func TestClusterNameChange(t *testing.T) {
t.Fatal(err)
}

err = controller.controller.sync(context.TODO(), syncContext)
err = controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1308,7 +1303,7 @@ func TestClusterNameChange(t *testing.T) {
})
controller.kubeClient.ClearActions()

err = controller.controller.sync(context.TODO(), syncContext)
err = controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1324,7 +1319,7 @@ func TestClusterNameChange(t *testing.T) {
t.Fatal(err)
}

err = controller.controller.sync(context.TODO(), syncContext)
err = controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1343,7 +1338,7 @@ func TestSyncWithPullSecret(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
bootStrapSecret, hubKubeConfigSecret, namespace, pullSecret)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1456,7 +1451,7 @@ func TestClusterClaimConfigInSingletonMode(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1482,7 +1477,7 @@ func TestSyncEnableClusterProperty(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
@@ -1602,7 +1597,7 @@ func (f *fakeManagedClusterBuilder) withKubeConfigSecret(_, _ string) managedClu
return f
}

func (f *fakeManagedClusterBuilder) build(_ context.Context) (*managedClusterClients, error) {
func (f *fakeManagedClusterBuilder) build(_ context.Context, _ factory.SyncContext) (*managedClusterClients, error) {
t, err := time.Parse(time.RFC3339, hostedKubeconfigCreationTime)
if err != nil {
return nil, err
@@ -1843,7 +1838,7 @@ func TestCleanWithMultipleKlusterletAgentNamespaces(t *testing.T) {
controller := newTestController(t, tt.klusterlet, syncContext.Recorder(), nil, false, tt.existingNamespaces...)

// Call the clean function through the cleanup controller
err := controller.cleanupController.sync(ctx, syncContext)
err := controller.cleanupController.sync(ctx, syncContext, "klusterlet")
if err != nil {
t.Errorf("Expected no error from cleanup sync, but got: %v", err)
}

@@ -8,13 +8,13 @@ import (
"context"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/manifests"
"open-cluster-management.io/ocm/pkg/operator/helpers"

@@ -9,7 +9,6 @@ import (
"fmt"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -21,6 +20,7 @@ import (

operatorapiv1 "open-cluster-management.io/api/operator/v1"
workapiv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/manifests"

@@ -9,7 +9,6 @@ import (
"fmt"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -18,6 +17,7 @@ import (
"k8s.io/client-go/kubernetes"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/manifests"
"open-cluster-management.io/ocm/pkg/operator/helpers"
@@ -113,7 +113,7 @@ func (r *managementReconcile) clean(ctx context.Context, klusterlet *operatorapi
if err != nil && !errors.IsNotFound(err) {
return klusterlet, reconcileStop, err
}
r.recorder.Eventf("SecretDeleted", "secret %s is deleted", secret)
r.recorder.Eventf(ctx, "SecretDeleted", "secret %s is deleted", secret)
}

// remove static file on the management cluster

@@ -10,7 +10,6 @@ import (
"strings"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -18,6 +17,7 @@ import (
"k8s.io/client-go/kubernetes"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/manifests"
"open-cluster-management.io/ocm/pkg/operator/helpers"
@@ -248,7 +248,7 @@ func (r *runtimeReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.
if err != nil && !errors.IsNotFound(err) {
return klusterlet, reconcileStop, err
}
r.recorder.Eventf("DeploymentDeleted", "deployment %s is deleted", deployment)
r.recorder.Eventf(ctx, "DeploymentDeleted", "deployment %s is deleted", deployment)
}

return klusterlet, reconcileContinue, nil

@@ -6,8 +6,6 @@ import (
"sync"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -21,6 +19,7 @@ import (
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
ocmfeature "open-cluster-management.io/api/feature"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

commonhelpers "open-cluster-management.io/ocm/pkg/common/helpers"
@@ -49,7 +48,6 @@ func NewKlusterletSSARController(
klusterletClient operatorv1client.KlusterletInterface,
klusterletInformer operatorinformer.KlusterletInformer,
secretInformers map[string]coreinformer.SecretInformer,
recorder events.Recorder,
) factory.Controller {
controller := &ssarController{
kubeClient: kubeClient,
@@ -68,7 +66,7 @@ func NewKlusterletSSARController(
secretInformers[helpers.BootstrapHubKubeConfig].Informer(),
secretInformers[helpers.ExternalManagedKubeConfig].Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, klusterletInformer.Informer()).
ToController("KlusterletSSARController", recorder)
ToController("KlusterletSSARController")
}

func (l *klusterletLocker) inSSARChecking(klusterletName string) bool {
@@ -90,10 +88,9 @@ func (l *klusterletLocker) deleteSSARChecking(klusterletName string) {
delete(l.klusterletInChecking, klusterletName)
}

func (c *ssarController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
func (c *ssarController) sync(ctx context.Context, controllerContext factory.SyncContext, klusterletName string) error {
logger := klog.FromContext(ctx)

klusterletName := controllerContext.QueueKey()
if klusterletName == "" {
return nil
}

@@ -285,7 +285,7 @@ func TestSync(t *testing.T) {
response.allowToOperateManagedClusterStatus = c.allowToOperateManagedClusterStatus
response.allowToOperateManifestWorks = c.allowToOperateManifestWorks

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, c.klusterlet.Name)
if err != nil {
t.Errorf("Expected no error when update status: %v", err)
}

@@ -5,8 +5,6 @@ import (
"fmt"
"strings"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -20,6 +18,7 @@ import (
operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -38,8 +37,7 @@ func NewKlusterletStatusController(
kubeClient kubernetes.Interface,
klusterletClient operatorv1client.KlusterletInterface,
klusterletInformer operatorinformer.KlusterletInformer,
deploymentInformer appsinformer.DeploymentInformer,
recorder events.Recorder) factory.Controller {
deploymentInformer appsinformer.DeploymentInformer) factory.Controller {
controller := &klusterletStatusController{
kubeClient: kubeClient,
patcher: patcher.NewPatcher[
@@ -50,15 +48,15 @@ func NewKlusterletStatusController(
return factory.New().WithSync(controller.sync).
WithInformersQueueKeysFunc(helpers.KlusterletDeploymentQueueKeyFunc(controller.klusterletLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, klusterletInformer.Informer()).
ToController("KlusterletStatusController", recorder)
ToController("KlusterletStatusController")
}

func (k *klusterletStatusController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
klusterletName := controllerContext.QueueKey()
func (k *klusterletStatusController) sync(ctx context.Context, controllerContext factory.SyncContext, klusterletName string) error {
logger := klog.FromContext(ctx).WithValues("klusterlet", klusterletName)
if klusterletName == "" {
return nil
}
klog.V(4).Infof("Reconciling Klusterlet %q", klusterletName)
logger.V(4).Info("Reconciling Klusterlet")

klusterlet, err := k.klusterletLister.Get(klusterletName)
switch {

@@ -194,7 +194,7 @@ func TestSync(t *testing.T) {
controller := newTestController(t, c.klusterlet, c.object...)
syncContext := testingcommon.NewFakeSyncContext(t, c.klusterlet.Name)

err := controller.controller.sync(context.TODO(), syncContext)
err := controller.controller.sync(context.TODO(), syncContext, c.klusterlet.Name)
if err != nil {
t.Errorf("Expected no error when update status: %v", err)
}

@@ -111,8 +111,7 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
o.ControlPlaneNodeLabelSelector,
o.DeploymentReplicas,
o.DisableAddonNamespace,
o.EnableSyncLabels,
controllerContext.EventRecorder)
o.EnableSyncLabels)

klusterletCleanupController := klusterletcontroller.NewKlusterletCleanupController(
kubeClient,
@@ -126,15 +125,13 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
helpers.GetOperatorNamespace(),
o.ControlPlaneNodeLabelSelector,
o.DeploymentReplicas,
o.DisableAddonNamespace,
controllerContext.EventRecorder)
o.DisableAddonNamespace)

ssarController := ssarcontroller.NewKlusterletSSARController(
kubeClient,
operatorClient.OperatorV1().Klusterlets(),
operatorInformer.Operator().V1().Klusterlets(),
secretInformers,
controllerContext.EventRecorder,
)

statusController := statuscontroller.NewKlusterletStatusController(
@@ -142,14 +139,12 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
operatorClient.OperatorV1().Klusterlets(),
operatorInformer.Operator().V1().Klusterlets(),
deploymentInformer.Apps().V1().Deployments(),
controllerContext.EventRecorder,
)

addonController := addonsecretcontroller.NewAddonPullImageSecretController(
kubeClient,
helpers.GetOperatorNamespace(),
kubeInformer.Core().V1().Namespaces(),
controllerContext.EventRecorder,
)

go operatorInformer.Start(ctx.Done())

@@ -13,8 +13,8 @@ import (
clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned"
clusterscheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"

"open-cluster-management.io/ocm/pkg/common/recorder"
"open-cluster-management.io/ocm/pkg/placement/controllers/metrics"
"open-cluster-management.io/ocm/pkg/placement/controllers/scheduling"
"open-cluster-management.io/ocm/pkg/placement/debugger"
@@ -44,7 +44,7 @@ func RunControllerManagerWithInformers(
clusterClient clusterclient.Interface,
clusterInformers clusterinformers.SharedInformerFactory,
) error {
recorder, err := recorder.NewEventRecorder(ctx, clusterscheme.Scheme, kubeClient.EventsV1(), "placement-controller")
recorder, err := events.NewEventRecorder(ctx, clusterscheme.Scheme, kubeClient.EventsV1(), "placement-controller")
if err != nil {
return err
}
@@ -80,7 +80,7 @@ func RunControllerManagerWithInformers(
clusterInformers.Cluster().V1beta1().PlacementDecisions(),
clusterInformers.Cluster().V1alpha1().AddOnPlacementScores(),
scheduler,
controllerContext.EventRecorder, recorder, metrics,
recorder, metrics,
)

go clusterInformers.Start(ctx.Done())

@@ -111,7 +111,7 @@ func TestOnClusterChange(t *testing.T) {
clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings(),
)
queuedKeys := sets.NewString()
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.RateLimitingInterface) {
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.TypedRateLimitingInterface[string]) {
key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
queuedKeys.Insert(key)
}
@@ -275,7 +275,7 @@ func TestOnClusterUpdate(t *testing.T) {
clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings(),
)
queuedKeys := sets.NewString()
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.RateLimitingInterface) {
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.TypedRateLimitingInterface[string]) {
key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
queuedKeys.Insert(key)
}
@@ -379,7 +379,7 @@ func TestOnClusterDelete(t *testing.T) {
clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings(),
)
queuedKeys := sets.NewString()
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.RateLimitingInterface) {
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.TypedRateLimitingInterface[string]) {
key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
queuedKeys.Insert(key)
}

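As the hunks above show, the placement manager now constructs its recorder from sdk-go's basecontroller/events package rather than the local ocm recorder package. A usage sketch with the same arguments as the call site (the reason and message strings below are illustrative, not from this commit):

// Sketch: build the context-aware recorder, then emit an event through it.
recorder, err := events.NewEventRecorder(ctx, clusterscheme.Scheme, kubeClient.EventsV1(), "placement-controller")
if err != nil {
	return err
}
recorder.Eventf(ctx, "ManagerStarted", "placement controller manager started")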
@@ -32,8 +32,8 @@ const (

type enqueuer struct {
logger klog.Logger
queue workqueue.RateLimitingInterface
enqueuePlacementFunc func(obj interface{}, queue workqueue.RateLimitingInterface)
queue workqueue.TypedRateLimitingInterface[string]
enqueuePlacementFunc func(obj interface{}, queue workqueue.TypedRateLimitingInterface[string])

clusterLister clusterlisterv1.ManagedClusterLister
clusterSetLister clusterlisterv1beta2.ManagedClusterSetLister
@@ -43,7 +43,7 @@ type enqueuer struct {

func newEnqueuer(
ctx context.Context,
queue workqueue.RateLimitingInterface,
queue workqueue.TypedRateLimitingInterface[string],
clusterInformer clusterinformerv1.ManagedClusterInformer,
clusterSetInformer clusterinformerv1beta2.ManagedClusterSetInformer,
placementInformer clusterinformerv1beta1.PlacementInformer,
@@ -74,7 +74,7 @@ func newEnqueuer(
}
}

func enqueuePlacement(obj interface{}, queue workqueue.RateLimitingInterface) {
func enqueuePlacement(obj interface{}, queue workqueue.TypedRateLimitingInterface[string]) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(err)

@@ -199,7 +199,7 @@ func TestEnqueuePlacementsByClusterSet(t *testing.T) {
clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings(),
)
queuedKeys := sets.NewString()
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.RateLimitingInterface) {
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.TypedRateLimitingInterface[string]) {
key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
queuedKeys.Insert(key)
}
@@ -308,7 +308,7 @@ func TestEnqueuePlacementsByClusterSetBinding(t *testing.T) {
clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings(),
)
queuedKeys := sets.NewString()
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.RateLimitingInterface) {
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.TypedRateLimitingInterface[string]) {
key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
queuedKeys.Insert(key)
}
@@ -398,7 +398,7 @@ func TestEnqueuePlacementsByScore(t *testing.T) {
clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings(),
)
queuedKeys := sets.NewString()
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.RateLimitingInterface) {
fakeEnqueuePlacement := func(obj interface{}, queue workqueue.TypedRateLimitingInterface[string]) {
key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
queuedKeys.Insert(key)
}

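The enqueuer hunks above also migrate from the untyped workqueue.RateLimitingInterface to client-go's generic TypedRateLimitingInterface[string], so keys come out as strings with no type assertion. A minimal sketch of constructing and draining such a queue (assumes a client-go version that ships the typed workqueue API):

// Sketch: a typed rate-limiting queue holding namespace/name string keys.
queue := workqueue.NewTypedRateLimitingQueue[string](
	workqueue.DefaultTypedControllerRateLimiter[string]())
queue.Add("ns1/placement1")
key, shutdown := queue.Get() // key is already a string
if !shutdown {
	// ... process key, then mark it done ...
	queue.Done(key)
}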
@@ -9,8 +9,6 @@ import (
"strconv"
"strings"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
errorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -38,6 +36,7 @@ import (
clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
clusterapiv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
clustersdkv1beta2 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta2"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -84,10 +83,10 @@ func NewSchedulingController(
placementDecisionInformer clusterinformerv1beta1.PlacementDecisionInformer,
placementScoreInformer clusterinformerv1alpha1.AddOnPlacementScoreInformer,
scheduler Scheduler,
recorder events.Recorder, krecorder kevents.EventRecorder,
krecorder kevents.EventRecorder,
metricsRecorder *metrics.ScheduleMetrics,
) factory.Controller {
syncCtx := factory.NewSyncContext(schedulingControllerName, recorder)
syncCtx := factory.NewSyncContext(schedulingControllerName)

enQueuer := newEnqueuer(ctx, syncCtx.Queue(), clusterInformer, clusterSetInformer, placementInformer, clusterSetBindingInformer)

@@ -168,22 +167,22 @@ func NewSchedulingController(
WithInformersQueueKeysFunc(
queue.QueueKeyByMetaNamespaceName,
placementInformer.Informer()).
WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string {
WithFilteredEventsInformersQueueKeysFunc(func(obj runtime.Object) []string {
accessor, _ := meta.Accessor(obj)
placementName := accessor.GetLabels()[clusterapiv1beta1.PlacementLabel]
return fmt.Sprintf("%s/%s", accessor.GetNamespace(), placementName)
return []string{fmt.Sprintf("%s/%s", accessor.GetNamespace(), placementName)}
},
queue.FileterByLabel(clusterapiv1beta1.PlacementLabel),
placementDecisionInformer.Informer()).
WithBareInformers(clusterInformer.Informer(), clusterSetInformer.Informer(), clusterSetBindingInformer.Informer(), placementScoreInformer.Informer()).
WithSync(c.sync).
ToController(schedulingControllerName, recorder)
ToController(schedulingControllerName)
}

func (c *schedulingController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
logger := klog.FromContext(ctx)
queueKey := syncCtx.QueueKey()
logger.V(4).Info("Reconciling placement", "queueKey", queueKey)
func (c *schedulingController) sync(ctx context.Context, syncCtx factory.SyncContext, queueKey string) error {
logger := klog.FromContext(ctx).WithValues("queueKey", queueKey)
logger.V(4).Info("Reconciling placement")
ctx = klog.NewContext(ctx, logger)

placement, err := c.getPlacement(queueKey)
if errors.IsNotFound(err) {

@@ -570,8 +570,9 @@ func TestSchedulingController_sync(t *testing.T) {
metricsRecorder: metrics.NewScheduleMetrics(clock.RealClock{}),
}

sysCtx := testingcommon.NewFakeSyncContext(t, c.placement.Namespace+"/"+c.placement.Name)
syncErr := ctrl.sync(context.TODO(), sysCtx)
key := c.placement.Namespace + "/" + c.placement.Name
sysCtx := testingcommon.NewFakeSyncContext(t, key)
syncErr := ctrl.sync(context.TODO(), sysCtx, key)
if syncErr != nil {
t.Errorf("unexpected err: %v", syncErr)
}

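One behavioral detail in the scheduling-controller hunk above: WithFilteredEventsInformersQueueKeyFunc (one key per event) becomes WithFilteredEventsInformersQueueKeysFunc (a []string per event), so a single object event can fan out to several queue keys, and an empty slice enqueues nothing. A sketch of such a keys function, mirroring the one in the hunk:

// Sketch: map a PlacementDecision event to its owning placement's queue key.
keysFn := func(obj runtime.Object) []string {
	accessor, _ := meta.Accessor(obj)
	name := accessor.GetLabels()[clusterapiv1beta1.PlacementLabel]
	if name == "" {
		return nil // no placement label: enqueue nothing
	}
	return []string{fmt.Sprintf("%s/%s", accessor.GetNamespace(), name)}
}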
@@ -5,8 +5,6 @@ import (
"fmt"
"strings"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -20,6 +18,7 @@ import (
clusterv1informer "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
clusterv1listers "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -38,7 +37,6 @@ type addOnFeatureDiscoveryController struct {
patcher patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
clusterLister clusterv1listers.ManagedClusterLister
addOnLister addonlisterv1alpha1.ManagedClusterAddOnLister
recorder events.Recorder
}

// NewAddOnFeatureDiscoveryController returns an instance of addOnFeatureDiscoveryController
@@ -46,7 +44,6 @@ func NewAddOnFeatureDiscoveryController(
clusterClient clientset.Interface,
clusterInformer clusterv1informer.ManagedClusterInformer,
addOnInformers addoninformerv1alpha1.ManagedClusterAddOnInformer,
recorder events.Recorder,
) factory.Controller {
c := &addOnFeatureDiscoveryController{
patcher: patcher.NewPatcher[
@@ -54,7 +51,6 @@ func NewAddOnFeatureDiscoveryController(
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
addOnLister: addOnInformers.Lister(),
recorder: recorder,
}

return factory.New().
@@ -65,12 +61,10 @@ func NewAddOnFeatureDiscoveryController(
queue.QueueKeyByMetaNamespace,
addOnInformers.Informer()).
WithSync(c.sync).
ToController("AddOnFeatureDiscoveryController", recorder)
ToController("AddOnFeatureDiscoveryController")
}

func (c *addOnFeatureDiscoveryController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
queueKey := syncCtx.QueueKey()

func (c *addOnFeatureDiscoveryController) sync(ctx context.Context, syncCtx factory.SyncContext, queueKey string) error {
switch {
case queueKey == factory.DefaultQueueKey:
// no need to resync

@@ -280,7 +280,7 @@ func TestDiscoveryController_Sync(t *testing.T) {
addOnLister: addOnInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Lister(),
}

err := controller.sync(context.Background(), testingcommon.NewFakeSyncContext(t, c.queueKey))
err := controller.sync(context.Background(), testingcommon.NewFakeSyncContext(t, c.queueKey), c.queueKey)
if err != nil {
t.Errorf("unexpected err: %v", err)
}

@@ -3,8 +3,6 @@ package addon
import (
"context"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -18,6 +16,7 @@ import (
clusterinformerv1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -34,8 +33,7 @@ type managedClusterAddOnHealthCheckController struct {
// NewManagedClusterAddOnHealthCheckController returns an instance of managedClusterAddOnHealthCheckController
func NewManagedClusterAddOnHealthCheckController(addOnClient addonclient.Interface,
addOnInformer addoninformerv1alpha1.ManagedClusterAddOnInformer,
clusterInformer clusterinformerv1.ManagedClusterInformer,
recorder events.Recorder) factory.Controller {
clusterInformer clusterinformerv1.ManagedClusterInformer) factory.Controller {
c := &managedClusterAddOnHealthCheckController{
addOnClient: addOnClient,
addOnLister: addOnInformer.Lister(),
@@ -45,11 +43,10 @@ func NewManagedClusterAddOnHealthCheckController(addOnClient addonclient.Interfa
return factory.New().
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterInformer.Informer()).
WithSync(c.sync).
ToController("ManagedClusterAddonHealthCheckController", recorder)
ToController("ManagedClusterAddonHealthCheckController")
}

func (c *managedClusterAddOnHealthCheckController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
managedClusterName := syncCtx.QueueKey()
func (c *managedClusterAddOnHealthCheckController) sync(ctx context.Context, syncCtx factory.SyncContext, managedClusterName string) error {
managedCluster, err := c.clusterLister.Get(managedClusterName)
if errors.IsNotFound(err) {
// Managed cluster is not found, could have been deleted, do nothing.
@@ -102,7 +99,7 @@ func (c *managedClusterAddOnHealthCheckController) sync(ctx context.Context, syn
errs = append(errs, err)
}
if updated {
syncCtx.Recorder().Eventf("ManagedClusterAddOnStatusUpdated", "update addon %q status to unknown on managed cluster %q",
syncCtx.Recorder().Eventf(ctx, "ManagedClusterAddOnStatusUpdated", "update addon %q status to unknown on managed cluster %q",
addOn.Name, managedClusterName)
}
}

@@ -105,7 +105,9 @@ func TestSync(t *testing.T) {
clusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
}

syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, testinghelpers.TestManagedClusterName))
syncErr := ctrl.sync(context.TODO(),
testingcommon.NewFakeSyncContext(t, testinghelpers.TestManagedClusterName),
testinghelpers.TestManagedClusterName)
if syncErr != nil {
t.Errorf("unexpected err: %v", syncErr)
}

@@ -3,8 +3,6 @@ package clusterprofile
import (
"context"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -19,6 +17,7 @@ import (
listerv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
v1 "open-cluster-management.io/api/cluster/v1"
v1beta2 "open-cluster-management.io/api/cluster/v1beta2"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -35,15 +34,13 @@ type clusterProfileController struct {
clusterProfileClient cpclientset.Interface
clusterProfileLister cplisterv1alpha1.ClusterProfileLister
patcher patcher.Patcher[*cpv1alpha1.ClusterProfile, cpv1alpha1.ClusterProfileSpec, cpv1alpha1.ClusterProfileStatus]
eventRecorder events.Recorder
}

// NewClusterProfileController creates a new managed cluster controller
func NewClusterProfileController(
clusterInformer informerv1.ManagedClusterInformer,
clusterProfileClient cpclientset.Interface,
clusterProfileInformer cpinformerv1alpha1.ClusterProfileInformer,
recorder events.Recorder) factory.Controller {
clusterProfileInformer cpinformerv1alpha1.ClusterProfileInformer) factory.Controller {
c := &clusterProfileController{
clusterLister: clusterInformer.Lister(),
clusterProfileClient: clusterProfileClient,
@@ -51,19 +48,17 @@ func NewClusterProfileController(
patcher: patcher.NewPatcher[
*cpv1alpha1.ClusterProfile, cpv1alpha1.ClusterProfileSpec, cpv1alpha1.ClusterProfileStatus](
clusterProfileClient.ApisV1alpha1().ClusterProfiles(ClusterProfileNamespace)),
eventRecorder: recorder.WithComponentSuffix("cluster-profile-controller"),
}

return factory.New().
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterInformer.Informer(), clusterProfileInformer.Informer()).
WithSync(c.sync).
ToController("ClusterProfileController", recorder)
ToController("ClusterProfileController")
}

func (c *clusterProfileController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
managedClusterName := syncCtx.QueueKey()
logger := klog.FromContext(ctx)
logger.V(4).Info("Reconciling Cluster", "ClusterName", managedClusterName)
func (c *clusterProfileController) sync(ctx context.Context, syncCtx factory.SyncContext, managedClusterName string) error {
logger := klog.FromContext(ctx).WithValues("managedClusterName", managedClusterName)
logger.V(4).Info("Reconciling Cluster")

managedCluster, err := c.clusterLister.Get(managedClusterName)
if errors.IsNotFound(err) {
@@ -169,7 +164,7 @@ func (c *clusterProfileController) sync(ctx context.Context, syncCtx factory.Syn
return err
}
if updated {
c.eventRecorder.Eventf("ClusterProfileSynced", "cluster profile %s is synced from open cluster management", managedClusterName)
syncCtx.Recorder().Eventf(ctx, "ClusterProfileSynced", "cluster profile %s is synced from open cluster management", managedClusterName)
}
return nil
}

@@ -7,7 +7,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clienttesting "k8s.io/client-go/testing"
@@ -250,9 +249,8 @@ func TestSyncClusterProfile(t *testing.T) {
patcher.NewPatcher[
*cpv1alpha1.ClusterProfile, cpv1alpha1.ClusterProfileSpec, cpv1alpha1.ClusterProfileStatus](
clusterProfileClient.ApisV1alpha1().ClusterProfiles(ClusterProfileNamespace)),
eventstesting.NewTestingEventRecorder(t),
}
syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, testinghelpers.TestManagedClusterName))
syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, testinghelpers.TestManagedClusterName), testinghelpers.TestManagedClusterName)
if syncErr != nil {
t.Errorf("unexpected err: %v", syncErr)
}

@@ -4,8 +4,6 @@ import (
"context"
"fmt"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
"k8s.io/apimachinery/pkg/labels"
@@ -14,9 +12,11 @@ import (

clusterv1informer "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
clusterv1listers "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

"open-cluster-management.io/ocm/pkg/common/apply"
"open-cluster-management.io/ocm/pkg/common/queue"
commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
"open-cluster-management.io/ocm/pkg/registration/helpers"
"open-cluster-management.io/ocm/pkg/registration/hub/manifests"
)
@@ -32,7 +32,6 @@ type clusterroleController struct {
clusterLister clusterv1listers.ManagedClusterLister
applier *apply.PermissionApplier
cache resourceapply.ResourceCache
eventRecorder events.Recorder
labels map[string]string
}

@@ -41,7 +40,6 @@ func NewManagedClusterClusterroleController(
kubeClient kubernetes.Interface,
clusterInformer clusterv1informer.ManagedClusterInformer,
clusterRoleInformer rbacv1informers.ClusterRoleInformer,
recorder events.Recorder,
labels map[string]string) factory.Controller {

// Creating a deep copy of the labels to avoid controllers from reading the same map concurrently.
@@ -60,8 +58,7 @@ func NewManagedClusterClusterroleController(
clusterRoleInformer.Lister(),
nil,
),
eventRecorder: recorder.WithComponentSuffix("managed-cluster-clusterrole-controller"),
labels: deepCopyLabels,
labels: deepCopyLabels,
}
return factory.New().
WithFilteredEventsInformers(
@@ -69,10 +66,10 @@ func NewManagedClusterClusterroleController(
clusterRoleInformer.Informer()).
WithInformers(clusterInformer.Informer()).
WithSync(c.sync).
ToController("ManagedClusterClusterRoleController", recorder)
ToController("ManagedClusterClusterRoleController")
}

func (c *clusterroleController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
func (c *clusterroleController) sync(ctx context.Context, syncCtx factory.SyncContext, _ string) error {
managedClusters, err := c.clusterLister.List(labels.Everything())
if err != nil {
return err
@@ -80,12 +77,13 @@ func (c *clusterroleController) sync(ctx context.Context, syncCtx factory.SyncCo
var errs []error
assetFn := helpers.ManagedClusterAssetFnWithAccepted(manifests.RBACManifests, "", false, c.labels)

recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, syncCtx.Recorder())
// Clean up managedcluser cluserroles if there are no managed clusters
if len(managedClusters) == 0 {
results := resourceapply.DeleteAll(
ctx,
resourceapply.NewKubeClientHolder(c.kubeClient),
c.eventRecorder,
recorderWrapper,
assetFn,
manifests.CommonClusterRoleFiles...,
)

@@ -5,7 +5,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -118,11 +117,12 @@ func TestSyncManagedClusterClusterRole(t *testing.T) {
),
clusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
cache: resourceapply.NewResourceCache(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
labels: c.labels,
}

syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, "testmangedclsuterclusterrole"))
syncErr := ctrl.sync(context.TODO(),
testingcommon.NewFakeSyncContext(t, "testmangedclsuterclusterrole"),
"testmangedclsuterclusterrole")
if syncErr != nil {
t.Errorf("unexpected err: %v", syncErr)
}

@@ -5,8 +5,6 @@ import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/openshift/library-go/pkg/controller/factory"
|
||||
"github.com/openshift/library-go/pkg/operator/events"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -18,6 +16,7 @@ import (
|
||||
informerv1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
|
||||
clusterv1listers "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
|
||||
clusterv1 "open-cluster-management.io/api/cluster/v1"
|
||||
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
|
||||
"open-cluster-management.io/sdk-go/pkg/patcher"
|
||||
|
||||
commonhelper "open-cluster-management.io/ocm/pkg/common/helpers"
|
||||
@@ -35,7 +34,6 @@ type GCController struct {
|
||||
clusterLister clusterv1listers.ManagedClusterLister
|
||||
clusterPatcher patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
|
||||
gcResourcesController *gcResourcesController
|
||||
eventRecorder events.Recorder
|
||||
}
|
||||
|
||||
// NewGCController ensures the related resources are cleaned up after cluster is deleted
|
||||
@@ -43,7 +41,6 @@ func NewGCController(
|
||||
clusterInformer informerv1.ManagedClusterInformer,
|
||||
clusterClient clientset.Interface,
|
||||
metadataClient metadata.Interface,
|
||||
eventRecorder events.Recorder,
|
||||
gcResourceList []string,
|
||||
) factory.Controller {
|
||||
clusterPatcher := patcher.NewPatcher[
|
||||
@@ -53,7 +50,6 @@ func NewGCController(
|
||||
controller := &GCController{
|
||||
clusterLister: clusterInformer.Lister(),
|
||||
clusterPatcher: clusterPatcher,
|
||||
eventRecorder: eventRecorder.WithComponentSuffix("gc-resources"),
|
||||
}
|
||||
if len(gcResourceList) != 0 {
|
||||
gcResources := []schema.GroupVersionResource{}
|
||||
@@ -71,14 +67,13 @@ func NewGCController(
|
||||
|
||||
return factory.New().
|
||||
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterInformer.Informer()).
|
||||
WithSync(controller.sync).ToController("GCController", eventRecorder)
|
||||
WithSync(controller.sync).ToController("GCController")
|
||||
}
|
||||
|
||||
// gc controller is watching cluster and to do these jobs:
|
||||
// 1. add a cleanup finalizer to managedCluster if the cluster is not deleting.
|
||||
// 2. clean up the resources in the cluster ns after the cluster is deleted.
|
||||
func (r *GCController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
|
||||
clusterName := controllerContext.QueueKey()
|
||||
func (r *GCController) sync(ctx context.Context, syncCtx factory.SyncContext, clusterName string) error {
|
||||
if clusterName == "" || clusterName == factory.DefaultQueueKey {
|
||||
return nil
|
||||
}
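A pattern worth calling out once, since it repeats in every controller below: the sync callback gains a third parameter carrying the dequeued key, replacing reads of syncCtx.QueueKey(). The before and after shapes, with the sdk-go declaration inferred from the call sites in this diff rather than quoted from the library:

    package factoryshapes // illustrative

    import (
        "context"

        "open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
    )

    // Library-go shape: the handler reads the key from the sync context
    // itself, e.g. key := controllerContext.QueueKey().
    type twoArgSyncFunc func(ctx context.Context, controllerContext factory.SyncContext) error

    // sdk-go basecontroller shape (assumed): the dequeued key arrives as an
    // explicit argument, and a resync pass is signalled with
    // factory.DefaultQueueKey instead of a real object key.
    type threeArgSyncFunc func(ctx context.Context, syncContext factory.SyncContext, key string) error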
@@ -99,14 +94,14 @@ func (r *GCController) sync(ctx context.Context, controllerContext factory.SyncC
gcErr := r.gcResourcesController.reconcile(ctx, copyCluster, clusterName)
if cluster == nil {
if errors.Is(gcErr, requeueError) {
controllerContext.Queue().AddAfter(clusterName, requeueError.RequeueTime)
syncCtx.Queue().AddAfter(clusterName, requeueError.RequeueTime)
return nil
}
return gcErr
}

if gcErr != nil && !errors.Is(gcErr, requeueError) {
r.eventRecorder.Eventf("ResourceCleanupFail",
syncCtx.Recorder().Eventf(ctx, "ResourceCleanupFail",
"failed to cleanup resources in cluster %s:%v", cluster.Name, gcErr)

meta.SetStatusCondition(&copyCluster.Status.Conditions, metav1.Condition{
@@ -122,14 +117,14 @@ func (r *GCController) sync(ctx context.Context, controllerContext factory.SyncC
}

if errors.Is(gcErr, requeueError) {
controllerContext.Queue().AddAfter(clusterName, requeueError.RequeueTime)
syncCtx.Queue().AddAfter(clusterName, requeueError.RequeueTime)
return nil
}
if gcErr != nil {
return gcErr
}

r.eventRecorder.Eventf("ResourceCleanupCompleted",
syncCtx.Recorder().Eventf(ctx, "ResourceCleanupCompleted",
"resources in cluster %s are cleaned up", cluster.Name)

return r.clusterPatcher.RemoveFinalizer(ctx, cluster, commonhelper.GcFinalizer)
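The requeue branches above bypass the rate limiter deliberately: on a sentinel error the key is re-added after a fixed delay and the sync reports success. A small self-contained sketch of the pattern, with an assumed stand-in for ocm's requeue sentinel:

    package gc // illustrative

    import (
        "errors"
        "time"

        "open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
    )

    // RequeueError mirrors the sentinel used above: an error value carrying
    // the delay after which the key should be retried. The real type lives in
    // ocm's helpers package; this is an assumed stand-in.
    type RequeueError struct{ RequeueTime time.Duration }

    func (e *RequeueError) Error() string { return "requeue requested" }

    var requeueError = &RequeueError{RequeueTime: 5 * time.Second}

    // requeueOrReturn re-adds the key after the sentinel's delay and reports
    // success, so planned retries skip the queue's backoff; any other error
    // propagates and is retried with the default rate limiting.
    func requeueOrReturn(syncCtx factory.SyncContext, key string, err error) error {
        if errors.Is(err, requeueError) {
            syncCtx.Queue().AddAfter(key, requeueError.RequeueTime)
            return nil
        }
        return err
    }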
@@ -6,21 +6,19 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
fakemetadataclient "k8s.io/client-go/metadata/fake"
clienttesting "k8s.io/client-go/testing"
clocktesting "k8s.io/utils/clock/testing"

addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
fakeclusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1 "open-cluster-management.io/api/cluster/v1"
workv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

commonhelpers "open-cluster-management.io/ocm/pkg/common/helpers"
@@ -146,7 +144,6 @@ func TestGController(t *testing.T) {
clusterInformerFactory.Cluster().V1().ManagedClusters(),
clusterClient,
metadataClient,
events.NewInMemoryRecorder("", clocktesting.NewFakePassiveClock(time.Now())),
[]string{"addon.open-cluster-management.io/v1alpha1/managedclusteraddons",
"work.open-cluster-management.io/v1/manifestworks"},
)
@@ -159,11 +156,10 @@ func TestGController(t *testing.T) {
clusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
clusterPatcher: clusterPatcher,
gcResourcesController: newGCResourcesController(metadataClient, []schema.GroupVersionResource{addonGvr, workGvr}),
eventRecorder: events.NewInMemoryRecorder("", clocktesting.NewFakePassiveClock(time.Now())),
}

controllerContext := testingcommon.NewFakeSyncContext(t, c.key)
err := ctrl.sync(context.TODO(), controllerContext)
err := ctrl.sync(context.TODO(), controllerContext, c.key)
if err != nil && !errors.Is(err, requeueError) {
t.Error(err)
}

@@ -6,8 +6,6 @@ import (
"fmt"

"github.com/openshift/api"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/pkg/operator/resource/resourcehelper"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
@@ -30,10 +28,13 @@ import (
operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
v1 "open-cluster-management.io/api/cluster/v1"
operatorv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/helpers"
"open-cluster-management.io/ocm/pkg/common/queue"
commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
"open-cluster-management.io/ocm/pkg/operator/helpers/chart"
cloudproviders "open-cluster-management.io/ocm/pkg/registration/hub/importer/providers"
)
@@ -81,10 +82,9 @@ func NewImporter(
renders []KlusterletConfigRenderer,
clusterClient clusterclientset.Interface,
clusterInformer clusterinformerv1.ManagedClusterInformer,
providers []cloudproviders.Interface,
recorder events.Recorder) factory.Controller {
providers []cloudproviders.Interface) factory.Controller {
controllerName := "managed-cluster-importer"
syncCtx := factory.NewSyncContext(controllerName, recorder)
syncCtx := factory.NewSyncContext(controllerName)

i := &Importer{
providers: providers,
@@ -101,13 +101,12 @@ func NewImporter(
}

return factory.New().WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterInformer.Informer()).
WithSyncContext(syncCtx).WithSync(i.sync).ToController(controllerName, recorder)
WithSyncContext(syncCtx).WithSync(i.sync).ToController(controllerName)
}
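Constructors no longer accept a recorder at all: NewSyncContext takes only a name, and ToController drops its recorder parameter. A condensed sketch of the resulting wiring, assuming factory.SyncFunc names the three-argument callback type (the controller name and informer are placeholders):

    package wiring // illustrative

    import (
        "open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

        "open-cluster-management.io/ocm/pkg/common/queue"
    )

    // newExampleController shows the post-change wiring: a named sync context
    // is created first (no recorder argument) so its queue can be handed to
    // event handlers registered by hand, then the controller is built without
    // a recorder.
    func newExampleController(informer factory.Informer, sync factory.SyncFunc) factory.Controller {
        syncCtx := factory.NewSyncContext("example-controller")
        return factory.New().
            WithSyncContext(syncCtx).
            WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, informer).
            WithSync(sync).
            ToController("example-controller")
    }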
func (i *Importer) sync(ctx context.Context, syncCtx factory.SyncContext) error {
clusterName := syncCtx.QueueKey()
logger := klog.FromContext(ctx)
logger.V(4).Info("Reconciling key", "clusterName", clusterName)
func (i *Importer) sync(ctx context.Context, syncCtx factory.SyncContext, clusterName string) error {
logger := klog.FromContext(ctx).WithValues("managedClusterName", clusterName)
logger.V(4).Info("Reconciling key")

cluster, err := i.clusterLister.Get(clusterName)
switch {
@@ -142,7 +141,7 @@ func (i *Importer) sync(ctx context.Context, syncCtx factory.SyncContext) error
return updatedErr
}
if updated {
syncCtx.Recorder().Eventf(
syncCtx.Recorder().Eventf(ctx,
"ManagedClusterImported", "managed cluster %s is imported", clusterName)
}
var rqe helpers.RequeueError
@@ -160,6 +159,7 @@ func (i *Importer) reconcile(
recorder events.Recorder,
provider cloudproviders.Interface,
cluster *v1.ManagedCluster) (*v1.ManagedCluster, error) {
recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
clients, err := provider.Clients(ctx, cluster)
if err != nil {
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
@@ -227,14 +227,14 @@ func (i *Importer) reconcile(
switch t := requiredObj.(type) {
case *appsv1.Deployment:
result.Result, result.Changed, result.Error = resourceapply.ApplyDeployment(
ctx, clients.KubeClient.AppsV1(), recorder, t, 0)
ctx, clients.KubeClient.AppsV1(), recorderWrapper, t, 0)
results = append(results, result)
case *operatorv1.Klusterlet:
result.Result, result.Changed, result.Error = ApplyKlusterlet(
ctx, clients.OperatorClient, recorder, t)
results = append(results, result)
default:
tempResults := resourceapply.ApplyDirectly(ctx, clientHolder, recorder, cache,
tempResults := resourceapply.ApplyDirectly(ctx, clientHolder, recorderWrapper, cache,
func(name string) ([]byte, error) {
return manifest, nil
},
@@ -273,11 +273,12 @@ func ApplyKlusterlet(
client operatorclient.Interface,
recorder events.Recorder,
required *operatorv1.Klusterlet) (*operatorv1.Klusterlet, bool, error) {
recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
existing, err := client.OperatorV1().Klusterlets().Get(ctx, required.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
requiredCopy := required.DeepCopy()
actual, err := client.OperatorV1().Klusterlets().Create(ctx, requiredCopy, metav1.CreateOptions{})
resourcehelper.ReportCreateEvent(recorder, required, err)
resourcehelper.ReportCreateEvent(recorderWrapper, required, err)
return actual, true, err
}
if err != nil {
@@ -294,6 +295,6 @@ func ApplyKlusterlet(

existingCopy.Spec = required.Spec
actual, err := client.OperatorV1().Klusterlets().Update(ctx, existingCopy, metav1.UpdateOptions{})
resourcehelper.ReportUpdateEvent(recorder, required, err)
resourcehelper.ReportUpdateEvent(recorderWrapper, required, err)
return actual, true, err
}

@@ -6,7 +6,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
fakeapiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -19,6 +18,7 @@ import (
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
fakeoperatorclient "open-cluster-management.io/api/client/operator/clientset/versioned/fake"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/helpers"
@@ -126,7 +126,7 @@ func TestSync(t *testing.T) {
*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
clusterClient.ClusterV1().ManagedClusters()),
}
err := importer.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, c.key))
err := importer.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, c.key), c.key)
if err != nil {
t.Fatal(err)
}

@@ -5,7 +5,6 @@ import (
"fmt"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -23,6 +22,7 @@ import (

clusterinformerv1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

"open-cluster-management.io/ocm/pkg/common/helpers"
"open-cluster-management.io/ocm/pkg/registration/hub/importer/providers"

@@ -5,8 +5,6 @@ import (
"testing"

"github.com/ghodss/yaml"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -19,6 +17,7 @@ import (
fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"

testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
)
@@ -71,15 +70,15 @@ func TestEnqueu(t *testing.T) {
provider := &CAPIProvider{
managedClusterIndexer: clusterInformer.Informer().GetIndexer(),
}
syncCtx := factory.NewSyncContext("test", eventstesting.NewTestingEventRecorder(t))
syncCtx := factory.NewSyncContext("test")
provider.enqueueManagedClusterByCAPI(&metav1.PartialObjectMetadata{
ObjectMeta: metav1.ObjectMeta{
Name: c.capiName,
Namespace: c.capiNamespace,
},
}, syncCtx)
if i, _ := syncCtx.Queue().Get(); i.(string) != c.expectedKey {
t.Errorf("expected key %s but got %s", c.expectedKey, syncCtx.QueueKey())
if i, _ := syncCtx.Queue().Get(); i != c.expectedKey {
t.Errorf("expected key %s but got %s", c.expectedKey, i)
}
})
}
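The dropped i.(string) assertion works because the sync context's queue is now string-typed. A standalone sketch of the client-go typed workqueue this relies on:

    package main

    import (
        "fmt"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        // A string-typed rate-limiting queue: Add, Get and Done all operate
        // on string keys, so callers need no interface{} type assertions.
        q := workqueue.NewTypedRateLimitingQueue(
            workqueue.DefaultTypedControllerRateLimiter[string]())
        q.Add("cluster1")
        key, shutdown := q.Get() // key is already a string
        fmt.Println(key, shutdown)
        q.Done(key)
    }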
@@ -3,7 +3,6 @@ package providers
import (
"context"

"github.com/openshift/library-go/pkg/controller/factory"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
@@ -11,6 +10,7 @@ import (

operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
)

// Interface is the interface that a cluster provider should implement

@@ -4,8 +4,6 @@ import (
"context"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
coordv1 "k8s.io/api/coordination/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -20,6 +18,7 @@ import (
clusterv1informer "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
clusterv1listers "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -29,7 +28,6 @@ type clockSyncController struct {
patcher patcher.Patcher[*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus]
clusterLister clusterv1listers.ManagedClusterLister
leaseLister coordlisters.LeaseLister
eventRecorder events.Recorder
}

const (
@@ -40,7 +38,6 @@ func NewClockSyncController(
clusterClient clientset.Interface,
clusterInformer clusterv1informer.ManagedClusterInformer,
leaseInformer coordinformers.LeaseInformer,
recorder events.Recorder,
) factory.Controller {
c := &clockSyncController{
patcher: patcher.NewPatcher[
@@ -48,19 +45,18 @@ func NewClockSyncController(
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
leaseLister: leaseInformer.Lister(),
eventRecorder: recorder.WithComponentSuffix("managed-cluster-clock-sync-controller"),
}

syncCtx := factory.NewSyncContext(clockSyncControllerName, recorder)
syncCtx := factory.NewSyncContext(clockSyncControllerName)
leaseRenewTimeUpdateInformer := renewUpdateInfomer(syncCtx.Queue(), leaseInformer)

return factory.New().WithSyncContext(syncCtx).
WithBareInformers(leaseRenewTimeUpdateInformer).
WithSync(c.sync).
ToController(clockSyncControllerName, recorder)
ToController(clockSyncControllerName)
}

func renewUpdateInfomer(q workqueue.RateLimitingInterface, leaseInformer coordinformers.LeaseInformer) factory.Informer {
func renewUpdateInfomer(q workqueue.TypedRateLimitingInterface[string], leaseInformer coordinformers.LeaseInformer) factory.Informer {
leaseRenewTimeUpdateInformer := leaseInformer.Informer()
queueKeyByLabel := queue.QueueKeyByLabel(clusterv1.ClusterNameLabelKey)
_, err := leaseRenewTimeUpdateInformer.AddEventHandler(&cache.FilteringResourceEventHandler{
@@ -84,9 +80,7 @@ func renewUpdateInfomer(q workqueue.RateLimitingInterface, leaseInformer coordin
return leaseRenewTimeUpdateInformer
}

func (c *clockSyncController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
clusterName := syncCtx.QueueKey()

func (c *clockSyncController) sync(ctx context.Context, syncCtx factory.SyncContext, clusterName string) error {
// the event caused by resync will be filtered because the cluster is not found
cluster, err := c.clusterLister.Get(clusterName)
if errors.IsNotFound(err) {
@@ -110,14 +104,14 @@ func (c *clockSyncController) sync(ctx context.Context, syncCtx factory.SyncCont
leaseDuration = time.Duration(LeaseDurationSeconds*leaseDurationTimes) * time.Second
}

if err := c.updateClusterStatusClockSynced(ctx, cluster,
if err := c.updateClusterStatusClockSynced(ctx, syncCtx, cluster,
now.Sub(observedLease.Spec.RenewTime.Time) < leaseDuration && observedLease.Spec.RenewTime.Time.Sub(now) < leaseDuration); err != nil {
return err
}
return nil
}

func (c *clockSyncController) updateClusterStatusClockSynced(ctx context.Context, cluster *clusterv1.ManagedCluster, synced bool) error {
func (c *clockSyncController) updateClusterStatusClockSynced(ctx context.Context, syncCtx factory.SyncContext, cluster *clusterv1.ManagedCluster, synced bool) error {
var desiredStatus metav1.ConditionStatus
var condition metav1.Condition
if synced {
@@ -148,7 +142,7 @@ func (c *clockSyncController) updateClusterStatusClockSynced(ctx context.Context

updated, err := c.patcher.PatchStatus(ctx, newCluster, newCluster.Status, cluster.Status)
if updated {
c.eventRecorder.Eventf("ManagedClusterClockSyncedConditionUpdated",
syncCtx.Recorder().Eventf(ctx, "ManagedClusterClockSyncedConditionUpdated",
"update managed cluster %q clock synced condition to %v.", cluster.Name, desiredStatus)
}
return err
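With the eventRecorder field gone from the controller struct, helpers that emit events now receive the factory.SyncContext as an argument, as updateClusterStatusClockSynced does above. A minimal sketch of the convention (the helper name is illustrative):

    package clocksync // illustrative

    import (
        "context"

        "open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
    )

    // reportClockSynced shows the pattern: helpers take the sync context and
    // record through syncCtx.Recorder(), keeping the controller struct free
    // of recorder state.
    func reportClockSynced(ctx context.Context, syncCtx factory.SyncContext, clusterName string, synced bool) {
        syncCtx.Recorder().Eventf(ctx, "ManagedClusterClockSyncedConditionUpdated",
            "update managed cluster %q clock synced condition to %v.", clusterName, synced)
    }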
@@ -112,9 +112,8 @@ func TestClockSyncController(t *testing.T) {
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
leaseLister: leaseInformerFactory.Coordination().V1().Leases().Lister(),
eventRecorder: syncCtx.Recorder(),
}
syncErr := controller.sync(context.TODO(), syncCtx)
syncErr := controller.sync(context.TODO(), syncCtx, testinghelpers.TestManagedClusterName)
if syncErr != nil {
t.Errorf("unexpected err: %v", syncErr)
}

@@ -4,8 +4,6 @@ import (
"context"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
coordv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -21,6 +19,7 @@ import (
clusterv1informer "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1"
clusterv1listers "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -49,7 +48,6 @@ func NewClusterLeaseController(
clusterClient clientset.Interface,
clusterInformer clusterv1informer.ManagedClusterInformer,
leaseInformer coordinformers.LeaseInformer,
recorder events.Recorder,
mcEventRecorder kevents.EventRecorder) factory.Controller {
c := &leaseController{
kubeClient: kubeClient,
@@ -68,14 +66,12 @@ func NewClusterLeaseController(
).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterInformer.Informer()).
WithSync(c.sync).
ToController("ManagedClusterLeaseController", recorder)
ToController("ManagedClusterLeaseController")
}

// sync checks the lease of each cluster on hub, which is accepted or previously accepted, to determine whether
// the managed cluster is available.
func (c *leaseController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
clusterName := syncCtx.QueueKey()

func (c *leaseController) sync(ctx context.Context, syncCtx factory.SyncContext, clusterName string) error {
cluster, err := c.clusterLister.Get(clusterName)
if errors.IsNotFound(err) {
// the cluster is not found, do nothing

@@ -7,8 +7,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
@@ -20,9 +18,9 @@ import (
clusterscheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/recorder"
testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing"
)
@@ -174,7 +172,7 @@ func TestSync(t *testing.T) {

ctx := context.TODO()
syncCtx := testingcommon.NewFakeSyncContext(t, testinghelpers.TestManagedClusterName)
mcEventRecorder, err := recorder.NewEventRecorder(ctx, clusterscheme.Scheme, hubClient.EventsV1(), "test")
mcEventRecorder, err := events.NewEventRecorder(ctx, clusterscheme.Scheme, hubClient.EventsV1(), "test")
if err != nil {
t.Fatal(err)
}
@@ -187,7 +185,7 @@ func TestSync(t *testing.T) {
leaseLister: leaseInformerFactory.Coordination().V1().Leases().Lister(),
mcEventRecorder: mcEventRecorder,
}
syncErr := ctrl.sync(context.TODO(), syncCtx)
syncErr := ctrl.sync(context.TODO(), syncCtx, testinghelpers.TestManagedClusterName)
if syncErr != nil {
t.Errorf("unexpected err: %v", syncErr)
}
@@ -208,15 +206,15 @@ func newDeletingManagedCluster() *clusterv1.ManagedCluster {

// spyQueue wraps a real queue and captures AddAfter calls
type spyQueue struct {
workqueue.RateLimitingInterface
workqueue.TypedRateLimitingInterface[string]
addAfterDelay time.Duration
addAfterKey interface{}
}

func (s *spyQueue) AddAfter(item interface{}, duration time.Duration) {
func (s *spyQueue) AddAfter(item string, duration time.Duration) {
s.addAfterDelay = duration
s.addAfterKey = item
s.RateLimitingInterface.AddAfter(item, duration)
s.TypedRateLimitingInterface.AddAfter(item, duration)
}

// testSyncContext is a custom sync context for testing requeue timing
@@ -226,9 +224,9 @@ type testSyncContext struct {
queue *spyQueue
}

func (t *testSyncContext) Queue() workqueue.RateLimitingInterface { return t.queue }
func (t *testSyncContext) QueueKey() string { return t.queueKey }
func (t *testSyncContext) Recorder() events.Recorder { return t.recorder }
func (t *testSyncContext) Queue() workqueue.TypedRateLimitingInterface[string] { return t.queue }
func (t *testSyncContext) QueueKey() string { return t.queueKey }
func (t *testSyncContext) Recorder() events.Recorder { return t.recorder }

func newManagedClusterWithLeaseDuration(seconds int32) *clusterv1.ManagedCluster {
cluster := testinghelpers.NewAvailableManagedCluster()
@@ -288,18 +286,18 @@ func TestRequeueTime(t *testing.T) {
}

ctx := context.TODO()
mcEventRecorder, err := recorder.NewEventRecorder(ctx, clusterscheme.Scheme, hubClient.EventsV1(), "test")
mcEventRecorder, err := events.NewEventRecorder(ctx, clusterscheme.Scheme, hubClient.EventsV1(), "test")
if err != nil {
t.Fatal(err)
}

// Create a custom sync context with spy queue to capture AddAfter calls
spyQ := &spyQueue{
RateLimitingInterface: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()),
}
syncCtx := &testSyncContext{
queueKey: testinghelpers.TestManagedClusterName,
recorder: eventstesting.NewTestingEventRecorder(t),
recorder: events.NewContextualLoggingEventRecorder(t.Name()),
queue: spyQ,
}

@@ -313,7 +311,7 @@ func TestRequeueTime(t *testing.T) {
mcEventRecorder: mcEventRecorder,
}

syncErr := ctrl.sync(context.TODO(), syncCtx)
syncErr := ctrl.sync(context.TODO(), syncCtx, testinghelpers.TestManagedClusterName)
if syncErr != nil {
t.Errorf("unexpected err: %v", syncErr)
}
@@ -6,8 +6,6 @@ import (
"fmt"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
corev1 "k8s.io/api/core/v1"
@@ -30,11 +28,13 @@ import (
v1 "open-cluster-management.io/api/cluster/v1"
ocmfeature "open-cluster-management.io/api/feature"
workv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/apply"
commonhelper "open-cluster-management.io/ocm/pkg/common/helpers"
"open-cluster-management.io/ocm/pkg/common/queue"
commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/registration/helpers"
"open-cluster-management.io/ocm/pkg/registration/hub/manifests"
@@ -62,7 +62,6 @@ type managedClusterController struct {
applier *apply.PermissionApplier
patcher patcher.Patcher[*v1.ManagedCluster, v1.ManagedClusterSpec, v1.ManagedClusterStatus]
hubDriver register.HubDriver
eventRecorder events.Recorder
labels map[string]string
}

@@ -77,7 +76,7 @@ func NewManagedClusterController(
clusterRoleBindingInformer rbacv1informers.ClusterRoleBindingInformer,
manifestWorkInformer workinformers.ManifestWorkInformer,
hubDriver register.HubDriver,
recorder events.Recorder, labels map[string]string) factory.Controller {
labels map[string]string) factory.Controller {

// Creating a deep copy of the labels to avoid controllers from reading the same map concurrently.
deepCopyLabels := make(map[string]string, len(labels))
@@ -101,8 +100,7 @@ func NewManagedClusterController(
patcher: patcher.NewPatcher[
*v1.ManagedCluster, v1.ManagedClusterSpec, v1.ManagedClusterStatus](
clusterClient.ClusterV1().ManagedClusters()),
eventRecorder: recorder.WithComponentSuffix("managed-cluster-controller"),
labels: deepCopyLabels,
labels: deepCopyLabels,
}
return factory.New().
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterInformer.Informer()).
@@ -114,16 +112,16 @@ func NewManagedClusterController(
clusterRoleInformer.Informer(),
clusterRoleBindingInformer.Informer()).
WithSync(c.sync).
ToController("ManagedClusterController", recorder)
ToController("ManagedClusterController")
}

func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
managedClusterName := syncCtx.QueueKey()
logger := klog.FromContext(ctx)
logger.V(4).Info("Reconciling ManagedCluster", "managedClusterName", managedClusterName)
func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.SyncContext, managedClusterName string) error {
logger := klog.FromContext(ctx).WithValues("managedClusterName", managedClusterName)
logger.V(4).Info("Reconciling ManagedCluster")
ctx = klog.NewContext(ctx, logger)
managedCluster, err := c.clusterLister.Get(managedClusterName)
if apierrors.IsNotFound(err) {
err = c.removeClusterRbac(ctx, managedClusterName, true)
err = c.removeClusterRbac(ctx, syncCtx, managedClusterName, true)
if errors.Is(err, requeueError) {
syncCtx.Queue().AddAfter(managedClusterName, requeueError.RequeueTime)
return nil
@@ -141,7 +139,7 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
return err
}

err = c.removeClusterRbac(ctx, managedClusterName, true)
err = c.removeClusterRbac(ctx, syncCtx, managedClusterName, true)
if err != nil {
if errors.Is(err, requeueError) {
syncCtx.Queue().AddAfter(managedClusterName, requeueError.RequeueTime)
@@ -170,7 +168,7 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
}

// Hub cluster-admin denies the current spoke cluster, we remove its related resources and update its condition.
c.eventRecorder.Eventf("ManagedClusterDenied", "managed cluster %s is denied by hub cluster admin", managedClusterName)
syncCtx.Recorder().Eventf(ctx, "ManagedClusterDenied", "managed cluster %s is denied by hub cluster admin", managedClusterName)

// Apply(Update) the cluster specific rbac resources for this spoke cluster with hubAcceptsClient=false.
var errs []error
@@ -203,7 +201,7 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
}

// Remove the cluster role binding files for registration-agent and work-agent.
err = c.removeClusterRbac(ctx, managedClusterName, managedCluster.Spec.HubAcceptsClient)
err = c.removeClusterRbac(ctx, syncCtx, managedClusterName, managedCluster.Spec.HubAcceptsClient)
if errors.Is(err, requeueError) {
syncCtx.Queue().AddAfter(managedClusterName, requeueError.RequeueTime)
return nil
@@ -231,8 +229,9 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
// 1. namespace for this spoke cluster.
// 2. cluster specific rbac resources for this spoke cluster.(hubAcceptsClient=true)
// 3. cluster specific rolebinding(registration-agent and work-agent) for this spoke cluster.
recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, syncCtx.Recorder())
var errs []error
_, _, err = resourceapply.ApplyNamespace(ctx, c.kubeClient.CoreV1(), syncCtx.Recorder(), namespace)
_, _, err = resourceapply.ApplyNamespace(ctx, c.kubeClient.CoreV1(), recorderWrapper, namespace)
if err != nil {
errs = append(errs, err)
}
@@ -278,7 +277,7 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
errs = append(errs, updatedErr)
}
if updated {
c.eventRecorder.Eventf("ManagedClusterAccepted", "managed cluster %s is accepted by hub cluster admin", managedClusterName)
syncCtx.Recorder().Eventf(ctx, "ManagedClusterAccepted", "managed cluster %s is accepted by hub cluster admin", managedClusterName)
}
return operatorhelpers.NewMultiLineAggregate(errs)
}
@@ -304,7 +303,7 @@ func (c *managedClusterController) acceptCluster(ctx context.Context, managedClu
// remove the cluster rbac resources firstly.
// the work roleBinding with a finalizer remains because it is used by work agent to operate the works.
// the finalizer on work roleBinding will be removed after there is no works in the ns.
func (c *managedClusterController) removeClusterRbac(ctx context.Context, clusterName string, accepted bool) error {
func (c *managedClusterController) removeClusterRbac(ctx context.Context, syncCtx factory.SyncContext, clusterName string, accepted bool) error {
var errs []error
assetFn := helpers.ManagedClusterAssetFnWithAccepted(manifests.RBACManifests, clusterName, accepted, c.labels)
files := manifests.ClusterSpecificRoleBindings
@@ -312,8 +311,9 @@ func (c *managedClusterController) removeClusterRbac(ctx context.Context, cluste
files = append(files, manifests.ClusterSpecificRBACFiles...)
}

recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, syncCtx.Recorder())
resourceResults := resourceapply.DeleteAll(ctx, resourceapply.NewKubeClientHolder(c.kubeClient),
c.eventRecorder, assetFn, files...)
recorderWrapper, assetFn, files...)
for _, result := range resourceResults {
if result.Error != nil {
errs = append(errs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
@@ -7,7 +7,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -332,9 +331,8 @@ func TestSyncManagedCluster(t *testing.T) {
),
patcher.NewPatcher[*v1.ManagedCluster, v1.ManagedClusterSpec, v1.ManagedClusterStatus](clusterClient.ClusterV1().ManagedClusters()),
register.NewNoopHubDriver(),
eventstesting.NewTestingEventRecorder(t),
c.labels}
syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, testinghelpers.TestManagedClusterName))
syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, testinghelpers.TestManagedClusterName), testinghelpers.TestManagedClusterName)
if syncErr != nil && !errors.Is(syncErr, requeueError) {
t.Errorf("unexpected err: %v", syncErr)
}

@@ -6,8 +6,6 @@ import (
"reflect"
"sort"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
@@ -24,6 +22,7 @@ import (
v1 "open-cluster-management.io/api/cluster/v1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
clustersdkv1beta2 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta2"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"
)

@@ -34,18 +33,16 @@ type managedNamespaceController struct {
clusterPatcher patcher.Patcher[*v1.ManagedCluster, v1.ManagedClusterSpec, v1.ManagedClusterStatus]
clusterLister clusterlisterv1.ManagedClusterLister
clusterSetLister clusterlisterv1beta2.ManagedClusterSetLister
eventRecorder events.Recorder
}

// NewManagedNamespaceController creates a new managed namespace controller
func NewManagedNamespaceController(
clusterClient clientset.Interface,
clusterInformer clusterinformerv1.ManagedClusterInformer,
clusterSetInformer clusterinformerv1beta2.ManagedClusterSetInformer,
recorder events.Recorder) factory.Controller {
clusterSetInformer clusterinformerv1beta2.ManagedClusterSetInformer) factory.Controller {

controllerName := "managed-namespace-controller"
syncCtx := factory.NewSyncContext(controllerName, recorder)
syncCtx := factory.NewSyncContext(controllerName)

c := &managedNamespaceController{
clusterPatcher: patcher.NewPatcher[
@@ -53,7 +50,6 @@ func NewManagedNamespaceController(
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: recorder.WithComponentSuffix("managed-namespace-controller"),
}

// Add explicit event handlers for ManagedCluster
@@ -92,17 +88,17 @@ func NewManagedNamespaceController(
WithBareInformers(clusterInformer.Informer()).
WithInformersQueueKeysFunc(c.clusterSetToClusterQueueKeysFunc, clusterSetInformer.Informer()).
WithSync(c.sync).
ToController("ManagedNamespaceController", recorder)
ToController("ManagedNamespaceController")
}

func (c *managedNamespaceController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
logger := klog.FromContext(ctx)
clusterName := syncCtx.QueueKey()
func (c *managedNamespaceController) sync(ctx context.Context, syncCtx factory.SyncContext, clusterName string) error {
logger := klog.FromContext(ctx).WithValues("clusterName", clusterName)
ctx = klog.NewContext(ctx, logger)
if len(clusterName) == 0 {
return nil
}

logger.V(4).Info("Reconciling managed namespaces for ManagedCluster", "clusterName", clusterName)
logger.V(4).Info("Reconciling managed namespaces for ManagedCluster")

cluster, err := c.clusterLister.Get(clusterName)
if errors.IsNotFound(err) {
@@ -120,7 +116,7 @@ func (c *managedNamespaceController) sync(ctx context.Context, syncCtx factory.S
return nil
}

if err := c.syncManagedNamespacesForCluster(ctx, cluster); err != nil {
if err := c.syncManagedNamespacesForCluster(ctx, syncCtx, cluster); err != nil {
return fmt.Errorf("failed to sync managed namespaces for ManagedCluster %q: %w", cluster.Name, err)
}

@@ -129,7 +125,7 @@ func (c *managedNamespaceController) sync(ctx context.Context, syncCtx factory.S

// syncManagedNamespacesForCluster updates the managed namespace configuration for a specific cluster
// based on all cluster sets it belongs to
func (c *managedNamespaceController) syncManagedNamespacesForCluster(ctx context.Context, cluster *v1.ManagedCluster) error {
func (c *managedNamespaceController) syncManagedNamespacesForCluster(ctx context.Context, syncCtx factory.SyncContext, cluster *v1.ManagedCluster) error {
logger := klog.FromContext(ctx)

// Get all cluster sets this cluster belongs to
@@ -178,7 +174,7 @@ func (c *managedNamespaceController) syncManagedNamespacesForCluster(ctx context
// Only record event if there was an actual update
if updated {
logger.V(4).Info("Updated managed namespaces for cluster", "clusterName", cluster.Name, "namespacesCount", len(allManagedNamespaces))
c.eventRecorder.Eventf("ManagedNamespacesUpdated", "Updated managed namespaces for cluster %q (total: %d)", cluster.Name, len(allManagedNamespaces))
syncCtx.Recorder().Eventf(ctx, "ManagedNamespacesUpdated", "Updated managed namespaces for cluster %q (total: %d)", cluster.Name, len(allManagedNamespaces))
}

return nil

@@ -5,7 +5,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

@@ -153,10 +152,11 @@ func TestSyncManagedNamespacesForCluster(t *testing.T) {
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}

err := controller.syncManagedNamespacesForCluster(context.TODO(), c.cluster)
syncCtx := testingcommon.NewFakeSyncContext(t, c.name)

err := controller.syncManagedNamespacesForCluster(context.TODO(), syncCtx, c.cluster)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -245,7 +245,6 @@ func TestClusterSetToClusterQueueKeysFunc(t *testing.T) {
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}

clusterNames := controller.clusterSetToClusterQueueKeysFunc(c.clusterSet)
@@ -341,7 +340,6 @@ func TestGetClustersPreviouslyInSet(t *testing.T) {
*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}

clusters, err := controller.getClustersPreviouslyInSet(c.clusterSetName)
@@ -459,12 +457,11 @@ func TestSync(t *testing.T) {
clusterClient.ClusterV1().ManagedClusters()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}

// Create a fake sync context
syncCtx := testingcommon.NewFakeSyncContext(t, c.queueKey)
err := controller.sync(context.TODO(), syncCtx)
err := controller.sync(context.TODO(), syncCtx, c.queueKey)

if c.expectError && err == nil {
t.Errorf("expected error but got none")

@@ -5,8 +5,6 @@ import (
"fmt"
"reflect"

"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -24,6 +22,7 @@ import (
v1 "open-cluster-management.io/api/cluster/v1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
clustersdkv1beta2 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta2"
"open-cluster-management.io/sdk-go/pkg/basecontroller/factory"
"open-cluster-management.io/sdk-go/pkg/patcher"

"open-cluster-management.io/ocm/pkg/common/queue"
@@ -40,19 +39,17 @@ type managedClusterSetController struct {
patcher patcher.Patcher[*clusterv1beta2.ManagedClusterSet, clusterv1beta2.ManagedClusterSetSpec, clusterv1beta2.ManagedClusterSetStatus]
clusterLister clusterlisterv1.ManagedClusterLister
clusterSetLister clusterlisterv1beta2.ManagedClusterSetLister
eventRecorder events.Recorder
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewManagedClusterSetController creates a new managed cluster set controller
func NewManagedClusterSetController(
clusterClient clientset.Interface,
clusterInformer clusterinformerv1.ManagedClusterInformer,
clusterSetInformer clusterinformerv1beta2.ManagedClusterSetInformer,
recorder events.Recorder) factory.Controller {
clusterSetInformer clusterinformerv1beta2.ManagedClusterSetInformer) factory.Controller {

controllerName := "managed-clusterset-controller"
syncCtx := factory.NewSyncContext(controllerName, recorder)
syncCtx := factory.NewSyncContext(controllerName)

c := &managedClusterSetController{
patcher: patcher.NewPatcher[
@@ -60,7 +57,6 @@ func NewManagedClusterSetController(
clusterClient.ClusterV1beta2().ManagedClusterSets()),
clusterLister: clusterInformer.Lister(),
clusterSetLister: clusterSetInformer.Lister(),
eventRecorder: recorder.WithComponentSuffix("managed-cluster-set-controller"),
queue: syncCtx.Queue(),
}

@@ -120,16 +116,16 @@ func NewManagedClusterSetController(
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterSetInformer.Informer()).
WithBareInformers(clusterInformer.Informer()).
WithSync(c.sync).
ToController("ManagedClusterSetController", recorder)
ToController("ManagedClusterSetController")
}

func (c *managedClusterSetController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
func (c *managedClusterSetController) sync(ctx context.Context, syncCtx factory.SyncContext, clusterSetName string) error {
logger := klog.FromContext(ctx)
clusterSetName := syncCtx.QueueKey()
if len(clusterSetName) == 0 {
return nil
}
logger.V(4).Info("Reconciling ManagedClusterSet", "clusterSetName", clusterSetName)
logger.WithValues("clusterSetName", clusterSetName)
ctx = klog.NewContext(ctx, logger)
clusterSet, err := c.clusterSetLister.Get(clusterSetName)
if errors.IsNotFound(err) {
// cluster set not found, could have been deleted, do nothing.

@@ -6,7 +6,6 @@ import (
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
@@ -236,7 +235,6 @@ func TestSyncClusterSet(t *testing.T) {
clusterClient.ClusterV1beta2().ManagedClusterSets()),
clusterLister: informerFactory.Cluster().V1().ManagedClusters().Lister(),
clusterSetLister: informerFactory.Cluster().V1beta2().ManagedClusterSets().Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
}

syncErr := ctrl.syncClusterSet(context.Background(), c.existingClusterSet)
@@ -386,7 +384,6 @@ func TestEnqueueUpdateClusterClusterSet(t *testing.T) {

ctrl := managedClusterSetController{
clusterSetLister: informerFactory.Cluster().V1beta2().ManagedClusterSets().Lister(),
eventRecorder: eventstesting.NewTestingEventRecorder(t),
queue: syncCtx.Queue(),
}