🌱 use SDK basecontroller for better logging. (#1269)

* Use basecontroller in sdk-go instead for better logging

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Rename to fakeSyncContext

Signed-off-by: Jian Qiu <jqiu@redhat.com>

---------

Signed-off-by: Jian Qiu <jqiu@redhat.com>
This commit is contained in:
Jian Qiu
2025-12-01 11:07:02 +08:00
committed by GitHub
parent 26edb9423a
commit 33310619d9
199 changed files with 794 additions and 3268 deletions

View File

@@ -1,287 +0,0 @@
package factory
import (
"context"
"errors"
"fmt"
"sync"
"time"
applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
"github.com/robfig/cron"
apierrors "k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
operatorv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/library-go/pkg/operator/management"
operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
)
// SyntheticRequeueError can be returned from sync() in case of forcing a sync() retry artificially.
// This can be also done by re-adding the key to queue, but this is cheaper and more convenient.
var SyntheticRequeueError = errors.New("synthetic requeue request")

// defaultCacheSyncTimeout bounds how long Run() waits for informer caches to sync before exiting.
var defaultCacheSyncTimeout = 10 * time.Minute

// baseController represents generic Kubernetes controller boiler-plate
type baseController struct {
	name                   string                                                         // human-readable controller name used in logs and condition types
	controllerInstanceName string                                                         // distinguishes multiple instances of the same controller
	cachesToSync           []cache.InformerSynced                                         // caches that must sync before workers start
	sync                   func(ctx context.Context, controllerContext SyncContext) error // the controller business logic
	syncContext            SyncContext                                                    // carries the work queue and event recorder
	syncDegradedClient     operatorv1helpers.OperatorClient                               // optional; when set, sync errors set a Degraded condition
	resyncEvery            time.Duration                                                  // interval-based periodic resync; 0 disables it
	resyncSchedules        []cron.Schedule                                                // cron-based resync triggers; nil disables them
	postStartHooks         []PostStartHook                                                // functions started asynchronously after Run() begins
	cacheSyncTimeout       time.Duration                                                  // per-controller override of the cache sync wait
}

// Compile-time check that baseController satisfies the Controller interface.
var _ Controller = &baseController{}
// Name returns a controller name.
func (c baseController) Name() string {
	return c.name
}

// ControllerInstanceName specifies the controller instance.
// Useful when the same controller is used multiple times.
func (c baseController) ControllerInstanceName() string {
	return c.controllerInstanceName
}
// scheduledJob adapts a controller work queue to the cron.Job interface so that
// cron schedules can trigger controller syncs.
type scheduledJob struct {
	queue workqueue.RateLimitingInterface
	name  string
}

// newScheduledJob returns a cron.Job that enqueues the default queue key for the named controller.
func newScheduledJob(name string, queue workqueue.RateLimitingInterface) cron.Job {
	job := &scheduledJob{
		name:  name,
		queue: queue,
	}
	return job
}

// Run enqueues the default key, causing one controller sync per scheduled firing.
func (s *scheduledJob) Run() {
	klog.V(4).Infof("Triggering scheduled %q controller run", s.name)
	s.queue.Add(DefaultQueueKey)
}
// waitForNamedCacheSync blocks until all given caches are synced or stopCh closes,
// logging progress under the controller name. A non-nil error means the caches
// did not sync before stopCh closed.
func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error {
	klog.Infof("Waiting for caches to sync for %s", controllerName)

	synced := cache.WaitForCacheSync(stopCh, cacheSyncs...)
	if !synced {
		return fmt.Errorf("unable to sync caches for %s", controllerName)
	}

	klog.Infof("Caches are synced for %s ", controllerName)
	return nil
}
// Run starts the controller and blocks until ctx is cancelled.
// It waits for informer caches to sync, spawns the requested number of queue
// workers, starts optional cron- and interval-based resync triggers and
// post-start hooks, and coordinates an orderly shutdown of all of them.
func (c *baseController) Run(ctx context.Context, workers int) {
	// HandleCrash recovers panics
	defer utilruntime.HandleCrash(c.degradedPanicHandler)

	// give caches 10 minutes to sync
	cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout)
	defer cacheSyncCancel()
	err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...)
	if err != nil {
		select {
		case <-ctx.Done():
			// Exit gracefully because the controller was requested to stop.
			return
		default:
			// If caches did not sync after 10 minutes, it has taken oddly long and
			// we should provide feedback. Since the control loops will never start,
			// it is safer to exit with a good message than to continue with a dead loop.
			// TODO: Consider making this behavior configurable.
			klog.Exit(err)
		}
	}

	var workerWg sync.WaitGroup
	defer func() {
		// Logged after Wait() returns because this inner defer runs last.
		defer klog.Infof("All %s workers have been terminated", c.name)
		workerWg.Wait()
	}()

	// queueContext is used to track and initiate queue shutdown.
	// It is deliberately detached from ctx: workers are stopped explicitly via
	// queueContextCancel() after the queue itself has been shut down below.
	queueContext, queueContextCancel := context.WithCancel(context.TODO())

	for i := 1; i <= workers; i++ {
		klog.Infof("Starting #%d worker of %s controller ...", i, c.name)
		workerWg.Add(1)
		go func() {
			defer func() {
				klog.Infof("Shutting down worker of %s controller ...", c.name)
				workerWg.Done()
			}()
			c.runWorker(queueContext)
		}()
	}

	// if scheduled run is requested, run the cron scheduler
	if c.resyncSchedules != nil {
		scheduler := cron.New()
		for _, s := range c.resyncSchedules {
			scheduler.Schedule(s, newScheduledJob(c.name, c.syncContext.Queue()))
		}
		scheduler.Start()
		defer scheduler.Stop()
	}

	// runPeriodicalResync is independent from queue
	if c.resyncEvery > 0 {
		workerWg.Add(1)
		if c.resyncEvery < 60*time.Second {
			// Warn about too fast resyncs as they might drain the operators QPS.
			// This event is cheap as it is only emitted on operator startup.
			c.syncContext.Recorder().Warningf("FastControllerResync", "Controller %q resync interval is set to %s which might lead to client request throttling", c.name, c.resyncEvery)
		}
		go func() {
			defer workerWg.Done()
			wait.UntilWithContext(ctx, func(ctx context.Context) { c.syncContext.Queue().Add(DefaultQueueKey) }, c.resyncEvery)
		}()
	}

	// run post-start hooks (custom triggers, etc.)
	if len(c.postStartHooks) > 0 {
		var hookWg sync.WaitGroup
		defer func() {
			hookWg.Wait() // wait for the post-start hooks
			klog.Infof("All %s post start hooks have been terminated", c.name)
		}()
		for i := range c.postStartHooks {
			hookWg.Add(1)
			go func(index int) {
				defer hookWg.Done()
				if err := c.postStartHooks[index](ctx, c.syncContext); err != nil {
					klog.Warningf("%s controller post start hook error: %v", c.name, err)
				}
			}(i)
		}
	}

	// Handle controller shutdown
	<-ctx.Done()                     // wait for controller context to be cancelled
	c.syncContext.Queue().ShutDown() // shutdown the controller queue first
	queueContextCancel()             // cancel the queue context, which tell workers to initiate shutdown

	// Wait for all workers to finish their job.
	// at this point the Run() can hang and caller have to implement the logic that will kill
	// this controller (SIGKILL).
	klog.Infof("Shutting down %s ...", c.name)
}
// Sync invokes the controller's sync function directly. It is exposed so unit
// tests can exercise the controller logic without running the work queue.
func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext) error {
	return c.sync(ctx, syncCtx)
}

// runWorker runs a single worker
// The worker is asked to terminate when the passed context is cancelled and is given terminationGraceDuration time
// to complete its shutdown.
func (c *baseController) runWorker(queueCtx context.Context) {
	wait.UntilWithContext(
		queueCtx,
		func(queueCtx context.Context) {
			// A panic in sync() should mark the operator degraded rather than crash silently.
			defer utilruntime.HandleCrash(c.degradedPanicHandler)
			for {
				select {
				case <-queueCtx.Done():
					return
				default:
					// processNextWorkItem blocks on Queue().Get(), so this loop is
					// driven by queue items rather than spinning.
					c.processNextWorkItem(queueCtx)
				}
			}
		},
		1*time.Second)
}

// reconcile wraps the sync() call and if operator client is set, it handle the degraded condition if sync() returns an error.
func (c *baseController) reconcile(ctx context.Context, syncCtx SyncContext) error {
	err := c.sync(ctx, syncCtx)
	degradedErr := c.reportDegraded(ctx, err)
	if apierrors.IsNotFound(degradedErr) && management.IsOperatorRemovable() {
		// The operator tolerates missing CR, therefore don't report it up.
		return err
	}
	return degradedErr
}

// degradedPanicHandler will go degraded on failures, then we should catch potential panics and covert them into bad status.
func (c *baseController) degradedPanicHandler(panicVal interface{}) {
	if c.syncDegradedClient == nil {
		// if we don't have a client for reporting degraded condition, then let the existing panic handler do the work
		return
	}
	// error return intentionally ignored: we are already handling a panic.
	_ = c.reportDegraded(context.TODO(), fmt.Errorf("panic caught:\n%v", panicVal))
}
// reportDegraded updates status with an indication of degraded-ness.
// When reportedError is non-nil the "<name>Degraded" condition is set to True
// with the error message; otherwise it is set to False/AsExpected.
// The original reportedError is always passed through so callers still observe
// the sync failure; a status-update failure is only surfaced as the return
// value when there was no sync error to report.
func (c *baseController) reportDegraded(ctx context.Context, reportedError error) error {
	if c.syncDegradedClient == nil {
		return reportedError
	}

	// Build the Degraded condition once; only status/reason/message differ
	// between the error and the happy path (previously duplicated verbatim).
	condition := applyoperatorv1.OperatorCondition().
		WithType(c.name + "Degraded")
	if reportedError != nil {
		condition = condition.
			WithStatus(operatorv1.ConditionTrue).
			WithReason("SyncError").
			WithMessage(reportedError.Error())
	} else {
		condition = condition.
			WithStatus(operatorv1.ConditionFalse).
			WithReason("AsExpected")
	}

	status := applyoperatorv1.OperatorStatus().WithConditions(condition)
	updateErr := c.syncDegradedClient.ApplyOperatorStatus(ctx, ControllerFieldManager(c.name, "reportDegraded"), status)
	if reportedError != nil {
		if updateErr != nil {
			klog.Warningf("Updating status of %q failed: %v", c.Name(), updateErr)
		}
		return reportedError
	}
	return updateErr
}
// processNextWorkItem pulls one key from the work queue, runs reconcile for it
// and performs the requeue/forget bookkeeping. It returns immediately when the
// queue is shutting down.
func (c *baseController) processNextWorkItem(queueCtx context.Context) {
	key, quit := c.syncContext.Queue().Get()
	if quit {
		return
	}
	defer c.syncContext.Queue().Done(key)

	// Hand the queue key to a copy of the sync context so Sync() can read it via QueueKey().
	syncCtx := c.syncContext.(syncContext)
	var ok bool
	syncCtx.queueKey, ok = key.(string)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("%q controller failed to process key %q (not a string)", c.name, key))
		return
	}

	if err := c.reconcile(queueCtx, syncCtx); err != nil {
		// Use errors.Is so a wrapped SyntheticRequeueError is still recognized
		// (the previous == comparison missed wrapped errors).
		if errors.Is(err, SyntheticRequeueError) {
			// logging this helps detecting wedged controllers with missing pre-requirements
			klog.V(5).Infof("%q controller requested synthetic requeue with key %q", c.name, key)
		} else {
			// Use the named constant instead of the previously hard-coded "key" literal.
			if klog.V(4).Enabled() || key != DefaultQueueKey {
				utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err))
			} else {
				utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err))
			}
		}
		// Failed keys are rate-limit requeued so transient errors retry with backoff.
		c.syncContext.Queue().AddRateLimited(key)
		return
	}

	// Successful sync: reset the rate-limiter history for this key.
	c.syncContext.Queue().Forget(key)
}

View File

@@ -1,117 +0,0 @@
package factory
import (
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/openshift/library-go/pkg/operator/events"
)
// syncContext implements SyncContext and provide user access to queue and object that caused
// the sync to be triggered.
type syncContext struct {
	eventRecorder events.Recorder                 // recorder scoped with the lowercased controller name suffix
	queue         workqueue.RateLimitingInterface // the controller work queue
	queueKey      string                          // key of the item that triggered the current sync
}

// Compile-time check that syncContext satisfies SyncContext.
var _ SyncContext = syncContext{}

// NewSyncContext gives new sync context.
func NewSyncContext(name string, recorder events.Recorder) SyncContext {
	return syncContext{
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
		eventRecorder: recorder.WithComponentSuffix(strings.ToLower(name)),
	}
}

// Queue returns the controller work queue.
func (c syncContext) Queue() workqueue.RateLimitingInterface {
	return c.queue
}

// QueueKey returns the key that triggered the current sync.
func (c syncContext) QueueKey() string {
	return c.queueKey
}

// Recorder returns the event recorder bound to this controller.
func (c syncContext) Recorder() events.Recorder {
	return c.eventRecorder
}
// eventHandler provides default event handler that is added to an informers passed to controller factory.
// Every observed add/update/delete event is translated to queue keys via
// queueKeysFunc and enqueued; when filter is non-nil, non-matching events are
// dropped before enqueueing.
func (c syncContext) eventHandler(queueKeysFunc ObjectQueueKeysFunc, filter EventFilterFunc) cache.ResourceEventHandler {
	resourceEventHandler := cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			runtimeObj, ok := obj.(runtime.Object)
			if !ok {
				utilruntime.HandleError(fmt.Errorf("added object %+v is not runtime Object", obj))
				return
			}
			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
		},
		UpdateFunc: func(old, new interface{}) {
			runtimeObj, ok := new.(runtime.Object)
			if !ok {
				// BUGFIX: log the actual object; runtimeObj is nil when the assertion fails.
				utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", new))
				return
			}
			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
		},
		DeleteFunc: func(obj interface{}) {
			runtimeObj, ok := obj.(runtime.Object)
			if !ok {
				// The object may be a tombstone carrying the last known state of a deleted object.
				if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
					// Guard the assertion: a tombstone may wrap a non-runtime.Object
					// payload and an unchecked cast would panic the informer goroutine.
					if tombstoneObj, ok := tombstone.Obj.(runtime.Object); ok {
						c.enqueueKeys(queueKeysFunc(tombstoneObj)...)
						return
					}
				}
				// BUGFIX: this previously said "updated object" (copied from UpdateFunc)
				// and logged a nil runtimeObj instead of the offending object.
				utilruntime.HandleError(fmt.Errorf("deleted object %+v is not runtime Object", obj))
				return
			}
			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
		},
	}
	if filter == nil {
		return resourceEventHandler
	}
	return cache.FilteringResourceEventHandler{
		FilterFunc: filter,
		Handler:    resourceEventHandler,
	}
}
// enqueueKeys adds every provided key to the controller work queue.
func (c syncContext) enqueueKeys(keys ...string) {
	for _, key := range keys {
		c.queue.Add(key)
	}
}
// namespaceChecker returns a predicate that reports whether an input object
// (or its deletion tombstone) is a Namespace whose name is one of the
// namespaces we are interested in.
func namespaceChecker(interestingNamespaces []string) func(obj interface{}) bool {
	// A set makes the per-event membership test cheap.
	interesting := sets.New(interestingNamespaces...)

	return func(obj interface{}) bool {
		switch t := obj.(type) {
		case *corev1.Namespace:
			return interesting.Has(t.Name)
		case cache.DeletedFinalStateUnknown:
			// the object might be getting deleted
			if ns, ok := t.Obj.(*corev1.Namespace); ok {
				return interesting.Has(ns.Name)
			}
		}
		return false
	}
}

View File

@@ -1,26 +0,0 @@
package factory
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
)
// ObjectNameToKey maps a runtime object to its metadata name, or the empty
// string when the object does not expose ObjectMeta.
func ObjectNameToKey(obj runtime.Object) string {
	if accessor, ok := obj.(metav1.ObjectMetaAccessor); ok {
		return accessor.GetObjectMeta().GetName()
	}
	return ""
}
// NamesFilter returns an event filter that matches only objects whose metadata
// name is one of the given names.
func NamesFilter(names ...string) EventFilterFunc {
	allowed := sets.New(names...)
	return func(obj interface{}) bool {
		accessor, ok := obj.(metav1.ObjectMetaAccessor)
		if !ok {
			return false
		}
		return allowed.Has(accessor.GetObjectMeta().GetName())
	}
}

View File

@@ -1,341 +0,0 @@
package factory
import (
"context"
"fmt"
"reflect"
"time"
"github.com/robfig/cron"
"k8s.io/apimachinery/pkg/runtime"
errorutil "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"github.com/openshift/library-go/pkg/operator/events"
operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
)
// DefaultQueueKey is the queue key used for string trigger based controllers.
const DefaultQueueKey = "key"

// DefaultQueueKeysFunc returns a slice with a single element - the DefaultQueueKey
func DefaultQueueKeysFunc(_ runtime.Object) []string {
	return []string{DefaultQueueKey}
}

// Factory is a generator that produces standard Kubernetes controllers.
// Factory is really generic and should only be used for simple controllers that do not require special handling.
type Factory struct {
	sync                   SyncFunc                         // the controller business logic (required)
	syncContext            SyncContext                      // optional custom sync context, mostly for tests
	syncDegradedClient     operatorv1helpers.OperatorClient // when set, sync errors set a Degraded condition
	resyncInterval         time.Duration                    // interval-based periodic resync; 0 disables
	resyncSchedules        []string                         // cron expressions for scheduled resyncs
	informers              []filteredInformers              // informers whose events enqueue the default key
	informerQueueKeys      []informersWithQueueKey          // informers with custom queue-key mapping
	bareInformers          []Informer                       // informers with caller-managed handlers
	postStartHooks         []PostStartHook                  // async functions run after startup
	namespaceInformers     []*namespaceInformer             // namespace informers with name filtering
	cachesToSync           []cache.InformerSynced           // extra caches the controller waits for
	controllerInstanceName string                           // distinguishes instances of the same controller
}

// Informer represents any structure that allow to register event handlers and informs if caches are synced.
// Any SharedInformer will comply.
type Informer interface {
	AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error)
	HasSynced() bool
}

// namespaceInformer pairs a namespace informer with the filter restricting it
// to interesting namespace names.
type namespaceInformer struct {
	informer Informer
	nsFilter EventFilterFunc
}

// informersWithQueueKey groups informers that share a queue-key mapping
// function and an optional event filter.
type informersWithQueueKey struct {
	informers  []Informer
	filter     EventFilterFunc
	queueKeyFn ObjectQueueKeysFunc
}

// filteredInformers groups informers that share an optional event filter;
// their events enqueue the default queue key.
type filteredInformers struct {
	informers []Informer
	filter    EventFilterFunc
}

// PostStartHook specify a function that will run after controller is started.
// The context is cancelled when the controller is asked to shutdown and the post start hook should terminate as well.
// The syncContext allow access to controller queue and event recorder.
type PostStartHook func(ctx context.Context, syncContext SyncContext) error

// ObjectQueueKeyFunc is used to make a string work queue key out of the runtime object that is passed to it.
// This can extract the "namespace/name" if you need to, or just return "key" if you are building a controller
// that only uses string triggers.
// DEPRECATED: use ObjectQueueKeysFunc instead
type ObjectQueueKeyFunc func(runtime.Object) string

// ObjectQueueKeysFunc is used to make string work queue keys out of the runtime object that is passed to it.
// This can extract the "namespace/name" if you need to, or just return "key" if you are building a controller
// that only uses string triggers.
type ObjectQueueKeysFunc func(runtime.Object) []string

// EventFilterFunc is used to filter informer events to prevent Sync() from being called
type EventFilterFunc func(obj interface{}) bool

// New return new factory instance.
func New() *Factory {
	return &Factory{}
}
// WithSync sets the controller synchronization function. This function is the core of the
// controller and usually holds the main controller logic.
// It must be called before ToController(), which panics when no sync function is set.
func (f *Factory) WithSync(syncFn SyncFunc) *Factory {
	f.sync = syncFn
	return f
}
// WithInformers registers the given informers: their caches are added to the
// controller's sync set and every observed event triggers Sync() with the
// default queue key.
func (f *Factory) WithInformers(informers ...Informer) *Factory {
	// Delegates to the filtered variant with a nil (pass-everything) filter.
	f.WithFilteredEventsInformers(nil, informers...)
	return f
}

// WithFilteredEventsInformers registers the given informers like WithInformers,
// but drops events rejected by filter before they can trigger Sync().
func (f *Factory) WithFilteredEventsInformers(filter EventFilterFunc, informers ...Informer) *Factory {
	entry := filteredInformers{
		filter:    filter,
		informers: informers,
	}
	f.informers = append(f.informers, entry)
	return f
}

// WithBareInformers registers informers whose event handlers are managed by the
// caller; the factory only waits for their caches to sync and adds no handlers.
// Such handlers must enqueue keys the controller's sync() understands.
func (f *Factory) WithBareInformers(informers ...Informer) *Factory {
	f.bareInformers = append(f.bareInformers, informers...)
	return f
}
// WithInformersQueueKeyFunc registers informers whose observed events are
// translated to a single work-queue key via queueKeyFn before triggering Sync().
func (f *Factory) WithInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, informers ...Informer) *Factory {
	// Adapt the single-key function to the multi-key form used internally.
	adapted := func(o runtime.Object) []string {
		return []string{queueKeyFn(o)}
	}
	f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{
		informers:  informers,
		queueKeyFn: adapted,
	})
	return f
}

// WithFilteredEventsInformersQueueKeyFunc behaves like WithInformersQueueKeyFunc
// but additionally drops events rejected by filter before they trigger Sync().
func (f *Factory) WithFilteredEventsInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, filter EventFilterFunc, informers ...Informer) *Factory {
	adapted := func(o runtime.Object) []string {
		return []string{queueKeyFn(o)}
	}
	f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{
		informers:  informers,
		filter:     filter,
		queueKeyFn: adapted,
	})
	return f
}
// WithInformersQueueKeysFunc registers informers whose observed events are
// translated to work-queue keys via queueKeyFn before triggering Sync().
func (f *Factory) WithInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, informers ...Informer) *Factory {
	entry := informersWithQueueKey{
		informers:  informers,
		queueKeyFn: queueKeyFn,
	}
	f.informerQueueKeys = append(f.informerQueueKeys, entry)
	return f
}

// WithFilteredEventsInformersQueueKeysFunc behaves like
// WithInformersQueueKeysFunc but additionally drops events rejected by filter
// before they trigger Sync().
func (f *Factory) WithFilteredEventsInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, filter EventFilterFunc, informers ...Informer) *Factory {
	entry := informersWithQueueKey{
		informers:  informers,
		filter:     filter,
		queueKeyFn: queueKeyFn,
	}
	f.informerQueueKeys = append(f.informerQueueKeys, entry)
	return f
}
// WithPostStartHooks registers functions that run asynchronously once the
// controller has been started via Run().
func (f *Factory) WithPostStartHooks(hooks ...PostStartHook) *Factory {
	f.postStartHooks = append(f.postStartHooks, hooks...)
	return f
}

// WithNamespaceInformer registers a namespace informer; Sync() is only
// triggered when the observed object is a namespace whose name is one of
// interestingNamespaces. Do not use this to register non-namespace informers.
func (f *Factory) WithNamespaceInformer(informer Informer, interestingNamespaces ...string) *Factory {
	entry := &namespaceInformer{
		informer: informer,
		nsFilter: namespaceChecker(interestingNamespaces),
	}
	f.namespaceInformers = append(f.namespaceInformers, entry)
	return f
}
// ResyncEvery causes Sync() to be invoked periodically at the given interval,
// independent of informer events. Without it, no periodic resync happens.
// This is useful to refresh every N minutes or to guard against stuck informers.
//
// Note: on such resyncs the SyncContext carries no object metadata or object,
// so Sync() implementations must be cautious about `nil` objects.
func (f *Factory) ResyncEvery(interval time.Duration) *Factory {
	f.resyncInterval = interval
	return f
}

// ResyncSchedule schedules sync() runs using Cron syntax, allowing finer
// control than ResyncEvery. Examples:
//
//	factory.New().ResyncSchedule("@every 1s").ToController()  // Every second
//	factory.New().ResyncSchedule("@hourly").ToController()    // Every hour
//	factory.New().ResyncSchedule("30 * * * *").ToController() // Every hour on the half hour
//
// Note: on such resyncs the SyncContext carries no object metadata or object,
// so Sync() implementations must be cautious about `nil` objects.
func (f *Factory) ResyncSchedule(schedules ...string) *Factory {
	f.resyncSchedules = append(f.resyncSchedules, schedules...)
	return f
}
// WithSyncContext supplies a custom, pre-existing SyncContext, which is useful
// in unit tests to inject an event recorder or mock runtime objects.
// When unset, ToController creates a SyncContext automatically.
func (f *Factory) WithSyncContext(ctx SyncContext) *Factory {
	f.syncContext = ctx
	return f
}

// WithSyncDegradedOnError wraps the controller sync() so that a returned error
// sets a "<Name>Degraded" condition on the operator through the given client.
func (f *Factory) WithSyncDegradedOnError(operatorClient operatorv1helpers.OperatorClient) *Factory {
	f.syncDegradedClient = operatorClient
	return f
}

// WithControllerInstanceName sets the instance name used to tell apart
// multiple deployments of the same controller.
func (f *Factory) WithControllerInstanceName(controllerInstanceName string) *Factory {
	f.controllerInstanceName = controllerInstanceName
	return f
}
// informerHandleTuple identifies a unique (informer, filter) registration so
// the same pair is not given duplicate event handlers in ToController.
// The filter is tracked by function pointer because funcs are not comparable.
type informerHandleTuple struct {
	informer Informer
	filter   uintptr
}
// ToController produces a runnable controller from the accumulated factory
// configuration. It panics when no sync function was provided or when a resync
// schedule does not parse, since both are wiring-time programmer errors.
func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller {
	if f.sync == nil {
		panic(fmt.Errorf("WithSync() must be used before calling ToController() in %q", name))
	}

	// Use the caller-provided sync context (tests) or create a default one.
	var ctx SyncContext
	if f.syncContext != nil {
		ctx = f.syncContext
	} else {
		ctx = NewSyncContext(name, eventRecorder)
	}

	// Parse all cron schedules up front so misconfiguration fails fast.
	var cronSchedules []cron.Schedule
	if len(f.resyncSchedules) > 0 {
		var errors []error
		for _, schedule := range f.resyncSchedules {
			if s, err := cron.ParseStandard(schedule); err != nil {
				errors = append(errors, err)
			} else {
				cronSchedules = append(cronSchedules, s)
			}
		}
		if err := errorutil.NewAggregate(errors); err != nil {
			panic(fmt.Errorf("failed to parse controller schedules for %q: %v", name, err))
		}
	}

	c := &baseController{
		name:                   name,
		controllerInstanceName: f.controllerInstanceName,
		syncDegradedClient:     f.syncDegradedClient,
		sync:                   f.sync,
		resyncEvery:            f.resyncInterval,
		resyncSchedules:        cronSchedules,
		cachesToSync:           append([]cache.InformerSynced{}, f.cachesToSync...),
		syncContext:            ctx,
		postStartHooks:         f.postStartHooks,
		cacheSyncTimeout:       defaultCacheSyncTimeout,
	}

	// avoid adding an informer more than once
	// An (informer, filter-pointer) tuple identifies a registration; entries that
	// share the same informer and filter get a single event handler.
	informerQueueKeySet := sets.New[informerHandleTuple]()
	for i := range f.informerQueueKeys {
		for d := range f.informerQueueKeys[i].informers {
			informer := f.informerQueueKeys[i].informers[d]
			queueKeyFn := f.informerQueueKeys[i].queueKeyFn
			tuple := informerHandleTuple{
				informer: informer,
				filter:   reflect.ValueOf(f.informerQueueKeys[i].filter).Pointer(),
			}
			if !informerQueueKeySet.Has(tuple) {
				sets.Insert(informerQueueKeySet, tuple)
				// NOTE(review): the error returned by AddEventHandler is ignored here — TODO confirm this is intentional.
				informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter))
			}
			c.cachesToSync = append(c.cachesToSync, informer.HasSynced)
		}
	}

	// avoid adding an informer more than once
	informerSet := sets.New[informerHandleTuple]()
	for i := range f.informers {
		for d := range f.informers[i].informers {
			informer := f.informers[i].informers[d]
			tuple := informerHandleTuple{
				informer: informer,
				filter:   reflect.ValueOf(f.informers[i].filter).Pointer(),
			}
			if !informerSet.Has(tuple) {
				sets.Insert(informerSet, tuple)
				informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.informers[i].filter))
			}
			c.cachesToSync = append(c.cachesToSync, informer.HasSynced)
		}
	}

	// Bare informers only contribute their cache-sync functions; their handlers
	// are managed by the caller.
	for i := range f.bareInformers {
		c.cachesToSync = append(c.cachesToSync, f.bareInformers[i].HasSynced)
	}

	for i := range f.namespaceInformers {
		f.namespaceInformers[i].informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.namespaceInformers[i].nsFilter))
		c.cachesToSync = append(c.cachesToSync, f.namespaceInformers[i].informer.HasSynced)
	}

	return c
}

View File

@@ -1,56 +0,0 @@
package factory
import (
"context"
"fmt"
"k8s.io/client-go/util/workqueue"
"github.com/openshift/library-go/pkg/operator/events"
)
// Controller interface represents a runnable Kubernetes controller.
// Cancelling the context passed to Run will cause the controller to shutdown.
// Number of workers determine how much parallel the job processing should be.
type Controller interface {
	// Run runs the controller and blocks until the controller is finished.
	// Number of workers can be specified via workers parameter.
	// This function will return when all internal loops are finished.
	// Note that having more than one worker usually means handing parallelization of Sync().
	Run(ctx context.Context, workers int)

	// Sync contain the main controller logic.
	// This should not be called directly, but can be used in unit tests to exercise the sync.
	Sync(ctx context.Context, controllerContext SyncContext) error

	// Name returns the controller name string.
	Name() string
}

// SyncContext interface represents a context given to the Sync() function where the main controller logic happen.
// SyncContext exposes controller name and give user access to the queue (for manual requeue).
// SyncContext also provides metadata about object that informers observed as changed.
type SyncContext interface {
	// Queue gives access to controller queue. This can be used for manual requeue, although if a Sync() function return
	// an error, the object is automatically re-queued. Use with caution.
	Queue() workqueue.RateLimitingInterface
	// QueueKey represents the queue key passed to the Sync function.
	QueueKey() string
	// Recorder provide access to event recorder.
	Recorder() events.Recorder
}

// SyncFunc is a function that contains the main controller logic.
// The ctx passed in is the main controller context; when it is cancelled the controller is being shut down.
// The controllerContext provides access to the controller name, queue and event recorder.
type SyncFunc func(ctx context.Context, controllerContext SyncContext) error
// ControllerFieldManager returns the field manager name used for server-side
// apply operations, in the form "<controllerName>-<usageName>".
// Plain concatenation avoids the interface boxing and reflection overhead of
// fmt.Sprintf for this trivially-formatted string.
func ControllerFieldManager(controllerName, usageName string) string {
	return controllerName + "-" + usageName
}
// ControllerInstanceName returns the controller instance identifier, in the
// form "<instanceName>-<controllerName>".
// Plain concatenation avoids the interface boxing and reflection overhead of
// fmt.Sprintf for this trivially-formatted string.
func ControllerInstanceName(instanceName, controllerName string) string {
	return instanceName + "-" + controllerName
}

View File

@@ -1,53 +0,0 @@
package eventstesting
import (
"context"
"fmt"
"testing"
"github.com/openshift/library-go/pkg/operator/events"
)
// TestingEventRecorder is an events.Recorder implementation that writes every
// recorded event to the test log instead of a real sink.
type TestingEventRecorder struct {
	t         *testing.T // test whose log receives the events
	component string     // component name reported for recorded events
}

// WithContext returns the recorder unchanged; the context is unused for test logging.
func (r *TestingEventRecorder) WithContext(ctx context.Context) events.Recorder {
	return r
}

// NewTestingEventRecorder provides event recorder that will log all recorded events to the error log.
func NewTestingEventRecorder(t *testing.T) events.Recorder {
	return &TestingEventRecorder{t: t, component: "test"}
}

// ComponentName returns the component this recorder reports events for.
func (r *TestingEventRecorder) ComponentName() string {
	return r.component
}

// ForComponent returns a copy of the recorder scoped to the given component name.
func (r *TestingEventRecorder) ForComponent(c string) events.Recorder {
	return &TestingEventRecorder{t: r.t, component: c}
}

// Shutdown is a no-op; the testing recorder holds no resources to release.
func (r *TestingEventRecorder) Shutdown() {}

// WithComponentSuffix returns a recorder whose component is "<current>-<suffix>".
func (r *TestingEventRecorder) WithComponentSuffix(suffix string) events.Recorder {
	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
}
// Event logs a normal event to the test log.
func (r *TestingEventRecorder) Event(reason, message string) {
	r.t.Logf("Event: %v: %v", reason, message)
}

// Eventf logs a normal event with Sprintf-style formatting.
func (r *TestingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
	r.Event(reason, fmt.Sprintf(messageFmt, args...))
}

// Warning logs a warning event to the test log.
func (r *TestingEventRecorder) Warning(reason, message string) {
	r.t.Logf("Warning: %v: %v", reason, message)
}

// Warningf logs a warning event with Sprintf-style formatting.
func (r *TestingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
}

View File

@@ -1,58 +0,0 @@
package eventstesting
import (
"context"
"testing"
"github.com/openshift/library-go/pkg/operator/events"
)
// EventRecorder fans every recorded event out to both a real events.Recorder
// and a TestingEventRecorder, so tests see events in the test log while the
// real recording still takes place.
type EventRecorder struct {
	realEventRecorder    events.Recorder       // production recorder, receives every event
	testingEventRecorder *TestingEventRecorder // mirrors events into the test log
}

// WithContext returns the recorder unchanged; the context is unused here.
func (e *EventRecorder) WithContext(ctx context.Context) events.Recorder {
	return e
}

// NewEventRecorder wraps r so every recorded event is also logged via t.
func NewEventRecorder(t *testing.T, r events.Recorder) events.Recorder {
	return &EventRecorder{
		testingEventRecorder: NewTestingEventRecorder(t).(*TestingEventRecorder),
		realEventRecorder:    r,
	}
}

// Event records a normal event on both underlying recorders.
func (e *EventRecorder) Event(reason, message string) {
	e.realEventRecorder.Event(reason, message)
	e.testingEventRecorder.Event(reason, message)
}

// Shutdown is a no-op; the wrapped recorders own their resources.
func (e *EventRecorder) Shutdown() {}

// Eventf records a formatted normal event on both underlying recorders.
func (e *EventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
	e.realEventRecorder.Eventf(reason, messageFmt, args...)
	e.testingEventRecorder.Eventf(reason, messageFmt, args...)
}

// Warning records a warning event on both underlying recorders.
func (e *EventRecorder) Warning(reason, message string) {
	e.realEventRecorder.Warning(reason, message)
	e.testingEventRecorder.Warning(reason, message)
}

// Warningf records a formatted warning event on both underlying recorders.
func (e *EventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
	e.realEventRecorder.Warningf(reason, messageFmt, args...)
	e.testingEventRecorder.Warningf(reason, messageFmt, args...)
}

// ForComponent returns the recorder itself; component scoping is not
// supported by this test wrapper.
func (e *EventRecorder) ForComponent(componentName string) events.Recorder {
	return e
}

// WithComponentSuffix returns the recorder itself; component scoping is not
// supported by this test wrapper.
func (e *EventRecorder) WithComponentSuffix(componentNameSuffix string) events.Recorder {
	return e
}

// ComponentName returns the fixed component name for this test wrapper.
func (e *EventRecorder) ComponentName() string {
	return "test-recorder"
}

View File

@@ -1,77 +0,0 @@
package management
import (
v1 "github.com/openshift/api/operator/v1"
)
// Package-level, mutable opt-in/opt-out flags. These are intended to be set
// once at operator wiring time (not concurrency-safe).
var (
	allowOperatorUnmanagedState = true
	allowOperatorRemovedState   = true
)

// SetOperatorAlwaysManaged is one time choice when an operator want to opt-out from supporting the "unmanaged" state.
// This is a case of control plane operators or operators that are required to always run otherwise the cluster will
// get into unstable state or critical components will stop working.
func SetOperatorAlwaysManaged() {
	allowOperatorUnmanagedState = false
}

// SetOperatorUnmanageable is one time choice when an operator wants to support the "unmanaged" state.
// This is the default setting, provided here mostly for unit tests.
func SetOperatorUnmanageable() {
	allowOperatorUnmanagedState = true
}

// SetOperatorNotRemovable is one time choice the operator author can make to indicate the operator does not support
// removing of his operand. This makes sense for operators like kube-apiserver where removing operand will lead to a
// bricked, non-automatically recoverable state.
func SetOperatorNotRemovable() {
	allowOperatorRemovedState = false
}

// SetOperatorRemovable is one time choice the operator author can make to indicate the operator supports
// removing of his operand.
// This is the default setting, provided here mostly for unit tests.
func SetOperatorRemovable() {
	allowOperatorRemovedState = true
}

// IsOperatorAlwaysManaged means the operator can't be set to unmanaged state.
func IsOperatorAlwaysManaged() bool {
	return !allowOperatorUnmanagedState
}

// IsOperatorNotRemovable means the operator can't be set to removed state.
func IsOperatorNotRemovable() bool {
	return !allowOperatorRemovedState
}

// IsOperatorRemovable means the operator can be set to removed state.
func IsOperatorRemovable() bool {
	return allowOperatorRemovedState
}

// IsOperatorUnknownState reports whether state is none of the three known
// management states (Managed, Removed, Unmanaged).
func IsOperatorUnknownState(state v1.ManagementState) bool {
	switch state {
	case v1.Managed, v1.Removed, v1.Unmanaged:
		return false
	default:
		return true
	}
}

// IsOperatorManaged indicates whether the operator management state allows the control loop to proceed and manage the operand.
func IsOperatorManaged(state v1.ManagementState) bool {
	// Operators that opted out of unmanaged/removed support are always managed,
	// regardless of the requested state.
	if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() {
		return true
	}
	switch state {
	case v1.Managed:
		return true
	case v1.Removed:
		return false
	case v1.Unmanaged:
		return false
	}
	// Unknown states default to managed.
	return true
}