🌱 use SDK basecontroller for better logging. (#1269)

* Use basecontroller in sdk-go instead for better logging

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Rename to fakeSyncContext

Signed-off-by: Jian Qiu <jqiu@redhat.com>

---------

Signed-off-by: Jian Qiu <jqiu@redhat.com>
This commit is contained in:
Jian Qiu
2025-12-01 11:07:02 +08:00
committed by GitHub
parent 26edb9423a
commit 33310619d9
199 changed files with 794 additions and 3268 deletions

View File

@@ -1,287 +0,0 @@
package factory
import (
"context"
"errors"
"fmt"
"sync"
"time"
applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
"github.com/robfig/cron"
apierrors "k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
operatorv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/library-go/pkg/operator/management"
operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
)
// SyntheticRequeueError can be returned from sync() in case of forcing a sync() retry artificially.
// This can be also done by re-adding the key to queue, but this is cheaper and more convenient.
var SyntheticRequeueError = errors.New("synthetic requeue request")

// defaultCacheSyncTimeout bounds how long Run() waits for informer caches before giving up.
var defaultCacheSyncTimeout = 10 * time.Minute
// baseController represents generic Kubernetes controller boiler-plate
type baseController struct {
	// name is the controller name used in logs, events and condition types.
	name string
	// controllerInstanceName distinguishes multiple instances of the same controller.
	controllerInstanceName string
	// cachesToSync are waited on before any worker starts.
	cachesToSync []cache.InformerSynced
	// sync is the user-provided reconciliation function.
	sync func(ctx context.Context, controllerContext SyncContext) error
	// syncContext carries the queue, queue key and event recorder passed to sync().
	syncContext SyncContext
	// syncDegradedClient, when set, mirrors sync() errors into a Degraded condition.
	syncDegradedClient operatorv1helpers.OperatorClient
	// resyncEvery triggers a periodic sync when > 0.
	resyncEvery time.Duration
	// resyncSchedules triggers cron-scheduled syncs when non-empty.
	resyncSchedules []cron.Schedule
	// postStartHooks run asynchronously after the controller starts.
	postStartHooks []PostStartHook
	// cacheSyncTimeout bounds the initial cache sync wait.
	cacheSyncTimeout time.Duration
}

// compile-time check that baseController satisfies Controller.
var _ Controller = &baseController{}
// Name returns the controller's name.
func (c baseController) Name() string {
	return c.name
}
// ControllerInstanceName returns the instance name of this controller, which is
// useful when the same controller is instantiated multiple times.
func (c baseController) ControllerInstanceName() string {
	return c.controllerInstanceName
}
// scheduledJob is a cron.Job that triggers a controller sync by enqueueing a key.
type scheduledJob struct {
	// queue is the controller work queue the job feeds.
	queue workqueue.RateLimitingInterface
	// name is the controller name, used only for logging.
	name string
}
// newScheduledJob wraps a controller work queue into a cron.Job; each firing of the
// job enqueues the default queue key and thereby triggers a sync.
func newScheduledJob(name string, queue workqueue.RateLimitingInterface) cron.Job {
	job := &scheduledJob{name: name, queue: queue}
	return job
}
// Run satisfies cron.Job: it enqueues the default queue key so the controller syncs.
func (s *scheduledJob) Run() {
	klog.V(4).Infof("Triggering scheduled %q controller run", s.name)
	s.queue.Add(DefaultQueueKey)
}
// waitForNamedCacheSync blocks until all given caches are synced or stopCh closes,
// logging progress under the controller's name. It returns an error on failure.
func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error {
	klog.Infof("Waiting for caches to sync for %s", controllerName)
	if synced := cache.WaitForCacheSync(stopCh, cacheSyncs...); !synced {
		return fmt.Errorf("unable to sync caches for %s", controllerName)
	}
	klog.Infof("Caches are synced for %s ", controllerName)
	return nil
}
// Run starts the controller: it waits for informer caches to sync, launches the
// requested number of queue workers plus optional cron-scheduled and periodic
// resyncs and the post-start hooks, then blocks until ctx is cancelled and shuts
// everything down.
func (c *baseController) Run(ctx context.Context, workers int) {
	// HandleCrash recovers panics; degradedPanicHandler additionally reports a
	// Degraded condition when a degraded client is configured.
	defer utilruntime.HandleCrash(c.degradedPanicHandler)

	// give caches c.cacheSyncTimeout (10 minutes by default) to sync
	cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout)
	defer cacheSyncCancel()
	err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...)
	if err != nil {
		select {
		case <-ctx.Done():
			// Exit gracefully because the controller was requested to stop.
			return
		default:
			// If caches did not sync after the timeout, it has taken oddly long and
			// we should provide feedback. Since the control loops will never start,
			// it is safer to exit with a good message than to continue with a dead loop.
			// TODO: Consider making this behavior configurable.
			klog.Exit(err)
		}
	}

	var workerWg sync.WaitGroup
	defer func() {
		// log only after all workers finished
		defer klog.Infof("All %s workers have been terminated", c.name)
		workerWg.Wait()
	}()

	// queueContext is used to track and initiate queue shutdown
	queueContext, queueContextCancel := context.WithCancel(context.TODO())

	for i := 1; i <= workers; i++ {
		klog.Infof("Starting #%d worker of %s controller ...", i, c.name)
		workerWg.Add(1)
		go func() {
			defer func() {
				klog.Infof("Shutting down worker of %s controller ...", c.name)
				workerWg.Done()
			}()
			c.runWorker(queueContext)
		}()
	}

	// if scheduled run is requested, run the cron scheduler
	if c.resyncSchedules != nil {
		scheduler := cron.New()
		for _, s := range c.resyncSchedules {
			scheduler.Schedule(s, newScheduledJob(c.name, c.syncContext.Queue()))
		}
		scheduler.Start()
		defer scheduler.Stop()
	}

	// runPeriodicalResync is independent from queue
	if c.resyncEvery > 0 {
		workerWg.Add(1)
		if c.resyncEvery < 60*time.Second {
			// Warn about too fast resyncs as they might drain the operators QPS.
			// This event is cheap as it is only emitted on operator startup.
			c.syncContext.Recorder().Warningf("FastControllerResync", "Controller %q resync interval is set to %s which might lead to client request throttling", c.name, c.resyncEvery)
		}
		go func() {
			defer workerWg.Done()
			wait.UntilWithContext(ctx, func(ctx context.Context) { c.syncContext.Queue().Add(DefaultQueueKey) }, c.resyncEvery)
		}()
	}

	// run post-start hooks (custom triggers, etc.)
	if len(c.postStartHooks) > 0 {
		var hookWg sync.WaitGroup
		defer func() {
			hookWg.Wait() // wait for the post-start hooks
			klog.Infof("All %s post start hooks have been terminated", c.name)
		}()
		for i := range c.postStartHooks {
			hookWg.Add(1)
			go func(index int) {
				defer hookWg.Done()
				if err := c.postStartHooks[index](ctx, c.syncContext); err != nil {
					klog.Warningf("%s controller post start hook error: %v", c.name, err)
				}
			}(i)
		}
	}

	// Handle controller shutdown

	<-ctx.Done()                     // wait for controller context to be cancelled
	c.syncContext.Queue().ShutDown() // shutdown the controller queue first
	queueContextCancel()             // cancel the queue context, which tell workers to initiate shutdown

	// Wait for all workers to finish their job.
	// at this point the Run() can hang and caller have to implement the logic that will kill
	// this controller (SIGKILL).
	klog.Infof("Shutting down %s ...", c.name)
}
// Sync invokes the controller's sync function directly; intended for unit tests.
func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext) error {
	return c.sync(ctx, syncCtx)
}
// runWorker runs a single queue worker until queueCtx is cancelled. The worker loop
// is restarted by wait.UntilWithContext should it ever return (e.g. after a panic
// recovered by HandleCrash).
func (c *baseController) runWorker(queueCtx context.Context) {
	workFn := func(queueCtx context.Context) {
		defer utilruntime.HandleCrash(c.degradedPanicHandler)
		for {
			select {
			case <-queueCtx.Done():
				return
			default:
				c.processNextWorkItem(queueCtx)
			}
		}
	}
	wait.UntilWithContext(queueCtx, workFn, 1*time.Second)
}
// reconcile wraps the sync() call and, when a degraded operator client is configured,
// mirrors the sync result into the Degraded condition.
func (c *baseController) reconcile(ctx context.Context, syncCtx SyncContext) error {
	syncErr := c.sync(ctx, syncCtx)
	degradedErr := c.reportDegraded(ctx, syncErr)
	// A removable operator tolerates a missing CR: surface the original sync error
	// rather than the NotFound from the status update.
	if apierrors.IsNotFound(degradedErr) && management.IsOperatorRemovable() {
		return syncErr
	}
	return degradedErr
}
// degradedPanicHandler converts a recovered panic into a Degraded condition when a
// degraded operator client is available; otherwise it defers to the default handler.
func (c *baseController) degradedPanicHandler(panicVal interface{}) {
	if c.syncDegradedClient == nil {
		// no client to report through; let the existing panic handlers do the work
		return
	}
	_ = c.reportDegraded(context.TODO(), fmt.Errorf("panic caught:\n%v", panicVal))
}
// reportDegraded updates the operator status with an indication of degraded-ness and
// returns the error the caller should propagate. When no degraded client is
// configured it is a pass-through for reportedError.
//
// The condition is built once: the "AsExpected"/False base is overridden with
// "SyncError"/True plus the error message when reportedError is non-nil, removing the
// duplicated builder chains the two branches previously carried.
func (c *baseController) reportDegraded(ctx context.Context, reportedError error) error {
	if c.syncDegradedClient == nil {
		return reportedError
	}

	condition := applyoperatorv1.OperatorCondition().
		WithType(c.name + "Degraded").
		WithStatus(operatorv1.ConditionFalse).
		WithReason("AsExpected")
	if reportedError != nil {
		// later With* calls overwrite the base values set above
		condition = condition.
			WithStatus(operatorv1.ConditionTrue).
			WithReason("SyncError").
			WithMessage(reportedError.Error())
	}
	status := applyoperatorv1.OperatorStatus().WithConditions(condition)

	updateErr := c.syncDegradedClient.ApplyOperatorStatus(ctx, ControllerFieldManager(c.name, "reportDegraded"), status)
	if reportedError != nil {
		// the sync error takes precedence; the status failure is only logged
		if updateErr != nil {
			klog.Warningf("Updating status of %q failed: %v", c.Name(), updateErr)
		}
		return reportedError
	}
	return updateErr
}
// processNextWorkItem pulls one key from the queue, runs reconcile for it, and does
// the queue bookkeeping: rate-limited requeue on error, Forget on success.
func (c *baseController) processNextWorkItem(queueCtx context.Context) {
	key, quit := c.syncContext.Queue().Get()
	if quit {
		return
	}
	defer c.syncContext.Queue().Done(key)

	// give the sync function access to the key that triggered it
	syncCtx := c.syncContext.(syncContext)
	var ok bool
	syncCtx.queueKey, ok = key.(string)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("%q controller failed to process key %q (not a string)", c.name, key))
		return
	}

	if err := c.reconcile(queueCtx, syncCtx); err != nil {
		// use errors.Is so a wrapped SyntheticRequeueError is still recognized
		if errors.Is(err, SyntheticRequeueError) {
			// logging this helps detecting wedged controllers with missing pre-requirements
			klog.V(5).Infof("%q controller requested synthetic requeue with key %q", c.name, key)
		} else {
			// "key" is the DefaultQueueKey used by string-trigger controllers; for those
			// the shorter message form is used unless verbose logging is on
			if klog.V(4).Enabled() || key != "key" {
				utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err))
			} else {
				utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err))
			}
		}
		c.syncContext.Queue().AddRateLimited(key)
		return
	}

	c.syncContext.Queue().Forget(key)
}

View File

@@ -1,117 +0,0 @@
package factory
import (
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/openshift/library-go/pkg/operator/events"
)
// syncContext implements SyncContext and provide user access to queue and object that caused
// the sync to be triggered.
type syncContext struct {
	// eventRecorder emits events attributed to this controller.
	eventRecorder events.Recorder
	// queue is the controller's rate-limited work queue.
	queue workqueue.RateLimitingInterface
	// queueKey is the key that triggered the current sync; set per work item.
	queueKey string
}

// compile-time check that syncContext satisfies SyncContext.
var _ SyncContext = syncContext{}
// NewSyncContext returns a SyncContext backed by a named rate-limited work queue and
// an event recorder suffixed with the lower-cased controller name.
func NewSyncContext(name string, recorder events.Recorder) SyncContext {
	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name)
	return syncContext{
		queue:         queue,
		eventRecorder: recorder.WithComponentSuffix(strings.ToLower(name)),
	}
}
// Queue exposes the controller work queue.
func (c syncContext) Queue() workqueue.RateLimitingInterface {
	return c.queue
}
// QueueKey returns the key that triggered the current sync.
func (c syncContext) QueueKey() string {
	return c.queueKey
}
// Recorder exposes the controller's event recorder.
func (c syncContext) Recorder() events.Recorder {
	return c.eventRecorder
}
// eventHandler provides the default event handler that is wired into informers
// passed to the controller factory. Every add/update/delete event is mapped to work
// queue keys via queueKeysFunc; an optional filter suppresses unwanted events.
//
// Fixes: the update and delete error paths previously logged the nil runtimeObj
// instead of the object actually received, and the delete path wrongly said
// "updated object".
func (c syncContext) eventHandler(queueKeysFunc ObjectQueueKeysFunc, filter EventFilterFunc) cache.ResourceEventHandler {
	resourceEventHandler := cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			runtimeObj, ok := obj.(runtime.Object)
			if !ok {
				utilruntime.HandleError(fmt.Errorf("added object %+v is not runtime Object", obj))
				return
			}
			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
		},
		UpdateFunc: func(old, new interface{}) {
			runtimeObj, ok := new.(runtime.Object)
			if !ok {
				// report the object we actually received; runtimeObj is nil here
				utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", new))
				return
			}
			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
		},
		DeleteFunc: func(obj interface{}) {
			runtimeObj, ok := obj.(runtime.Object)
			if !ok {
				// the object may arrive as a tombstone carrying its last known state
				if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
					c.enqueueKeys(queueKeysFunc(tombstone.Obj.(runtime.Object))...)
					return
				}
				utilruntime.HandleError(fmt.Errorf("deleted object %+v is not runtime Object", obj))
				return
			}
			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
		},
	}
	if filter == nil {
		return resourceEventHandler
	}
	return cache.FilteringResourceEventHandler{
		FilterFunc: filter,
		Handler:    resourceEventHandler,
	}
}
// enqueueKeys adds every given key to the controller work queue.
func (c syncContext) enqueueKeys(keys ...string) {
	for i := range keys {
		c.queue.Add(keys[i])
	}
}
// namespaceChecker returns a predicate that reports whether an input object (or its
// tombstone) is a namespace whose name is among interestingNamespaces.
func namespaceChecker(interestingNamespaces []string) func(obj interface{}) bool {
	// a set gives O(1) membership checks on every informer event
	wanted := sets.New(interestingNamespaces...)
	return func(obj interface{}) bool {
		if ns, ok := obj.(*corev1.Namespace); ok {
			return wanted.Has(ns.Name)
		}
		// the namespace might already be deleted and delivered as a tombstone
		if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
			if ns, ok := tombstone.Obj.(*corev1.Namespace); ok {
				return wanted.Has(ns.Name)
			}
		}
		return false
	}
}

View File

@@ -1,26 +0,0 @@
package factory
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
)
// ObjectNameToKey maps a runtime object to its metadata name, or "" when the object
// does not expose ObjectMeta.
func ObjectNameToKey(obj runtime.Object) string {
	if metaObj, ok := obj.(metav1.ObjectMetaAccessor); ok {
		return metaObj.GetObjectMeta().GetName()
	}
	return ""
}
// NamesFilter returns an event filter that passes only objects whose metadata name
// is one of the given names.
func NamesFilter(names ...string) EventFilterFunc {
	allowed := sets.New(names...)
	return func(obj interface{}) bool {
		metaObj, ok := obj.(metav1.ObjectMetaAccessor)
		if !ok {
			return false
		}
		return allowed.Has(metaObj.GetObjectMeta().GetName())
	}
}

View File

@@ -1,341 +0,0 @@
package factory
import (
"context"
"fmt"
"reflect"
"time"
"github.com/robfig/cron"
"k8s.io/apimachinery/pkg/runtime"
errorutil "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"github.com/openshift/library-go/pkg/operator/events"
operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
)
// DefaultQueueKey is the queue key used for string trigger based controllers.
const DefaultQueueKey = "key"
// DefaultQueueKeysFunc returns a slice containing only DefaultQueueKey, regardless
// of the observed object.
func DefaultQueueKeysFunc(_ runtime.Object) []string {
	return []string{DefaultQueueKey}
}
// Factory is generator that generate standard Kubernetes controllers.
// Factory is really generic and should be only used for simple controllers that does not require special stuff..
type Factory struct {
	// sync is the user-provided reconciliation function (required).
	sync SyncFunc
	// syncContext, when set, overrides the auto-created sync context.
	syncContext SyncContext
	// syncDegradedClient, when set, mirrors sync errors into a Degraded condition.
	syncDegradedClient operatorv1helpers.OperatorClient
	// resyncInterval triggers a periodic sync when > 0.
	resyncInterval time.Duration
	// resyncSchedules are cron expressions parsed in ToController().
	resyncSchedules []string
	// informers get the default event handler, optionally filtered.
	informers []filteredInformers
	// informerQueueKeys get an event handler with a custom queue-key function.
	informerQueueKeys []informersWithQueueKey
	// bareInformers only contribute cache-sync checks; no handlers are added.
	bareInformers []Informer
	// postStartHooks run asynchronously after the controller starts.
	postStartHooks []PostStartHook
	// namespaceInformers trigger syncs only for namespaces of interest.
	namespaceInformers []*namespaceInformer
	// cachesToSync are additional caches waited on before workers start.
	cachesToSync []cache.InformerSynced
	// controllerInstanceName distinguishes multiple instances of the same controller.
	controllerInstanceName string
}
// Informer represents any structure that allow to register event handlers and informs if caches are synced.
// Any SharedInformer will comply.
type Informer interface {
	// AddEventHandler registers a handler for add/update/delete events.
	AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error)
	// HasSynced reports whether the informer's cache is synced.
	HasSynced() bool
}
// namespaceInformer pairs an informer with a namespace-name event filter.
type namespaceInformer struct {
	informer Informer
	// nsFilter passes only namespaces of interest (see namespaceChecker).
	nsFilter EventFilterFunc
}

// informersWithQueueKey groups informers sharing a queue-key function and filter.
type informersWithQueueKey struct {
	informers []Informer
	// filter, when non-nil, suppresses events before queue keys are computed.
	filter EventFilterFunc
	// queueKeyFn maps an observed object to work queue keys.
	queueKeyFn ObjectQueueKeysFunc
}

// filteredInformers groups informers sharing an event filter; the default
// queue key is used for their events.
type filteredInformers struct {
	informers []Informer
	filter    EventFilterFunc
}
// PostStartHook specify a function that will run after controller is started.
// The context is cancelled when the controller is asked to shutdown and the post start hook should terminate as well.
// The syncContext allow access to controller queue and event recorder.
type PostStartHook func(ctx context.Context, syncContext SyncContext) error

// ObjectQueueKeyFunc is used to make a string work queue key out of the runtime object that is passed to it.
// This can extract the "namespace/name" if you need to or just return "key" if you building controller that only use string
// triggers.
// DEPRECATED: use ObjectQueueKeysFunc instead
type ObjectQueueKeyFunc func(runtime.Object) string

// ObjectQueueKeysFunc is used to make a string work queue keys out of the runtime object that is passed to it.
// This can extract the "namespace/name" if you need to or just return "key" if you building controller that only use string
// triggers.
type ObjectQueueKeysFunc func(runtime.Object) []string

// EventFilterFunc is used to filter informer events to prevent Sync() from being called
type EventFilterFunc func(obj interface{}) bool
// New returns an empty factory ready to be configured with the With* builders.
func New() *Factory {
	return new(Factory)
}
// WithSync sets the controller synchronization function — the core of the
// controller, holding the main reconciliation logic. It is required.
func (f *Factory) WithSync(syncFn SyncFunc) *Factory {
	f.sync = syncFn
	return f
}
// WithInformers registers the given informers with the default (unfiltered) event
// handler; any observed event triggers Sync().
func (f *Factory) WithInformers(informers ...Informer) *Factory {
	return f.WithFilteredEventsInformers(nil, informers...)
}
// WithFilteredEventsInformers registers the given informers and tracks their
// cache-sync state. Observed events trigger Sync() unless rejected by filter.
func (f *Factory) WithFilteredEventsInformers(filter EventFilterFunc, informers ...Informer) *Factory {
	entry := filteredInformers{
		informers: informers,
		filter:    filter,
	}
	f.informers = append(f.informers, entry)
	return f
}
// WithBareInformers registers informers that already carry their own event handlers;
// no additional handlers are added, but the controller still waits for their caches
// to sync. Existing handlers must respect the queue key convention, or the sync()
// implementation must handle custom keys.
func (f *Factory) WithBareInformers(informers ...Informer) *Factory {
	for _, informer := range informers {
		f.bareInformers = append(f.bareInformers, informer)
	}
	return f
}
// WithInformersQueueKeyFunc registers the given informers; each observed event is
// mapped to a single work queue key via queueKeyFn.
func (f *Factory) WithInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, informers ...Informer) *Factory {
	// adapt the single-key function to the multi-key form stored internally
	adapter := func(o runtime.Object) []string {
		return []string{queueKeyFn(o)}
	}
	f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{
		informers:  informers,
		queueKeyFn: adapter,
	})
	return f
}
// WithFilteredEventsInformersQueueKeyFunc registers the given informers with a
// single-key queue-key function and an event filter; filtered-out events do not
// trigger Sync().
func (f *Factory) WithFilteredEventsInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, filter EventFilterFunc, informers ...Informer) *Factory {
	// adapt the single-key function to the multi-key form stored internally
	adapter := func(o runtime.Object) []string {
		return []string{queueKeyFn(o)}
	}
	f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{
		informers:  informers,
		filter:     filter,
		queueKeyFn: adapter,
	})
	return f
}
// WithInformersQueueKeysFunc registers the given informers; each observed event is
// mapped to one or more work queue keys via queueKeyFn.
func (f *Factory) WithInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, informers ...Informer) *Factory {
	entry := informersWithQueueKey{
		informers:  informers,
		queueKeyFn: queueKeyFn,
	}
	f.informerQueueKeys = append(f.informerQueueKeys, entry)
	return f
}
// WithFilteredEventsInformersQueueKeysFunc registers the given informers with a
// multi-key queue-key function and an event filter; filtered-out events do not
// trigger Sync().
func (f *Factory) WithFilteredEventsInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, filter EventFilterFunc, informers ...Informer) *Factory {
	entry := informersWithQueueKey{
		informers:  informers,
		filter:     filter,
		queueKeyFn: queueKeyFn,
	}
	f.informerQueueKeys = append(f.informerQueueKeys, entry)
	return f
}
// WithPostStartHooks registers functions that run asynchronously once the
// controller is started via Run().
func (f *Factory) WithPostStartHooks(hooks ...PostStartHook) *Factory {
	for _, hook := range hooks {
		f.postStartHooks = append(f.postStartHooks, hook)
	}
	return f
}
// WithNamespaceInformer registers a namespace informer whose events only trigger
// Sync() when the observed object is a namespace named in interestingNamespaces.
// Do not use this to register non-namespace informers.
func (f *Factory) WithNamespaceInformer(informer Informer, interestingNamespaces ...string) *Factory {
	entry := &namespaceInformer{
		informer: informer,
		nsFilter: namespaceChecker(interestingNamespaces),
	}
	f.namespaceInformers = append(f.namespaceInformers, entry)
	return f
}
// ResyncEvery causes Sync() to be called periodically, regardless of informers.
// Useful to refresh every N minutes or to guard against stuck informers; without it
// no periodical resync happens.
// Note: the sync context for a periodic run carries no object metadata, so Sync()
// must be cautious about `nil` objects.
func (f *Factory) ResyncEvery(interval time.Duration) *Factory {
	f.resyncInterval = interval
	return f
}
// ResyncSchedule supplies cron-syntax schedules for sync() runs, allowing finer
// scheduling than ResyncEvery. Examples:
//
//	factory.New().ResyncSchedule("@every 1s").ToController() // Every second
//	factory.New().ResyncSchedule("@hourly").ToController()   // Every hour
//	factory.New().ResyncSchedule("30 * * * *").ToController() // Every hour on the half hour
//
// Note: the sync context for a scheduled run carries no object metadata, so Sync()
// must be cautious about `nil` objects.
func (f *Factory) ResyncSchedule(schedules ...string) *Factory {
	for _, schedule := range schedules {
		f.resyncSchedules = append(f.resyncSchedules, schedule)
	}
	return f
}
// WithSyncContext supplies a custom, existing sync context — mainly for unit tests
// to override the event recorder or mock runtime objects. When not called, the
// factory creates a SyncContext automatically.
func (f *Factory) WithSyncContext(ctx SyncContext) *Factory {
	f.syncContext = ctx
	return f
}
// WithSyncDegradedOnError wraps the controller sync(): when it returns an error, the
// given operator client sets a "<Name>Degraded" condition derived from the
// controller name.
func (f *Factory) WithSyncDegradedOnError(operatorClient operatorv1helpers.OperatorClient) *Factory {
	f.syncDegradedClient = operatorClient
	return f
}
// WithControllerInstanceName sets the controller instance name, useful when the
// same controller is used multiple times.
func (f *Factory) WithControllerInstanceName(controllerInstanceName string) *Factory {
	f.controllerInstanceName = controllerInstanceName
	return f
}
// informerHandleTuple identifies an (informer, filter) pair so the same informer
// with the same filter is registered only once.
type informerHandleTuple struct {
	informer Informer
	// filter is the filter function's code pointer (reflect.Value.Pointer), used
	// purely as a de-duplication key.
	filter uintptr
}
// ToController produces a runnable controller from the accumulated factory
// settings. It panics when WithSync was not called or when any resync schedule
// fails to parse.
func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller {
	if f.sync == nil {
		panic(fmt.Errorf("WithSync() must be used before calling ToController() in %q", name))
	}

	// use the caller-supplied sync context (unit tests) or create the default one
	var ctx SyncContext
	if f.syncContext != nil {
		ctx = f.syncContext
	} else {
		ctx = NewSyncContext(name, eventRecorder)
	}

	// parse all cron schedules up front and fail fast on any invalid expression
	var cronSchedules []cron.Schedule
	if len(f.resyncSchedules) > 0 {
		var errors []error
		for _, schedule := range f.resyncSchedules {
			if s, err := cron.ParseStandard(schedule); err != nil {
				errors = append(errors, err)
			} else {
				cronSchedules = append(cronSchedules, s)
			}
		}
		if err := errorutil.NewAggregate(errors); err != nil {
			panic(fmt.Errorf("failed to parse controller schedules for %q: %v", name, err))
		}
	}

	c := &baseController{
		name:                   name,
		controllerInstanceName: f.controllerInstanceName,
		syncDegradedClient:     f.syncDegradedClient,
		sync:                   f.sync,
		resyncEvery:            f.resyncInterval,
		resyncSchedules:        cronSchedules,
		cachesToSync:           append([]cache.InformerSynced{}, f.cachesToSync...),
		syncContext:            ctx,
		postStartHooks:         f.postStartHooks,
		cacheSyncTimeout:       defaultCacheSyncTimeout,
	}

	// avoid adding an informer more than once; the informer identity plus the
	// filter function's code pointer form the de-duplication key
	informerQueueKeySet := sets.New[informerHandleTuple]()
	for i := range f.informerQueueKeys {
		for d := range f.informerQueueKeys[i].informers {
			informer := f.informerQueueKeys[i].informers[d]
			queueKeyFn := f.informerQueueKeys[i].queueKeyFn
			tuple := informerHandleTuple{
				informer: informer,
				filter:   reflect.ValueOf(f.informerQueueKeys[i].filter).Pointer(),
			}
			if !informerQueueKeySet.Has(tuple) {
				sets.Insert(informerQueueKeySet, tuple)
				// NOTE(review): the error returned by AddEventHandler is discarded here
				// (and in the loops below) — confirm whether registration failures
				// should be surfaced.
				informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter))
			}
			c.cachesToSync = append(c.cachesToSync, informer.HasSynced)
		}
	}

	// avoid adding an informer more than once
	informerSet := sets.New[informerHandleTuple]()
	for i := range f.informers {
		for d := range f.informers[i].informers {
			informer := f.informers[i].informers[d]
			tuple := informerHandleTuple{
				informer: informer,
				filter:   reflect.ValueOf(f.informers[i].filter).Pointer(),
			}
			if !informerSet.Has(tuple) {
				sets.Insert(informerSet, tuple)
				informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.informers[i].filter))
			}
			c.cachesToSync = append(c.cachesToSync, informer.HasSynced)
		}
	}

	// bare informers only contribute their cache-sync checks; no handlers are added
	for i := range f.bareInformers {
		c.cachesToSync = append(c.cachesToSync, f.bareInformers[i].HasSynced)
	}

	// namespace informers use the default queue key and a namespace-name filter
	for i := range f.namespaceInformers {
		f.namespaceInformers[i].informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.namespaceInformers[i].nsFilter))
		c.cachesToSync = append(c.cachesToSync, f.namespaceInformers[i].informer.HasSynced)
	}

	return c
}

View File

@@ -1,56 +0,0 @@
package factory
import (
"context"
"fmt"
"k8s.io/client-go/util/workqueue"
"github.com/openshift/library-go/pkg/operator/events"
)
// Controller interface represents a runnable Kubernetes controller.
// Cancelling the syncContext passed will cause the controller to shutdown.
// Number of workers determine how much parallel the job processing should be.
type Controller interface {
	// Run runs the controller and blocks until the controller is finished.
	// Number of workers can be specified via workers parameter.
	// This function will return when all internal loops are finished.
	// Note that having more than one worker usually means handing parallelization of Sync().
	Run(ctx context.Context, workers int)

	// Sync contain the main controller logic.
	// This should not be called directly, but can be used in unit tests to exercise the sync.
	Sync(ctx context.Context, controllerContext SyncContext) error

	// Name returns the controller name string.
	Name() string
}
// SyncContext interface represents a context given to the Sync() function where the main controller logic happen.
// SyncContext exposes controller name and give user access to the queue (for manual requeue).
// SyncContext also provides metadata about object that informers observed as changed.
type SyncContext interface {
	// Queue gives access to controller queue. This can be used for manual requeue, although if a Sync() function return
	// an error, the object is automatically re-queued. Use with caution.
	Queue() workqueue.RateLimitingInterface

	// QueueKey represents the queue key passed to the Sync function.
	QueueKey() string

	// Recorder provide access to event recorder.
	Recorder() events.Recorder
}
// SyncFunc is a function that contain main controller logic.
// The ctx passed is the main controller context; when cancelled it means the controller is being shut down.
// The controllerContext provides access to controller name, queue and event recorder.
type SyncFunc func(ctx context.Context, controllerContext SyncContext) error
// ControllerFieldManager builds the field-manager identity used by a controller for
// server-side apply calls, in the form "<controller>-<usage>".
func ControllerFieldManager(controllerName, usageName string) string {
	const format = "%s-%s"
	return fmt.Sprintf(format, controllerName, usageName)
}
// ControllerInstanceName builds a controller instance identifier in the form
// "<instance>-<controller>".
func ControllerInstanceName(instanceName, controllerName string) string {
	const format = "%s-%s"
	return fmt.Sprintf(format, instanceName, controllerName)
}

View File

@@ -1,53 +0,0 @@
package eventstesting
import (
"context"
"fmt"
"testing"
"github.com/openshift/library-go/pkg/operator/events"
)
// TestingEventRecorder is an events.Recorder that writes every recorded event and
// warning to a test's log instead of the API server.
type TestingEventRecorder struct {
	// t receives all log output.
	t *testing.T
	// component is the recorder's component name.
	component string
}
// WithContext is a no-op for the testing recorder; the receiver is returned as-is.
func (r *TestingEventRecorder) WithContext(ctx context.Context) events.Recorder {
	return r
}
// NewTestingEventRecorder provides an event recorder that logs all recorded events
// to the given test's log.
func NewTestingEventRecorder(t *testing.T) events.Recorder {
	return &TestingEventRecorder{
		t:         t,
		component: "test",
	}
}
// ComponentName returns the recorder's current component name.
func (r *TestingEventRecorder) ComponentName() string {
	return r.component
}
// ForComponent returns a copy of the recorder bound to the given component name.
func (r *TestingEventRecorder) ForComponent(c string) events.Recorder {
	copied := &TestingEventRecorder{t: r.t, component: c}
	return copied
}
// Shutdown is a no-op; the testing recorder holds no resources to release.
func (r *TestingEventRecorder) Shutdown() {}
// WithComponentSuffix returns a recorder named "<current component>-<suffix>".
func (r *TestingEventRecorder) WithComponentSuffix(suffix string) events.Recorder {
	suffixed := fmt.Sprintf("%s-%s", r.ComponentName(), suffix)
	return r.ForComponent(suffixed)
}
// Event logs a normal event to the test output.
func (r *TestingEventRecorder) Event(reason, message string) {
	r.t.Logf("Event: %v: %v", reason, message)
}
// Eventf formats the message and delegates to Event.
func (r *TestingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
	message := fmt.Sprintf(messageFmt, args...)
	r.Event(reason, message)
}
// Warning logs a warning event to the test output.
func (r *TestingEventRecorder) Warning(reason, message string) {
	r.t.Logf("Warning: %v: %v", reason, message)
}
// Warningf formats the message and delegates to Warning.
func (r *TestingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
	message := fmt.Sprintf(messageFmt, args...)
	r.Warning(reason, message)
}

View File

@@ -1,58 +0,0 @@
package eventstesting
import (
"context"
"testing"
"github.com/openshift/library-go/pkg/operator/events"
)
// EventRecorder fans every event out to a real recorder and to a testing recorder,
// so events are both emitted normally and captured in test output.
type EventRecorder struct {
	// realEventRecorder receives each event first.
	realEventRecorder events.Recorder
	// testingEventRecorder mirrors each event into the test log.
	testingEventRecorder *TestingEventRecorder
}
// WithContext is a no-op for the combined recorder; the receiver is returned as-is.
func (e *EventRecorder) WithContext(ctx context.Context) events.Recorder {
	return e
}
// NewEventRecorder combines a real recorder with a testing recorder so every event
// is both emitted for real and logged to the test output.
func NewEventRecorder(t *testing.T, r events.Recorder) events.Recorder {
	return &EventRecorder{
		realEventRecorder:    r,
		testingEventRecorder: NewTestingEventRecorder(t).(*TestingEventRecorder),
	}
}
// Event forwards the event to the real recorder, then mirrors it to the test log.
func (e *EventRecorder) Event(reason, message string) {
	e.realEventRecorder.Event(reason, message)
	e.testingEventRecorder.Event(reason, message)
}
// Shutdown is a no-op; neither wrapped recorder is shut down here.
func (e *EventRecorder) Shutdown() {}
// Eventf forwards the formatted event to both wrapped recorders.
func (e *EventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
	e.realEventRecorder.Eventf(reason, messageFmt, args...)
	e.testingEventRecorder.Eventf(reason, messageFmt, args...)
}
// Warning forwards the warning to both wrapped recorders.
func (e *EventRecorder) Warning(reason, message string) {
	e.realEventRecorder.Warning(reason, message)
	e.testingEventRecorder.Warning(reason, message)
}
// Warningf forwards the formatted warning to both wrapped recorders.
func (e *EventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
	e.realEventRecorder.Warningf(reason, messageFmt, args...)
	e.testingEventRecorder.Warningf(reason, messageFmt, args...)
}
// ForComponent is a no-op for the combined recorder; the receiver is returned as-is.
func (e *EventRecorder) ForComponent(componentName string) events.Recorder {
	return e
}
func (e *EventRecorder) WithComponentSuffix(componentNameSuffix string) events.Recorder {
return e
}
func (e *EventRecorder) ComponentName() string {
return "test-recorder"
}

View File

@@ -1,77 +0,0 @@
package management
import (
v1 "github.com/openshift/api/operator/v1"
)
var (
	// One-time configuration flags; mutated only by the Set* helpers below and
	// read by the Is* predicates.
	allowOperatorUnmanagedState = true
	allowOperatorRemovedState   = true
)

// SetOperatorAlwaysManaged is a one-time choice when an operator wants to opt out
// from supporting the "unmanaged" state. This is the case for control-plane
// operators or operators that are required to always run, otherwise the cluster
// will get into an unstable state or critical components will stop working.
func SetOperatorAlwaysManaged() {
	allowOperatorUnmanagedState = false
}

// SetOperatorUnmanageable is a one-time choice when an operator wants to support
// the "unmanaged" state.
// This is the default setting, provided here mostly for unit tests.
func SetOperatorUnmanageable() {
	allowOperatorUnmanagedState = true
}

// SetOperatorNotRemovable is a one-time choice the operator author can make to
// indicate the operator does not support removing its operand. This makes sense
// for operators like kube-apiserver where removing the operand will lead to a
// bricked, non-automatically-recoverable state.
func SetOperatorNotRemovable() {
	allowOperatorRemovedState = false
}

// SetOperatorRemovable is a one-time choice the operator author can make to
// indicate the operator supports removing its operand.
// This is the default setting, provided here mostly for unit tests.
func SetOperatorRemovable() {
	allowOperatorRemovedState = true
}

// IsOperatorAlwaysManaged means the operator can't be set to the unmanaged state.
func IsOperatorAlwaysManaged() bool {
	return !allowOperatorUnmanagedState
}

// IsOperatorNotRemovable means the operator can't be set to the removed state.
func IsOperatorNotRemovable() bool {
	return !allowOperatorRemovedState
}

// IsOperatorRemovable means the operator can be set to the removed state.
func IsOperatorRemovable() bool {
	return allowOperatorRemovedState
}

// IsOperatorUnknownState reports whether state is none of the three known
// management states (Managed, Removed, Unmanaged).
func IsOperatorUnknownState(state v1.ManagementState) bool {
	switch state {
	case v1.Managed, v1.Removed, v1.Unmanaged:
		return false
	default:
		return true
	}
}

// IsOperatorManaged indicates whether the operator management state allows the
// control loop to proceed and manage the operand. Note: an operator configured
// as always-managed or not-removable is treated as managed regardless of state.
func IsOperatorManaged(state v1.ManagementState) bool {
	if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() {
		return true
	}
	switch state {
	case v1.Managed:
		return true
	case v1.Removed:
		return false
	case v1.Unmanaged:
		return false
	}
	// Unknown states default to managed.
	return true
}

View File

@@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

View File

@@ -1 +0,0 @@
language: go

View File

@@ -1,21 +0,0 @@
Copyright (C) 2012 Rob Figueiredo
All Rights Reserved.
MIT LICENSE
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,6 +0,0 @@
[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron)
[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron)
# cron
Documentation here: https://godoc.org/github.com/robfig/cron

View File

@@ -1,27 +0,0 @@
package cron
import "time"
// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
// It does not support jobs more frequent than once a second.
type ConstantDelaySchedule struct {
Delay time.Duration
}
// Every returns a crontab Schedule that activates once every duration.
// Delays of less than a second are not supported (will round up to 1 second).
// Any fields less than a Second are truncated.
func Every(duration time.Duration) ConstantDelaySchedule {
if duration < time.Second {
duration = time.Second
}
return ConstantDelaySchedule{
Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
}
}
// Next returns the next time this should be run.
// This rounds so that the next activation time will be on the second.
func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
}

259
vendor/github.com/robfig/cron/cron.go generated vendored
View File

@@ -1,259 +0,0 @@
package cron
import (
"log"
"runtime"
"sort"
"time"
)
// Cron keeps track of any number of entries, invoking the associated func as
// specified by the schedule. It may be started, stopped, and the entries may
// be inspected while running.
type Cron struct {
	entries  []*Entry       // registered jobs; kept sorted by next activation by the run loop
	stop     chan struct{}  // signals the run loop to exit
	add      chan *Entry    // hands new entries to a running loop
	snapshot chan []*Entry  // request/response channel used by Entries()
	running  bool           // NOTE(review): read/written from multiple goroutines without a lock — confirm callers serialize Start/Stop/Entries
	ErrorLog *log.Logger    // optional destination for job-panic logs; nil falls back to the stdlib default logger
	location *time.Location // time zone used for all scheduling decisions
}
// Job is an interface for submitted cron jobs.
type Job interface {
Run()
}
// The Schedule describes a job's duty cycle.
type Schedule interface {
// Return the next activation time, later than the given time.
// Next is invoked initially, and then each time the job is run.
Next(time.Time) time.Time
}
// Entry consists of a schedule and the func to execute on that schedule.
type Entry struct {
// The schedule on which this job should be run.
Schedule Schedule
// The next time the job will run. This is the zero time if Cron has not been
// started or this entry's schedule is unsatisfiable
Next time.Time
// The last time this job was run. This is the zero time if the job has never
// been run.
Prev time.Time
// The Job to run.
Job Job
}
// byTime is a wrapper for sorting the entry array by time
// (with zero time at the end).
type byTime []*Entry
func (s byTime) Len() int { return len(s) }
func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byTime) Less(i, j int) bool {
// Two zero times should return false.
// Otherwise, zero is "greater" than any other time.
// (To sort it at the end of the list.)
if s[i].Next.IsZero() {
return false
}
if s[j].Next.IsZero() {
return true
}
return s[i].Next.Before(s[j].Next)
}
// New returns a new Cron job runner operating in the Local time zone.
func New() *Cron {
	return NewWithLocation(time.Now().Location())
}

// NewWithLocation returns a new, not-yet-started Cron job runner that makes
// all scheduling decisions in the given time zone.
func NewWithLocation(location *time.Location) *Cron {
	c := &Cron{
		location: location,
		add:      make(chan *Entry),
		stop:     make(chan struct{}),
		snapshot: make(chan []*Entry),
	}
	return c
}
// FuncJob is an adapter that turns a plain func() into a cron.Job.
type FuncJob func()

// Run invokes the wrapped function.
func (f FuncJob) Run() { f() }

// AddFunc adds a func to the Cron to be run on the given schedule.
func (c *Cron) AddFunc(spec string, cmd func()) error {
	return c.AddJob(spec, FuncJob(cmd))
}

// AddJob adds a Job to the Cron to be run on the given schedule.
// The spec is parsed by the package's default parser; a parse error is
// returned without registering anything.
func (c *Cron) AddJob(spec string, cmd Job) error {
	schedule, err := Parse(spec)
	if err != nil {
		return err
	}
	c.Schedule(schedule, cmd)
	return nil
}

// Schedule adds a Job to the Cron to be run on the given schedule.
func (c *Cron) Schedule(schedule Schedule, cmd Job) {
	entry := &Entry{
		Schedule: schedule,
		Job:      cmd,
	}
	if !c.running {
		// Not started yet: the run loop is not consuming c.add, so append directly.
		c.entries = append(c.entries, entry)
		return
	}
	// Hand the entry to the running loop, which computes its Next time.
	c.add <- entry
}

// Entries returns a snapshot of the cron entries.
func (c *Cron) Entries() []*Entry {
	if c.running {
		// Ask the run loop for a consistent copy via the snapshot channel.
		c.snapshot <- nil
		x := <-c.snapshot
		return x
	}
	return c.entrySnapshot()
}

// Location gets the time zone location used for scheduling.
func (c *Cron) Location() *time.Location {
	return c.location
}
// Start the cron scheduler in its own goroutine, or no-op if already started.
func (c *Cron) Start() {
	if c.running {
		return
	}
	c.running = true
	go c.run()
}

// Run the cron scheduler in the calling goroutine, or no-op if already running.
func (c *Cron) Run() {
	if c.running {
		return
	}
	c.running = true
	c.run()
}

// runWithRecovery invokes the job and converts a panic into a logged error,
// so one misbehaving job cannot take down the whole scheduler.
func (c *Cron) runWithRecovery(j Job) {
	defer func() {
		if r := recover(); r != nil {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			c.logf("cron: panic running job: %v\n%s", r, buf)
		}
	}()
	j.Run()
}

// Run the scheduler. This is private just due to the need to synchronize
// access to the 'running' state variable.
func (c *Cron) run() {
	// Figure out the next activation times for each entry.
	now := c.now()
	for _, entry := range c.entries {
		entry.Next = entry.Schedule.Next(now)
	}

	for {
		// Determine the next entry to run.
		sort.Sort(byTime(c.entries))

		var timer *time.Timer
		if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
			// If there are no runnable entries yet, just sleep "forever" - the
			// inner select still handles new entries and stop requests.
			timer = time.NewTimer(100000 * time.Hour)
		} else {
			timer = time.NewTimer(c.entries[0].Next.Sub(now))
		}

		for {
			select {
			case now = <-timer.C:
				now = now.In(c.location)
				// Run every entry whose next time was less than now.
				// Entries are sorted, so stop at the first one in the future.
				for _, e := range c.entries {
					if e.Next.After(now) || e.Next.IsZero() {
						break
					}
					go c.runWithRecovery(e.Job)
					e.Prev = e.Next
					e.Next = e.Schedule.Next(now)
				}

			case newEntry := <-c.add:
				timer.Stop()
				now = c.now()
				newEntry.Next = newEntry.Schedule.Next(now)
				c.entries = append(c.entries, newEntry)

			case <-c.snapshot:
				c.snapshot <- c.entrySnapshot()
				continue

			case <-c.stop:
				timer.Stop()
				return
			}

			// Any event other than a snapshot request falls through here to
			// re-sort the entries and re-arm the timer in the outer loop.
			break
		}
	}
}
// logf logs to the configured ErrorLog when set, otherwise to the stdlib
// default logger.
func (c *Cron) logf(format string, args ...interface{}) {
	if c.ErrorLog != nil {
		c.ErrorLog.Printf(format, args...)
	} else {
		log.Printf(format, args...)
	}
}

// Stop stops the cron scheduler if it is running; otherwise it does nothing.
// Jobs already launched keep running; nothing here waits for them to finish.
func (c *Cron) Stop() {
	if !c.running {
		return
	}
	c.stop <- struct{}{}
	c.running = false
}
// entrySnapshot returns a copy of the current cron entry list. The Entry
// structs themselves are copied; the Schedule and Job values they reference
// are shared with the originals.
func (c *Cron) entrySnapshot() []*Entry {
	// Pre-size the slice: the final length is known, so avoid repeated growth.
	// make(..., 0, n) stays non-nil even for an empty entry list, matching the
	// previous []*Entry{} behavior.
	entries := make([]*Entry, 0, len(c.entries))
	for _, e := range c.entries {
		entries = append(entries, &Entry{
			Schedule: e.Schedule,
			Next:     e.Next,
			Prev:     e.Prev,
			Job:      e.Job,
		})
	}
	return entries
}

// now returns the current time in the Cron's configured location.
func (c *Cron) now() time.Time {
	return time.Now().In(c.location)
}

129
vendor/github.com/robfig/cron/doc.go generated vendored
View File

@@ -1,129 +0,0 @@
/*
Package cron implements a cron spec parser and job runner.
Usage
Callers may register Funcs to be invoked on a given schedule. Cron will run
them in their own goroutines.
c := cron.New()
c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") })
c.AddFunc("@hourly", func() { fmt.Println("Every hour") })
c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") })
c.Start()
..
// Funcs are invoked in their own goroutine, asynchronously.
...
// Funcs may also be added to a running Cron
c.AddFunc("@daily", func() { fmt.Println("Every day") })
..
// Inspect the cron job entries' next and previous run times.
inspect(c.Entries())
..
c.Stop() // Stop the scheduler (does not stop any jobs already running).
CRON Expression Format
A cron expression represents a set of times, using 6 space-separated fields.
Field name | Mandatory? | Allowed values | Allowed special characters
---------- | ---------- | -------------- | --------------------------
Seconds | Yes | 0-59 | * / , -
Minutes | Yes | 0-59 | * / , -
Hours | Yes | 0-23 | * / , -
Day of month | Yes | 1-31 | * / , - ?
Month | Yes | 1-12 or JAN-DEC | * / , -
Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun",
and "sun" are equally accepted.
Special Characters
Asterisk ( * )
The asterisk indicates that the cron expression will match for all values of the
field; e.g., using an asterisk in the 5th field (month) would indicate every
month.
Slash ( / )
Slashes are used to describe increments of ranges. For example 3-59/15 in the
1st field (minutes) would indicate the 3rd minute of the hour and every 15
minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
that is, an increment over the largest possible range of the field. The form
"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
increment until the end of that specific range. It does not wrap around.
Comma ( , )
Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
Hyphen ( - )
Hyphens are used to define ranges. For example, 9-17 would indicate every
hour between 9am and 5pm inclusive.
Question mark ( ? )
Question mark may be used instead of '*' for leaving either day-of-month or
day-of-week blank.
Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.
Entry | Description | Equivalent To
----- | ----------- | -------------
@yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 0 1 1 *
@monthly | Run once a month, midnight, first of month | 0 0 0 1 * *
@weekly | Run once a week, midnight between Sat/Sun | 0 0 0 * * 0
@daily (or @midnight) | Run once a day, midnight | 0 0 0 * * *
@hourly | Run once an hour, beginning of hour | 0 0 * * * *
Intervals
You may also schedule a job to execute at fixed intervals, starting at the time it's added
or cron is run. This is supported by formatting the cron spec like this:
@every <duration>
where "duration" is a string accepted by time.ParseDuration
(http://golang.org/pkg/time/#ParseDuration).
For example, "@every 1h30m10s" would indicate a schedule that activates after
1 hour, 30 minutes, 10 seconds, and then every interval after that.
Note: The interval does not take the job runtime into account. For example,
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
it will have only 2 minutes of idle time between each run.
Time zones
All interpretation and scheduling is done in the machine's local time zone (as
provided by the Go time package (http://www.golang.org/pkg/time).
Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
not be run!
Thread safety
Since the Cron service runs concurrently with the calling code, some amount of
care must be taken to ensure proper synchronization.
All cron methods are designed to be correctly synchronized as long as the caller
ensures that invocations have a clear happens-before ordering between them.
Implementation
Cron entries are stored in an array, sorted by their next activation time. Cron
sleeps until the next job is due to be run.
Upon waking:
- it runs each entry that is active on that second
- it calculates the next run times for the jobs that were run
- it re-sorts the array of entries by next activation time.
- it goes to sleep until the soonest job.
*/
package cron

View File

@@ -1,380 +0,0 @@
package cron
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// ParseOption configures which fields a Parser expects and which features it
// enables. If a field is not included, the parser substitutes its default
// value. These options do not change the order in which fields are parsed.
type ParseOption int

const (
	Second      ParseOption = 1 << iota // Seconds field, default 0
	Minute                              // Minutes field, default 0
	Hour                                // Hours field, default 0
	Dom                                 // Day of month field, default *
	Month                               // Month field, default *
	Dow                                 // Day of week field, default *
	DowOptional                         // Optional day of week field, default *
	Descriptor                          // Allow descriptors such as @monthly, @weekly, etc.
)

// places lists the fields in the order they appear in a spec string.
var places = []ParseOption{
	Second,
	Minute,
	Hour,
	Dom,
	Month,
	Dow,
}

// defaults holds the value substituted for each field when it is omitted.
var defaults = []string{
	"0",
	"0",
	"0",
	"*",
	"*",
	"*",
}

// A Parser is a custom crontab parser configured via ParseOption flags.
type Parser struct {
	options   ParseOption
	optionals int
}

// NewParser creates a custom Parser with custom options, e.g.
//
//	// Standard parser without descriptors
//	specParser := NewParser(Minute | Hour | Dom | Month | Dow)
//
//	// Same fields, but day-of-week may be omitted
//	subsParser := NewParser(Dom | Month | DowOptional)
func NewParser(options ParseOption) Parser {
	p := Parser{options: options}
	if options&DowOptional != 0 {
		// An optional day-of-week still parses as a Dow field when present.
		p.options |= Dow
		p.optionals = 1
	}
	return p
}
// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
// It accepts crontab specs and features configured by NewParser.
func (p Parser) Parse(spec string) (Schedule, error) {
	if len(spec) == 0 {
		return nil, fmt.Errorf("Empty spec string")
	}
	// Descriptors like "@hourly" bypass field parsing entirely.
	if spec[0] == '@' && p.options&Descriptor > 0 {
		return parseDescriptor(spec)
	}

	// Figure out how many fields we need.
	max := 0
	for _, place := range places {
		if p.options&place > 0 {
			max++
		}
	}
	min := max - p.optionals

	// Split fields on whitespace
	fields := strings.Fields(spec)

	// Validate number of fields
	if count := len(fields); count < min || count > max {
		if min == max {
			return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec)
		}
		return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec)
	}

	// Fill in defaults for the fields this parser does not require.
	fields = expandFields(fields, p.options)

	// field captures the first parse error in err so the six parses below
	// read as straight-line code; after the first error it returns 0.
	var err error
	field := func(field string, r bounds) uint64 {
		if err != nil {
			return 0
		}
		var bits uint64
		bits, err = getField(field, r)
		return bits
	}

	var (
		second     = field(fields[0], seconds)
		minute     = field(fields[1], minutes)
		hour       = field(fields[2], hours)
		dayofmonth = field(fields[3], dom)
		month      = field(fields[4], months)
		dayofweek  = field(fields[5], dow)
	)
	if err != nil {
		return nil, err
	}

	return &SpecSchedule{
		Second: second,
		Minute: minute,
		Hour:   hour,
		Dom:    dayofmonth,
		Month:  month,
		Dow:    dayofweek,
	}, nil
}
// expandFields maps the supplied fields onto the canonical six-field layout,
// filling every position not selected by options with its default value.
func expandFields(fields []string, options ParseOption) []string {
	expanded := make([]string, len(places))
	copy(expanded, defaults)
	next := 0
	for i, place := range places {
		if options&place > 0 {
			expanded[i] = fields[next]
			next++
		}
		if next == len(fields) {
			// All supplied fields consumed; the rest keep their defaults.
			break
		}
	}
	return expanded
}
// standardParser parses classic 5-field crontab specs plus descriptors.
var standardParser = NewParser(
	Minute | Hour | Dom | Month | Dow | Descriptor,
)

// ParseStandard returns a new crontab schedule representing the given standardSpec
// (https://en.wikipedia.org/wiki/Cron). It differs from Parse requiring to always
// pass 5 entries representing: minute, hour, day of month, month and day of week,
// in that order. It returns a descriptive error if the spec is not valid.
//
// It accepts
//   - Standard crontab specs, e.g. "* * * * ?"
//   - Descriptors, e.g. "@midnight", "@every 1h30m"
func ParseStandard(standardSpec string) (Schedule, error) {
	return standardParser.Parse(standardSpec)
}

// defaultParser additionally requires a seconds field and allows day-of-week
// to be omitted.
var defaultParser = NewParser(
	Second | Minute | Hour | Dom | Month | DowOptional | Descriptor,
)

// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
//
// It accepts
//   - Full crontab specs, e.g. "* * * * * ?"
//   - Descriptors, e.g. "@midnight", "@every 1h30m"
func Parse(spec string) (Schedule, error) {
	return defaultParser.Parse(spec)
}
// getField returns a bit set representing all of the times that one field
// matches, or an error parsing the field value. A "field" is a comma-separated
// list of "ranges".
func getField(field string, r bounds) (uint64, error) {
	var bits uint64
	for _, expr := range strings.FieldsFunc(field, func(c rune) bool { return c == ',' }) {
		bit, err := getRange(expr, r)
		if err != nil {
			return bits, err
		}
		bits |= bit
	}
	return bits, nil
}
// getRange returns the bits indicated by the given expression:
//
//	number | number "-" number [ "/" number ]
//
// or error parsing range.
func getRange(expr string, r bounds) (uint64, error) {
	var (
		start, end, step uint
		rangeAndStep     = strings.Split(expr, "/")
		lowAndHigh       = strings.Split(rangeAndStep[0], "-")
		singleDigit      = len(lowAndHigh) == 1
		err              error
	)

	// extra carries the starBit when the range came from "*" or "?", letting
	// later code distinguish an explicit full range from a wildcard.
	var extra uint64
	if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
		start = r.min
		end = r.max
		extra = starBit
	} else {
		start, err = parseIntOrName(lowAndHigh[0], r.names)
		if err != nil {
			return 0, err
		}
		switch len(lowAndHigh) {
		case 1:
			end = start
		case 2:
			end, err = parseIntOrName(lowAndHigh[1], r.names)
			if err != nil {
				return 0, err
			}
		default:
			return 0, fmt.Errorf("Too many hyphens: %s", expr)
		}
	}

	switch len(rangeAndStep) {
	case 1:
		step = 1
	case 2:
		step, err = mustParseInt(rangeAndStep[1])
		if err != nil {
			return 0, err
		}
		// Special handling: "N/step" means "N-max/step".
		if singleDigit {
			end = r.max
		}
	default:
		return 0, fmt.Errorf("Too many slashes: %s", expr)
	}

	// Validate the parsed range against the field's bounds.
	if start < r.min {
		return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
	}
	if end > r.max {
		return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr)
	}
	if start > end {
		return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
	}
	if step == 0 {
		return 0, fmt.Errorf("Step of range should be a positive number: %s", expr)
	}

	return getBits(start, end, step) | extra, nil
}
// parseIntOrName returns the (possibly-named) non-negative integer contained
// in expr; names are matched case-insensitively.
func parseIntOrName(expr string, names map[string]uint) (uint, error) {
	// A lookup on a nil map is safe and simply misses.
	if value, ok := names[strings.ToLower(expr)]; ok {
		return value, nil
	}
	return mustParseInt(expr)
}

// mustParseInt parses expr as a non-negative integer, or returns an error.
func mustParseInt(expr string) (uint, error) {
	num, err := strconv.Atoi(expr)
	switch {
	case err != nil:
		return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err)
	case num < 0:
		return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr)
	}
	return uint(num), nil
}
// getBits returns a bit set with every bit in the range [min, max] set,
// stepping by the given step size.
func getBits(min, max, step uint) uint64 {
	// A contiguous range (step 1) reduces to two shifts and a mask.
	if step == 1 {
		return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
	}
	// Otherwise set each bit individually.
	var bits uint64
	for i := min; i <= max; i += step {
		bits |= 1 << i
	}
	return bits
}
// all returns all bits within the given bounds. (plus the star bit)
func all(r bounds) uint64 {
	return getBits(r.min, r.max, 1) | starBit
}

// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
func parseDescriptor(descriptor string) (Schedule, error) {
	switch descriptor {
	case "@yearly", "@annually":
		// Midnight, January 1st.
		return &SpecSchedule{
			Second: 1 << seconds.min,
			Minute: 1 << minutes.min,
			Hour:   1 << hours.min,
			Dom:    1 << dom.min,
			Month:  1 << months.min,
			Dow:    all(dow),
		}, nil

	case "@monthly":
		// Midnight, first of every month.
		return &SpecSchedule{
			Second: 1 << seconds.min,
			Minute: 1 << minutes.min,
			Hour:   1 << hours.min,
			Dom:    1 << dom.min,
			Month:  all(months),
			Dow:    all(dow),
		}, nil

	case "@weekly":
		// Midnight on the first day of the week.
		return &SpecSchedule{
			Second: 1 << seconds.min,
			Minute: 1 << minutes.min,
			Hour:   1 << hours.min,
			Dom:    all(dom),
			Month:  all(months),
			Dow:    1 << dow.min,
		}, nil

	case "@daily", "@midnight":
		// Midnight every day.
		return &SpecSchedule{
			Second: 1 << seconds.min,
			Minute: 1 << minutes.min,
			Hour:   1 << hours.min,
			Dom:    all(dom),
			Month:  all(months),
			Dow:    all(dow),
		}, nil

	case "@hourly":
		// Beginning of every hour.
		return &SpecSchedule{
			Second: 1 << seconds.min,
			Minute: 1 << minutes.min,
			Hour:   all(hours),
			Dom:    all(dom),
			Month:  all(months),
			Dow:    all(dow),
		}, nil
	}

	// "@every <duration>" produces a constant-delay schedule.
	const every = "@every "
	if strings.HasPrefix(descriptor, every) {
		duration, err := time.ParseDuration(descriptor[len(every):])
		if err != nil {
			return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err)
		}
		return Every(duration), nil
	}

	return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor)
}

158
vendor/github.com/robfig/cron/spec.go generated vendored
View File

@@ -1,158 +0,0 @@
package cron
import "time"
// SpecSchedule specifies a duty cycle (to the second granularity), based on a
// traditional crontab specification. It is computed initially and stored as
// bit sets: bit N of a field set means "this field matches value N".
type SpecSchedule struct {
	Second, Minute, Hour, Dom, Month, Dow uint64
}

// bounds provides a range of acceptable values (plus a map of name to value).
type bounds struct {
	min, max uint
	names    map[string]uint
}

// The bounds for each field.
var (
	seconds = bounds{0, 59, nil}
	minutes = bounds{0, 59, nil}
	hours   = bounds{0, 23, nil}
	dom     = bounds{1, 31, nil}
	months  = bounds{1, 12, map[string]uint{
		"jan": 1,
		"feb": 2,
		"mar": 3,
		"apr": 4,
		"may": 5,
		"jun": 6,
		"jul": 7,
		"aug": 8,
		"sep": 9,
		"oct": 10,
		"nov": 11,
		"dec": 12,
	}}
	dow = bounds{0, 6, map[string]uint{
		"sun": 0,
		"mon": 1,
		"tue": 2,
		"wed": 3,
		"thu": 4,
		"fri": 5,
		"sat": 6,
	}}
)

const (
	// starBit is set on a field whose expression included a star ("*" or "?");
	// dayMatches uses it to decide between AND and OR day semantics.
	starBit = 1 << 63
)
// Next returns the next time this schedule is activated, greater than the given
// time. If no time can be found to satisfy the schedule, return the zero time.
func (s *SpecSchedule) Next(t time.Time) time.Time {
	// General approach:
	// For Month, Day, Hour, Minute, Second:
	// Check if the time value matches. If yes, continue to the next field.
	// If the field doesn't match the schedule, then increment the field until it matches.
	// While incrementing the field, a wrap-around brings it back to the beginning
	// of the field list (since it is necessary to re-verify previous field
	// values)

	// Start at the earliest possible time (the upcoming second).
	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)

	// This flag indicates whether a field has been incremented.
	// Once any field is bumped, all smaller fields are reset to their minimum.
	added := false

	// If no time is found within five years, return zero.
	yearLimit := t.Year() + 5

WRAP:
	if t.Year() > yearLimit {
		return time.Time{}
	}

	// Find the first applicable month.
	// If it's this month, then do nothing.
	for 1<<uint(t.Month())&s.Month == 0 {
		// If we have to add a month, reset the other parts to 0.
		if !added {
			added = true
			// Otherwise, set the date at the beginning (since the current time is irrelevant).
			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
		}
		t = t.AddDate(0, 1, 0)

		// Wrapped around into the next year: re-verify from the top.
		if t.Month() == time.January {
			goto WRAP
		}
	}

	// Now get a day in that month.
	for !dayMatches(s, t) {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
		}
		t = t.AddDate(0, 0, 1)

		// Wrapped into the next month: re-verify from the top.
		if t.Day() == 1 {
			goto WRAP
		}
	}

	for 1<<uint(t.Hour())&s.Hour == 0 {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
		}
		t = t.Add(1 * time.Hour)

		if t.Hour() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Minute())&s.Minute == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Minute)
		}
		t = t.Add(1 * time.Minute)

		if t.Minute() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Second())&s.Second == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Second)
		}
		t = t.Add(1 * time.Second)

		if t.Second() == 0 {
			goto WRAP
		}
	}

	return t
}
// dayMatches reports whether the schedule's day-of-week and day-of-month
// restrictions are satisfied by the given time. When both day fields were
// written explicitly (neither came from a star), matching either is enough;
// when at least one is a wildcard, both must match.
func dayMatches(s *SpecSchedule, t time.Time) bool {
	domOK := s.Dom&(1<<uint(t.Day())) > 0
	dowOK := s.Dow&(1<<uint(t.Weekday())) > 0
	if s.Dom&starBit == 0 && s.Dow&starBit == 0 {
		return domOK || dowOK
	}
	return domOK && dowOK
}