Compare commits

...

3 Commits

Author SHA1 Message Date
Michał Matuszak
11c315289c feat: Make replicas optional for Konnectivity Agent mode Deployment (#1041) 2026-01-04 19:26:57 +01:00
Syed Azeez
0428024946 fix(metrics): resolve workqueue metrics initialization conflict (#1044)
Signed-off-by: Azeez Syed <syedazeez337@gmail.com>
2026-01-04 19:24:43 +01:00
Syed Azeez
f55df56eac fix(soot): add unique controller names to prevent metric conflicts (#1043)
Signed-off-by: Azeez Syed <syedazeez337@gmail.com>
2026-01-04 19:24:10 +01:00
17 changed files with 403 additions and 17 deletions

View File

@@ -289,7 +289,7 @@ var (
KonnectivityAgentModeDeployment KonnectivityAgentMode = "Deployment"
)
//+kubebuilder:validation:XValidation:rule="!(self.mode == 'DaemonSet' && has(self.replicas) && self.replicas != 0) && !(self.mode == 'Deployment' && self.replicas == 0)",message="replicas must be 0 when mode is DaemonSet, and greater than 0 when mode is Deployment"
//+kubebuilder:validation:XValidation:rule="!(self.mode == 'DaemonSet' && has(self.replicas) && self.replicas != 0) && !(self.mode == 'Deployment' && has(self.replicas) && self.replicas == 0)",message="replicas must be 0 (or unset) when mode is DaemonSet, and greater than 0 (or unset) when mode is Deployment"
type KonnectivityAgentSpec struct {
// AgentImage defines the container image for Konnectivity's agent.
@@ -318,7 +318,7 @@ type KonnectivityAgentSpec struct {
// Replicas defines the number of replicas when Mode is Deployment.
// Must be 0 (or unset) if Mode is DaemonSet; if set when Mode is Deployment, it must be greater than 0.
//+kubebuilder:validation:Optional
Replicas int32 `json:"replicas,omitempty"`
Replicas *int32 `json:"replicas,omitempty"`
}
// KonnectivitySpec defines the spec for Konnectivity.

View File

@@ -178,8 +178,8 @@ versions:
type: string
type: object
x-kubernetes-validations:
- message: replicas must be 0 when mode is DaemonSet, and greater than 0 when mode is Deployment
rule: '!(self.mode == ''DaemonSet'' && has(self.replicas) && self.replicas != 0) && !(self.mode == ''Deployment'' && self.replicas == 0)'
- message: replicas must be 0 (or unset) when mode is DaemonSet, and greater than 0 (or unset) when mode is Deployment
rule: '!(self.mode == ''DaemonSet'' && has(self.replicas) && self.replicas != 0) && !(self.mode == ''Deployment'' && has(self.replicas) && self.replicas == 0)'
server:
default:
image: registry.k8s.io/kas-network-proxy/proxy-server

View File

@@ -186,8 +186,8 @@ spec:
type: string
type: object
x-kubernetes-validations:
- message: replicas must be 0 when mode is DaemonSet, and greater than 0 when mode is Deployment
rule: '!(self.mode == ''DaemonSet'' && has(self.replicas) && self.replicas != 0) && !(self.mode == ''Deployment'' && self.replicas == 0)'
- message: replicas must be 0 (or unset) when mode is DaemonSet, and greater than 0 (or unset) when mode is Deployment
rule: '!(self.mode == ''DaemonSet'' && has(self.replicas) && self.replicas != 0) && !(self.mode == ''Deployment'' && has(self.replicas) && self.replicas == 0)'
server:
default:
image: registry.k8s.io/kas-network-proxy/proxy-server

View File

@@ -36,6 +36,7 @@ type CoreDNS struct {
AdminClient client.Client
GetTenantControlPlaneFunc utils.TenantControlPlaneRetrievalFn
TriggerChannel chan event.GenericEvent
ControllerName string
}
func (c *CoreDNS) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
@@ -80,6 +81,7 @@ func (c *CoreDNS) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile
func (c *CoreDNS) SetupWithManager(mgr manager.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).
Named(c.ControllerName).
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == kubeadm.CoreDNSClusterRoleBindingName

View File

@@ -37,6 +37,7 @@ type KonnectivityAgent struct {
AdminClient client.Client
GetTenantControlPlaneFunc utils.TenantControlPlaneRetrievalFn
TriggerChannel chan event.GenericEvent
ControllerName string
}
func (k *KonnectivityAgent) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
@@ -87,6 +88,7 @@ func (k *KonnectivityAgent) Reconcile(ctx context.Context, _ reconcile.Request)
func (k *KonnectivityAgent) SetupWithManager(mgr manager.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).
Named(k.ControllerName).
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
For(&appsv1.DaemonSet{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == konnectivity.AgentName && object.GetNamespace() == konnectivity.AgentNamespace

View File

@@ -29,6 +29,7 @@ type KubeadmPhase struct {
GetTenantControlPlaneFunc utils.TenantControlPlaneRetrievalFn
TriggerChannel chan event.GenericEvent
Phase resources.KubeadmPhaseResource
ControllerName string
logger logr.Logger
}
@@ -75,6 +76,7 @@ func (k *KubeadmPhase) SetupWithManager(mgr manager.Manager) error {
k.logger = mgr.GetLogger().WithName(k.Phase.GetName())
return controllerruntime.NewControllerManagedBy(mgr).
Named(k.ControllerName).
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
For(k.Phase.GetWatchedObject(), builder.WithPredicates(predicate.NewPredicateFuncs(k.Phase.GetPredicateFunc()))).
WatchesRawSource(source.Channel(k.TriggerChannel, &handler.EnqueueRequestForObject{})).

View File

@@ -36,6 +36,7 @@ type KubeProxy struct {
AdminClient client.Client
GetTenantControlPlaneFunc utils.TenantControlPlaneRetrievalFn
TriggerChannel chan event.GenericEvent
ControllerName string
}
func (k *KubeProxy) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
@@ -82,6 +83,7 @@ func (k *KubeProxy) Reconcile(ctx context.Context, _ reconcile.Request) (reconci
func (k *KubeProxy) SetupWithManager(mgr manager.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).
Named(k.ControllerName).
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
For(&rbacv1.ClusterRoleBinding{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == kubeadm.KubeProxyClusterRoleBindingName

View File

@@ -39,6 +39,7 @@ type Migrate struct {
WebhookServiceName string
WebhookCABundle []byte
TriggerChannel chan event.GenericEvent
ControllerName string
}
func (m *Migrate) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
@@ -189,6 +190,7 @@ func (m *Migrate) SetupWithManager(mgr manager.Manager) error {
m.TriggerChannel = make(chan event.GenericEvent)
return controllerruntime.NewControllerManagedBy(mgr).
Named(m.ControllerName).
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: pointer.To(true)}).
For(&admissionregistrationv1.ValidatingWebhookConfiguration{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
vwc := m.object()

View File

@@ -39,6 +39,7 @@ type WritePermissions struct {
WebhookServiceName string
WebhookCABundle []byte
TriggerChannel chan event.GenericEvent
ControllerName string
}
func (r *WritePermissions) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
@@ -190,6 +191,7 @@ func (r *WritePermissions) SetupWithManager(mgr manager.Manager) error {
r.TriggerChannel = make(chan event.GenericEvent)
return controllerruntime.NewControllerManagedBy(mgr).
Named(r.ControllerName).
WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: ptr.To(true)}).
For(&admissionregistrationv1.ValidatingWebhookConfiguration{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == r.object().GetName()

View File

@@ -253,6 +253,9 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
//
// Register all the controllers of the soot here:
//
// Generate unique controller name prefix from TenantControlPlane to avoid metric conflicts
controllerNamePrefix := fmt.Sprintf("%s-%s", tcp.GetNamespace(), tcp.GetName())
writePermissions := &controllers.WritePermissions{
Logger: mgr.GetLogger().WithName("writePermissions"),
Client: mgr.GetClient(),
@@ -261,6 +264,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
WebhookServiceName: m.MigrateServiceName,
WebhookCABundle: m.MigrateCABundle,
TriggerChannel: nil,
ControllerName: fmt.Sprintf("%s-writepermissions", controllerNamePrefix),
}
if err = writePermissions.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err
@@ -273,6 +277,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
Client: mgr.GetClient(),
Logger: mgr.GetLogger().WithName("migrate"),
ControllerName: fmt.Sprintf("%s-migrate", controllerNamePrefix),
}
if err = migrate.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err
@@ -283,6 +288,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
Logger: mgr.GetLogger().WithName("konnectivity_agent"),
TriggerChannel: make(chan event.GenericEvent),
ControllerName: fmt.Sprintf("%s-konnectivity", controllerNamePrefix),
}
if err = konnectivityAgent.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err
@@ -293,6 +299,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
Logger: mgr.GetLogger().WithName("kube_proxy"),
TriggerChannel: make(chan event.GenericEvent),
ControllerName: fmt.Sprintf("%s-kubeproxy", controllerNamePrefix),
}
if err = kubeProxy.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err
@@ -303,6 +310,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
GetTenantControlPlaneFunc: m.retrieveTenantControlPlane(tcpCtx, request),
Logger: mgr.GetLogger().WithName("coredns"),
TriggerChannel: make(chan event.GenericEvent),
ControllerName: fmt.Sprintf("%s-coredns", controllerNamePrefix),
}
if err = coreDNS.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err
@@ -315,6 +323,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
Phase: resources.PhaseUploadConfigKubeadm,
},
TriggerChannel: make(chan event.GenericEvent),
ControllerName: fmt.Sprintf("%s-kubeadmconfig", controllerNamePrefix),
}
if err = uploadKubeadmConfig.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err
@@ -327,6 +336,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
Phase: resources.PhaseUploadConfigKubelet,
},
TriggerChannel: make(chan event.GenericEvent),
ControllerName: fmt.Sprintf("%s-kubeletconfig", controllerNamePrefix),
}
if err = uploadKubeletConfig.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err
@@ -339,6 +349,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
Phase: resources.PhaseBootstrapToken,
},
TriggerChannel: make(chan event.GenericEvent),
ControllerName: fmt.Sprintf("%s-bootstraptoken", controllerNamePrefix),
}
if err = bootstrapToken.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err
@@ -351,6 +362,7 @@ func (m *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res
Phase: resources.PhaseClusterAdminRBAC,
},
TriggerChannel: make(chan event.GenericEvent),
ControllerName: fmt.Sprintf("%s-kubeadmrbac", controllerNamePrefix),
}
if err = kubeadmRbac.SetupWithManager(mgr); err != nil {
return reconcile.Result{}, err

View File

@@ -18,7 +18,7 @@ var _ = Describe("Deploy a TenantControlPlane resource with DataStoreOverrides",
// Fill TenantControlPlane object
tcp := &kamajiv1alpha1.TenantControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: "tcp-clusterip",
Name: "tcp-datastore-overrides",
Namespace: "default",
},
Spec: kamajiv1alpha1.TenantControlPlaneSpec{

2
go.mod
View File

@@ -29,7 +29,6 @@ require (
gomodules.xyz/jsonpatch/v2 v2.5.0
k8s.io/api v0.35.0
k8s.io/apimachinery v0.35.0
k8s.io/apiserver v0.35.0
k8s.io/client-go v0.35.0
k8s.io/cluster-bootstrap v0.0.0
k8s.io/klog/v2 v2.130.1
@@ -184,6 +183,7 @@ require (
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.34.1 // indirect
k8s.io/apiserver v0.35.0 // indirect
k8s.io/cli-runtime v0.0.0 // indirect
k8s.io/cloud-provider v0.0.0 // indirect
k8s.io/component-base v0.35.0 // indirect

View File

@@ -281,8 +281,10 @@ func (r *Agent) mutate(ctx context.Context, tenantControlPlane *kamajiv1alpha1.T
case kamajiv1alpha1.KonnectivityAgentModeDeployment:
//nolint:forcetypeassert
r.resource.(*appsv1.Deployment).Spec.Template = *podTemplateSpec
//nolint:forcetypeassert
r.resource.(*appsv1.Deployment).Spec.Replicas = pointer.To(tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityAgentSpec.Replicas)
if tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityAgentSpec.Replicas != nil {
//nolint:forcetypeassert
r.resource.(*appsv1.Deployment).Spec.Replicas = tenantControlPlane.Spec.Addons.Konnectivity.KonnectivityAgentSpec.Replicas
}
}
return nil

View File

@@ -10,7 +10,6 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiserverv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -94,18 +93,18 @@ func (r *EgressSelectorConfigurationResource) mutate(_ context.Context, tenantCo
return func() error {
r.resource.SetLabels(utilities.MergeMaps(r.resource.GetLabels(), utilities.KamajiLabels(tenantControlPlane.GetName(), r.GetName())))
configuration := &apiserverv1alpha1.EgressSelectorConfiguration{
configuration := &EgressSelectorConfiguration{
TypeMeta: metav1.TypeMeta{
Kind: egressSelectorConfigurationKind,
APIVersion: apiServerAPIVersion,
},
EgressSelections: []apiserverv1alpha1.EgressSelection{
EgressSelections: []EgressSelection{
{
Name: egressSelectorConfigurationName,
Connection: apiserverv1alpha1.Connection{
ProxyProtocol: apiserverv1alpha1.ProtocolGRPC,
Transport: &apiserverv1alpha1.Transport{
UDS: &apiserverv1alpha1.UDSTransport{
Connection: Connection{
ProxyProtocol: ProtocolGRPC,
Transport: &Transport{
UDS: &UDSTransport{
UDSName: defaultUDSName,
},
},

View File

@@ -0,0 +1,71 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package konnectivity
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Local type definitions for EgressSelectorConfiguration to avoid importing
// k8s.io/apiserver which conflicts with controller-runtime workqueue metrics.
// These types are based on k8s.io/apiserver/pkg/apis/apiserver/v1alpha1.
// ProtocolType is the type of the proxy protocol used for egress selection.
type ProtocolType string

const (
	// ProtocolHTTPConnect uses HTTP CONNECT as the proxy protocol.
	ProtocolHTTPConnect ProtocolType = "HTTPConnect"
	// ProtocolGRPC uses GRPC as the proxy protocol.
	ProtocolGRPC ProtocolType = "GRPC"
	// ProtocolDirect establishes a direct connection without proxy.
	ProtocolDirect ProtocolType = "Direct"
)

// +kubebuilder:object:root=true
// +kubebuilder:object:generate=true

// EgressSelectorConfiguration provides versioned configuration for egress selector clients.
// NOTE(review): this mirrors k8s.io/apiserver/pkg/apis/apiserver/v1alpha1.EgressSelectorConfiguration;
// field names and JSON tags must stay identical so the serialized config remains
// wire-compatible with what kube-apiserver parses — verify against upstream when bumping versions.
type EgressSelectorConfiguration struct {
	metav1.TypeMeta `json:",inline"`

	// EgressSelections contains the configuration of all egress selection clients.
	EgressSelections []EgressSelection `json:"egressSelections"`
}

// +kubebuilder:object:generate=true

// EgressSelection provides the configuration for a single egress selection client.
type EgressSelection struct {
	// Name identifies the egress selection this configuration applies to.
	Name string `json:"name"`
	// Connection describes how to connect to the selected egress endpoint.
	Connection Connection `json:"connection"`
}

// +kubebuilder:object:generate=true

// Connection provides the configuration for a single egress selection client connection.
type Connection struct {
	// ProxyProtocol selects the protocol used to talk to the proxy (HTTPConnect, GRPC, or Direct).
	ProxyProtocol ProtocolType `json:"proxyProtocol,omitempty"`
	// Transport describes the underlying transport; nil is only meaningful for ProtocolDirect.
	Transport *Transport `json:"transport,omitempty"`
}

// +kubebuilder:object:generate=true

// Transport defines the transport configurations we support for egress selector.
// Exactly one of TCP or UDS is expected to be set.
type Transport struct {
	// TCP configures a TCP endpoint.
	TCP *TCPTransport `json:"tcp,omitempty"`
	// UDS configures a Unix Domain Socket endpoint.
	UDS *UDSTransport `json:"uds,omitempty"`
}

// +kubebuilder:object:generate=true

// TCPTransport provides the information to connect to a TCP endpoint.
type TCPTransport struct {
	// URL is the location of the proxy server to connect to, e.g. https://127.0.0.1:8131.
	URL string `json:"url,omitempty"`
}

// +kubebuilder:object:generate=true

// UDSTransport provides the information to connect to a Unix Domain Socket endpoint.
type UDSTransport struct {
	// UDSName is the name of the Unix Domain Socket to connect to.
	UDSName string `json:"udsName,omitempty"`
}

View File

@@ -0,0 +1,134 @@
//go:build !ignore_autogenerated
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
// Code generated by controller-gen. DO NOT EDIT.
package konnectivity
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): the functions below are generated by controller-gen — do not
// hand-edit; regenerate (e.g. `make generate`) whenever the types in this
// package change, so the deepcopy implementations stay in sync.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Connection) DeepCopyInto(out *Connection) {
	*out = *in
	if in.Transport != nil {
		in, out := &in.Transport, &out.Transport
		*out = new(Transport)
		// Transport contains pointers of its own, so a nested deep copy is required.
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
func (in *Connection) DeepCopy() *Connection {
	if in == nil {
		return nil
	}
	out := new(Connection)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressSelection) DeepCopyInto(out *EgressSelection) {
	*out = *in
	in.Connection.DeepCopyInto(&out.Connection)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection.
func (in *EgressSelection) DeepCopy() *EgressSelection {
	if in == nil {
		return nil
	}
	out := new(EgressSelection)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.EgressSelections != nil {
		in, out := &in.EgressSelections, &out.EgressSelections
		*out = make([]EgressSelection, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration.
func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration {
	if in == nil {
		return nil
	}
	out := new(EgressSelectorConfiguration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
func (in *TCPTransport) DeepCopy() *TCPTransport {
	if in == nil {
		return nil
	}
	out := new(TCPTransport)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Transport) DeepCopyInto(out *Transport) {
	*out = *in
	if in.TCP != nil {
		in, out := &in.TCP, &out.TCP
		*out = new(TCPTransport)
		// TCPTransport is pointer-free, so a shallow value copy suffices.
		**out = **in
	}
	if in.UDS != nil {
		in, out := &in.UDS, &out.UDS
		*out = new(UDSTransport)
		// UDSTransport is pointer-free, so a shallow value copy suffices.
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
func (in *Transport) DeepCopy() *Transport {
	if in == nil {
		return nil
	}
	out := new(Transport)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
func (in *UDSTransport) DeepCopy() *UDSTransport {
	if in == nil {
		return nil
	}
	out := new(UDSTransport)
	in.DeepCopyInto(out)
	return out
}

View File

@@ -0,0 +1,156 @@
// Copyright 2022 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package internal
import (
	"context"
	"strings"
	"testing"
	"time"

	dto "github.com/prometheus/client_model/go"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/metrics"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// dummyReconciler is a no-op reconcile.Reconciler used only to force
// controller (and therefore workqueue) creation during tests.
type dummyReconciler struct{}

// Reconcile always succeeds without performing any work.
func (d *dummyReconciler) Reconcile(_ context.Context, _ reconcile.Request) (reconcile.Result, error) {
	return reconcile.Result{}, nil
}
// TestWorkqueueMetricsRegistration verifies that controller-runtime workqueue
// metrics are properly registered after removing the k8s.io/apiserver import.
// This test proves that issue #1026 is fixed.
func TestWorkqueueMetricsRegistration(t *testing.T) {
	// Create a minimal scheme; no API types are needed to exercise metrics wiring.
	scheme := runtime.NewScheme()

	// Creating a manager triggers controller-runtime initialization, including
	// registration of its metrics providers.
	mgr, err := manager.New(&rest.Config{
		Host: "https://localhost:6443",
	}, manager.Options{
		Scheme: scheme,
		Metrics: metricsserver.Options{
			BindAddress: "0", // Disable metrics server binding.
		},
	})
	if err != nil {
		// If we can't create a manager (e.g., no cluster), skip the full test
		// but still verify basic metrics registration.
		t.Logf("Could not create manager (expected in unit test): %v", err)
		t.Log("Falling back to basic metrics check...")
		checkBasicMetrics(t)
		return
	}

	// Create a controller with the manager - this triggers workqueue creation.
	if _, err = controller.New("test-controller", mgr, controller.Options{
		Reconciler: &dummyReconciler{},
	}); err != nil {
		t.Fatalf("Failed to create controller: %v", err)
	}

	// Start the manager in the background; the timeout bounds its lifetime so
	// the goroutine cannot outlive the test.
	ctx, cancel := context.WithTimeout(t.Context(), 2*time.Second)
	defer cancel()

	go func() {
		_ = mgr.Start(ctx)
	}()

	// Give it a moment to initialize.
	time.Sleep(100 * time.Millisecond)

	// Gather all registered metrics from controller-runtime's registry.
	metricFamilies, err := metrics.Registry.Gather()
	if err != nil {
		t.Fatalf("Failed to gather metrics: %v", err)
	}

	// Collect all workqueue metrics. strings.HasPrefix replaces the manual
	// `len(name) > 10 && name[:10] == ...` slice check: it is idiomatic and
	// cannot be broken by a prefix-length miscount.
	workqueueMetrics := make(map[string]*dto.MetricFamily)
	for _, mf := range metricFamilies {
		if name := mf.GetName(); strings.HasPrefix(name, "workqueue_") {
			workqueueMetrics[name] = mf
		}
	}

	t.Logf("Total metrics registered: %d", len(metricFamilies))
	t.Logf("Workqueue metrics found: %d", len(workqueueMetrics))

	// Verify we have workqueue metrics.
	if len(workqueueMetrics) == 0 {
		t.Fatal("FAILED: No workqueue metrics found! The initialization conflict is still present.")
	}

	// List all found workqueue metrics.
	t.Log("Found workqueue metrics:")
	for name := range workqueueMetrics {
		t.Logf("  - %s", name)
	}

	// Check for specific expected metrics from controller-runtime.
	expectedMetrics := []string{
		"workqueue_depth",
		"workqueue_adds_total",
		"workqueue_queue_duration_seconds",
		"workqueue_work_duration_seconds",
		"workqueue_retries_total",
		"workqueue_unfinished_work_seconds",
		"workqueue_longest_running_processor_seconds",
	}

	var missingMetrics []string
	for _, expected := range expectedMetrics {
		if _, found := workqueueMetrics[expected]; !found {
			missingMetrics = append(missingMetrics, expected)
		}
	}

	if len(missingMetrics) > 0 {
		t.Errorf("Missing expected workqueue metrics: %v", missingMetrics)
	} else {
		t.Log("✅ SUCCESS: All expected workqueue metrics are present!")
		t.Log("The fix successfully resolved issue #1026 - workqueue metrics are now registered.")
	}
}
// checkBasicMetrics is a fallback check when we can't create a full manager.
func checkBasicMetrics(t *testing.T) {
t.Helper()
// Gather metrics
metricFamilies, err := metrics.Registry.Gather()
if err != nil {
t.Fatalf("Failed to gather metrics: %v", err)
}
// Count workqueue metrics
workqueueCount := 0
for _, mf := range metricFamilies {
name := mf.GetName()
if len(name) > 10 && name[:10] == "workqueue_" {
workqueueCount++
t.Logf("Found: %s", name)
}
}
t.Logf("Total metrics: %d", len(metricFamilies))
t.Logf("Workqueue metrics: %d", workqueueCount)
if workqueueCount > 0 {
t.Log("✅ Workqueue metrics are being registered!")
} else {
t.Log(" No workqueue metrics yet (this is expected without an actual controller)")
t.Log("The fix removed the import conflict - metrics will appear when controllers run")
}
}