From a868c8d1296f5ee5fc3cda1024d9298271b3fbad Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Tue, 17 Feb 2026 21:17:12 +0100
Subject: [PATCH] chore: update the code based on golangci-lint report

---
 pkg/descheduler/client/client.go                 |  8 ++++----
 pkg/descheduler/descheduler.go                   |  2 +-
 pkg/descheduler/descheduler_test.go              | 16 ++++++++++------
 pkg/descheduler/evictions/evictions.go           |  2 +-
 pkg/descheduler/evictions/evictions_test.go      |  5 ++---
 pkg/descheduler/kubeclientsandbox.go             |  5 ++---
 .../plugins/nodeutilization/nodeutilization.go   |  3 +--
 .../plugins/nodeutilization/usageclients.go      |  4 ++--
 .../plugins/nodeutilization/usageclients_test.go |  4 ++--
 .../pod_antiaffinity_test.go                     |  3 +++
 pkg/framework/profile/profile.go                 |  2 +-
 pkg/framework/testing/utils.go                   |  6 +++---
 pkg/tracing/tracing.go                           | 11 ++++++-----
 pkg/utils/priority.go                            |  2 +-
 test/e2e/e2e_evictioninbackground_test.go        |  5 ++---
 test/e2e/e2e_lownodeutilization_test.go          |  6 +++++-
 test/e2e/e2e_test.go                             | 11 +++++------
 17 files changed, 51 insertions(+), 44 deletions(-)

diff --git a/pkg/descheduler/client/client.go b/pkg/descheduler/client/client.go
index 226830f0d..6f1b64fb7 100644
--- a/pkg/descheduler/client/client.go
+++ b/pkg/descheduler/client/client.go
@@ -20,10 +20,10 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
+	"os"
 	"time"
 
 	promapi "github.com/prometheus/client_golang/api"
@@ -109,7 +109,7 @@ func GetMasterFromKubeconfig(filename string) (string, error) {
 }
 
 func loadCAFile(filepath string) (*x509.CertPool, error) {
-	caCert, err := ioutil.ReadFile(filepath)
+	caCert, err := os.ReadFile(filepath)
 	if err != nil {
 		return nil, err
 	}
@@ -126,13 +126,13 @@ func CreatePrometheusClient(prometheusURL, authToken string) (promapi.Client, *h
 	// Retrieve Pod CA cert
 	caCertPool, err := loadCAFile(K8sPodCAFilePath)
 	if err != nil {
-		return nil, nil, fmt.Errorf("Error loading CA file: %v", err)
+		return nil, nil, fmt.Errorf("error loading CA file: %v", err)
 	}
 
 	// Get Prometheus Host
 	u, err := url.Parse(prometheusURL)
 	if err != nil {
-		return nil, nil, fmt.Errorf("Error parsing prometheus URL: %v", err)
+		return nil, nil, fmt.Errorf("error parsing prometheus URL: %v", err)
 	}
 	t := &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go
index c8c1c3f95..d73b50e01 100644
--- a/pkg/descheduler/descheduler.go
+++ b/pkg/descheduler/descheduler.go
@@ -464,7 +464,7 @@ func bootstrapDescheduler(
 		klog.V(2).Infof("Stopped metrics collector")
 	}()
 	klog.V(2).Infof("Waiting for metrics collector to sync")
-	if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (done bool, err error) {
+	if err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(context.Context) (done bool, err error) {
 		return descheduler.metricsCollector.HasSynced(), nil
 	}); err != nil {
 		return fmt.Errorf("unable to wait for metrics collector to sync: %v", err)
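
Note on the wait helper swap above: wait.PollWithContext is deprecated in k8s.io/apimachinery, and wait.PollUntilContextTimeout is the supported replacement; the new boolean argument controls whether the condition runs immediately instead of after the first interval. A minimal standalone sketch of the call shape (the condition body below is illustrative, not taken from this patch):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	start := time.Now()
	// Poll every second, give up after a minute, and run the first check
	// immediately (the `true` argument) instead of waiting one interval.
	err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true,
		func(ctx context.Context) (bool, error) {
			// Illustrative condition: done once 3 seconds have elapsed.
			return time.Since(start) > 3*time.Second, nil
		})
	fmt.Println("poll finished:", err)
}
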
diff --git a/pkg/descheduler/descheduler_test.go b/pkg/descheduler/descheduler_test.go
index 4240ba7fa..245f13367 100644
--- a/pkg/descheduler/descheduler_test.go
+++ b/pkg/descheduler/descheduler_test.go
@@ -67,7 +67,7 @@ func (m *mockPrometheusClient) Do(ctx context.Context, req *http.Request) (*http
 var _ promapi.Client = &mockPrometheusClient{}
 
 var (
-	podEvictionError     = errors.New("PodEvictionError")
+	errPodEviction       = errors.New("PodEvictionError")
 	tooManyRequestsError = &apierrors.StatusError{
 		ErrStatus: metav1.Status{
 			Status: metav1.StatusFailure,
@@ -837,7 +837,10 @@ func TestDeschedulingLimits(t *testing.T) {
 	var evictedPods []string
 	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
 
-	rand.Seed(time.Now().UnixNano())
+	// Create a local random source for reproducible shuffling in tests
+	// Use time.Now().UnixNano() for different sequences each run, or a fixed seed for deterministic tests
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+
 	pods := []*v1.Pod{
 		test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground),
 		test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground),
@@ -847,7 +850,7 @@ func TestDeschedulingLimits(t *testing.T) {
 	}
 
 	for i := 0; i < 10; i++ {
-		rand.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] })
+		rng.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] })
 		func() {
 			for j := 0; j < 5; j++ {
 				idx := j
@@ -1346,7 +1349,8 @@ func TestEvictedPodRestorationInDryRun(t *testing.T) {
 	defer eventBroadcaster.Shutdown()
 
 	// Always create descheduler with real client/factory first to register all informers
-	descheduler, err := newDescheduler(ctxCancel, rs, internalDeschedulerPolicy, "v1", eventRecorder, rs.Client, sharedInformerFactory, nil, nil)
+	// This initial instance is not used but required to register all informers
+	_, err = newDescheduler(ctxCancel, rs, internalDeschedulerPolicy, "v1", eventRecorder, rs.Client, sharedInformerFactory, nil, nil)
 	if err != nil {
 		t.Fatalf("Unable to create descheduler instance: %v", err)
 	}
@@ -1360,8 +1364,8 @@ func TestEvictedPodRestorationInDryRun(t *testing.T) {
 		t.Fatalf("Failed to create kube client sandbox: %v", err)
 	}
 
-	// Replace descheduler with one using fake client/factory
-	descheduler, err = newDescheduler(ctxCancel, rs, internalDeschedulerPolicy, "v1", eventRecorder, kubeClientSandbox.fakeClient(), kubeClientSandbox.fakeSharedInformerFactory(), nil, kubeClientSandbox)
+	// Create descheduler with fake client/factory for testing
+	descheduler, err := newDescheduler(ctxCancel, rs, internalDeschedulerPolicy, "v1", eventRecorder, kubeClientSandbox.fakeClient(), kubeClientSandbox.fakeSharedInformerFactory(), nil, kubeClientSandbox)
 	if err != nil {
 		t.Fatalf("Unable to create dry run descheduler instance: %v", err)
 	}
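
Note on the rand change above: seeding the global source via rand.Seed has been deprecated since Go 1.20, so the test now owns a local *rand.Rand instead. A minimal standalone sketch of the same pattern (the slice contents are illustrative):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// A fixed seed gives a deterministic order on every run; swap in
	// time.Now().UnixNano() when a different sequence per run is wanted.
	rng := rand.New(rand.NewSource(42))

	pods := []string{"p1", "p2", "p3", "p4"}
	rng.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] })
	fmt.Println(pods)
}
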
diff --git a/pkg/descheduler/evictions/evictions.go b/pkg/descheduler/evictions/evictions.go
index 8b478243c..d0c2c36bb 100644
--- a/pkg/descheduler/evictions/evictions.go
+++ b/pkg/descheduler/evictions/evictions.go
@@ -458,7 +458,7 @@ type EvictOptions struct {
 func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error {
 	if len(pod.UID) == 0 {
 		klog.InfoS("Ignoring pod eviction due to missing UID", "pod", pod)
-		return fmt.Errorf("Pod %v is missing UID", klog.KObj(pod))
+		return fmt.Errorf("pod %v is missing UID", klog.KObj(pod))
 	}
 
 	if pe.featureGates.Enabled(features.EvictionsInBackground) {
diff --git a/pkg/descheduler/evictions/evictions_test.go b/pkg/descheduler/evictions/evictions_test.go
index 4feb71f5c..2e133e388 100644
--- a/pkg/descheduler/evictions/evictions_test.go
+++ b/pkg/descheduler/evictions/evictions_test.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
-	"k8s.io/client-go/kubernetes/fake"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/events"
@@ -94,7 +93,7 @@ func TestEvictPod(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.description, func(t *testing.T) {
 			ctx := context.Background()
-			fakeClient := fake.NewClientset(test.pods...)
+			fakeClient := fakeclientset.NewClientset(test.pods...)
 			fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 				return true, nil, test.wantErr
 			})
@@ -326,7 +325,7 @@ func TestNewPodEvictor(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.description, func(t *testing.T) {
-			fakeClient := fake.NewSimpleClientset(pod1)
+			fakeClient := fakeclientset.NewSimpleClientset(pod1)
 			fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 				return true, nil, test.expectedError
 			})
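
Note on the fake-clientset tests above: evictions surface to client-go reactors as a "create" on the "pods/eviction" subresource. A minimal standalone sketch of that stubbing pattern (the pod name and error are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"

	v1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	fakeClient := fakeclientset.NewSimpleClientset(pod)

	// The eviction API shows up as a "create" action on the "pods/eviction"
	// subresource, which is what this reactor matches on.
	fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("eviction stubbed out")
	})

	err := fakeClient.PolicyV1().Evictions("default").Evict(context.TODO(),
		&policyv1.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}})
	fmt.Println("evict returned:", err) // evict returned: eviction stubbed out
}
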
diff --git a/pkg/descheduler/kubeclientsandbox.go b/pkg/descheduler/kubeclientsandbox.go
index 1eacd415a..5e6420b1b 100644
--- a/pkg/descheduler/kubeclientsandbox.go
+++ b/pkg/descheduler/kubeclientsandbox.go
@@ -23,7 +23,6 @@ import (
 	"time"
 
 	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1"
 	policyv1 "k8s.io/api/policy/v1"
 	schedulingv1 "k8s.io/api/scheduling/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -421,9 +420,9 @@ func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset, evictedCache *e
 	if !matched {
 		return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
 	}
-	eviction, matched := createAct.Object.(*policy.Eviction)
+	eviction, matched := createAct.Object.(*policyv1.Eviction)
 	if !matched {
-		return false, nil, fmt.Errorf("unable to convert action object into *policy.Eviction")
+		return false, nil, fmt.Errorf("unable to convert action object into *policyv1.Eviction")
 	}
 	podObj, err := fakeClient.Tracker().Get(action.GetResource(), eviction.GetNamespace(), eviction.GetName())
 	if err == nil {
diff --git a/pkg/framework/plugins/nodeutilization/nodeutilization.go b/pkg/framework/plugins/nodeutilization/nodeutilization.go
index 2aa45e2f3..a1454babf 100644
--- a/pkg/framework/plugins/nodeutilization/nodeutilization.go
+++ b/pkg/framework/plugins/nodeutilization/nodeutilization.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
-	"sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
 	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -757,7 +756,7 @@ func assessAvailableResourceInNodes(
 
 // withResourceRequestForAny returns a filter function that checks if a pod
 // has a resource request specified for any of the given resources names.
-func withResourceRequestForAny(names ...v1.ResourceName) pod.FilterFunc {
+func withResourceRequestForAny(names ...v1.ResourceName) podutil.FilterFunc {
 	return func(pod *v1.Pod) bool {
 		all := append(pod.Spec.Containers, pod.Spec.InitContainers...)
 		for _, name := range names {
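
Note on the import consolidation above: podutil.FilterFunc is the descheduler's pod predicate type, func(*v1.Pod) bool. A minimal standalone sketch of a filter in the same shape as withResourceRequestForAny (the local FilterFunc alias stands in for the descheduler package so the example is self-contained):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// FilterFunc mirrors podutil.FilterFunc from
// sigs.k8s.io/descheduler/pkg/descheduler/pod.
type FilterFunc func(pod *v1.Pod) bool

// hasRequestForAny reports whether any container (init containers included)
// requests at least one of the given resource names.
func hasRequestForAny(names ...v1.ResourceName) FilterFunc {
	return func(pod *v1.Pod) bool {
		all := append(pod.Spec.Containers, pod.Spec.InitContainers...)
		for _, name := range names {
			for _, c := range all {
				if _, ok := c.Resources.Requests[name]; ok {
					return true
				}
			}
		}
		return false
	}
}

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
			v1.ResourceCPU: resource.MustParse("100m"),
		}},
	}}}}
	fmt.Println(hasRequestForAny(v1.ResourceCPU, v1.ResourceMemory)(pod)) // true
}
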
diff --git a/pkg/framework/plugins/nodeutilization/usageclients.go b/pkg/framework/plugins/nodeutilization/usageclients.go
index fffa7c55a..90950d0b8 100644
--- a/pkg/framework/plugins/nodeutilization/usageclients.go
+++ b/pkg/framework/plugins/nodeutilization/usageclients.go
@@ -281,10 +281,10 @@ func NodeUsageFromPrometheusMetrics(ctx context.Context, promClient promapi.Clie
 	for _, sample := range results.(model.Vector) {
 		nodeName, exists := sample.Metric["instance"]
 		if !exists {
-			return nil, fmt.Errorf("The collected metrics sample is missing 'instance' key")
+			return nil, fmt.Errorf("the collected metrics sample is missing 'instance' key")
 		}
 		if sample.Value < 0 || sample.Value > 1 {
-			return nil, fmt.Errorf("The collected metrics sample for %q has value %v outside of <0; 1> interval", string(nodeName), sample.Value)
+			return nil, fmt.Errorf("the collected metrics sample for %q has value %v outside of <0; 1> interval", string(nodeName), sample.Value)
 		}
 		nodeUsages[string(nodeName)] = map[v1.ResourceName]*resource.Quantity{
 			MetricResource: resource.NewQuantity(int64(sample.Value*100), resource.DecimalSI),
diff --git a/pkg/framework/plugins/nodeutilization/usageclients_test.go b/pkg/framework/plugins/nodeutilization/usageclients_test.go
index 8f47e0b0c..745b3a9bb 100644
--- a/pkg/framework/plugins/nodeutilization/usageclients_test.go
+++ b/pkg/framework/plugins/nodeutilization/usageclients_test.go
@@ -230,7 +230,7 @@ func TestPrometheusUsageClient(t *testing.T) {
 					Timestamp: 1728991761711,
 				},
 			},
-			err: fmt.Errorf("The collected metrics sample is missing 'instance' key"),
+			err: fmt.Errorf("the collected metrics sample is missing 'instance' key"),
 		},
 		{
 			name: "invalid data value out of range",
@@ -238,7 +238,7 @@ func TestPrometheusUsageClient(t *testing.T) {
 			result: model.Vector{
 				sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 1.20381818181818104),
 			},
-			err: fmt.Errorf("The collected metrics sample for \"ip-10-0-51-101.ec2.internal\" has value 1.203818181818181 outside of <0; 1> interval"),
+			err: fmt.Errorf("the collected metrics sample for \"ip-10-0-51-101.ec2.internal\" has value 1.203818181818181 outside of <0; 1> interval"),
 		},
 		{
 			name: "invalid data not a vector",
diff --git a/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity/pod_antiaffinity_test.go b/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity/pod_antiaffinity_test.go
index 66f35ab67..9bbeb6f32 100644
--- a/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity/pod_antiaffinity_test.go
+++ b/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity/pod_antiaffinity_test.go
@@ -352,6 +352,9 @@ func TestPodAntiAffinity(t *testing.T) {
 				&RemovePodsViolatingInterPodAntiAffinityArgs{},
 				handle,
 			)
+			if err != nil {
+				t.Fatalf("Unable to initialize a plugin: %v", err)
+			}
 
 			plugin.(frameworktypes.DeschedulePlugin).Deschedule(ctx, test.nodes)
 			podsEvicted := podEvictor.TotalEvicted()
diff --git a/pkg/framework/profile/profile.go b/pkg/framework/profile/profile.go
index 64996ae90..cc0dd1624 100644
--- a/pkg/framework/profile/profile.go
+++ b/pkg/framework/profile/profile.go
@@ -121,7 +121,7 @@ func (hi *handleImpl) Evictor() frameworktypes.Evictor {
 // PluginInstanceID returns an empty string for the base handle.
 // Plugins should receive a pluginHandle which has a specific instance ID.
 func (hi *handleImpl) PluginInstanceID() string {
-	panic(fmt.Errorf("Not implemented"))
+	panic(fmt.Errorf("not implemented"))
 }
 
 // PluginInstanceID returns a unique identifier for this plugin instance.
diff --git a/pkg/framework/testing/utils.go b/pkg/framework/testing/utils.go
index ac646c25c..679e59c8c 100644
--- a/pkg/framework/testing/utils.go
+++ b/pkg/framework/testing/utils.go
@@ -29,7 +29,7 @@ func InitFrameworkHandle(
 	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
 	podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
 	if err != nil {
-		return nil, nil, fmt.Errorf("Build get pods assigned to node function error: %v", err)
+		return nil, nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
 	}
 
 	var getPodsAssignedToNode func(s string, filterFunc podutil.FilterFunc) ([]*v1.Pod, error)
@@ -52,7 +52,7 @@ func InitFrameworkHandle(
 	})
 	podEvictor, err := evictions.NewPodEvictor(ctx, client, eventRecorder, podInformer, featureGates, evictionOptions)
 	if err != nil {
-		return nil, nil, fmt.Errorf("Unable to initialize pod evictor: %v", err)
+		return nil, nil, fmt.Errorf("unable to initialize pod evictor: %v", err)
 	}
 	evictorFilter, err := defaultevictor.New(
 		ctx,
@@ -64,7 +64,7 @@ func InitFrameworkHandle(
 		},
 	)
 	if err != nil {
-		return nil, nil, fmt.Errorf("Unable to initialize the plugin: %v", err)
+		return nil, nil, fmt.Errorf("unable to initialize the plugin: %v", err)
 	}
 	return &frameworkfake.HandleImpl{
 		ClientsetImpl: client,
diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go
index 93341c8a1..4a9965a2c 100644
--- a/pkg/tracing/tracing.go
+++ b/pkg/tracing/tracing.go
@@ -29,6 +29,7 @@ import (
 	sdktrace "go.opentelemetry.io/otel/sdk/trace"
 	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/noop"
 	"google.golang.org/grpc/credentials"
 	"k8s.io/klog/v2"
 )
@@ -56,7 +57,7 @@ var (
 )
 
 func init() {
-	provider = trace.NewNoopTracerProvider()
+	provider = noop.NewTracerProvider()
 	tracer = provider.Tracer(TracerName)
 }
@@ -73,7 +74,7 @@ func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace st
 		if err != nil && fallbackToNoOpTracer {
 			klog.ErrorS(err, "ran into an error trying to setup a trace provider. Falling back to NoOp provider")
 			err = nil
-			provider = trace.NewNoopTracerProvider()
+			provider = noop.NewTracerProvider()
 		}
 		otel.SetTextMapPropagator(propagation.TraceContext{})
 		otel.SetTracerProvider(provider)
@@ -85,8 +86,8 @@ func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace st
 	if endpoint == "" {
 		klog.V(2).Info("Did not find a trace collector endpoint defined. Switching to NoopTraceProvider")
-		provider = trace.NewNoopTracerProvider()
-		return
+		provider = noop.NewTracerProvider()
+		return nil
 	}
 
 	var opts []otlptracegrpc.Option
@@ -144,7 +145,7 @@ func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace st
 		sdktrace.WithResource(resource),
 	)
 	klog.V(2).Info("Successfully setup trace provider")
-	return
+	return nil
 }
 
 func defaultResourceOpts(name string) []sdkresource.Option {
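
Note on the tracing change above: trace.NewNoopTracerProvider is deprecated in recent opentelemetry-go releases in favor of the dedicated go.opentelemetry.io/otel/trace/noop package. A minimal sketch of the replacement (the tracer and span names are illustrative):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// noop.NewTracerProvider satisfies trace.TracerProvider, so it can be
	// stored wherever a real SDK provider would go.
	var provider trace.TracerProvider = noop.NewTracerProvider()
	tracer := provider.Tracer("descheduler")

	// Spans from a no-op tracer are valid but record nothing.
	_, span := tracer.Start(context.Background(), "example-span")
	defer span.End()
	fmt.Println("recording:", span.IsRecording()) // recording: false
}
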
diff --git a/pkg/utils/priority.go b/pkg/utils/priority.go
index 9a389dbe9..1d6f19ca2 100644
--- a/pkg/utils/priority.go
+++ b/pkg/utils/priority.go
@@ -41,5 +41,5 @@ func GetPriorityValueFromPriorityThreshold(ctx context.Context, client clientset
 	if priority > SystemCriticalPriority {
 		return 0, fmt.Errorf("priority threshold can't be greater than %d", SystemCriticalPriority)
 	}
-	return
+	return priority, nil
 }
diff --git a/test/e2e/e2e_evictioninbackground_test.go b/test/e2e/e2e_evictioninbackground_test.go
index 839d20806..f81deb5f1 100644
--- a/test/e2e/e2e_evictioninbackground_test.go
+++ b/test/e2e/e2e_evictioninbackground_test.go
@@ -10,7 +10,6 @@ import (
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -71,8 +70,8 @@ func virtualMachineInstance(idx int) *kvcorev1.VirtualMachineInstance {
 					Rng: &kvcorev1.Rng{},
 				},
 				Resources: kvcorev1.ResourceRequirements{
-					Requests: v1.ResourceList{
-						v1.ResourceMemory: resource.MustParse("1024M"),
+					Requests: corev1.ResourceList{
+						corev1.ResourceMemory: resource.MustParse("1024M"),
 					},
 				},
 			},
diff --git a/test/e2e/e2e_lownodeutilization_test.go b/test/e2e/e2e_lownodeutilization_test.go
index 07f4e3fc4..47835fdb2 100644
--- a/test/e2e/e2e_lownodeutilization_test.go
+++ b/test/e2e/e2e_lownodeutilization_test.go
@@ -201,8 +201,12 @@ func TestLowNodeUtilizationKubernetesMetrics(t *testing.T) {
 	waitForPodsRunning(ctx, t, clientSet, deploymentObj.Labels, tc.replicasNum, deploymentObj.Namespace)
 	// wait until workerNodes[0].Name has the right actual cpu utilization and all the testing pods are running
 	// and producing ~12 cores in total
-	wait.PollUntilWithContext(ctx, 5*time.Second, func(context.Context) (done bool, err error) {
+	wait.PollUntilContextCancel(ctx, 5*time.Second, true, func(context.Context) (done bool, err error) {
 		item, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, workerNodes[0].Name, metav1.GetOptions{})
+		if err != nil {
+			t.Logf("unable to list nodemetricses: %v", err)
+			return false, nil
+		}
 		t.Logf("Waiting for %q nodemetrics cpu utilization to get over 12, currently %v", workerNodes[0].Name, item.Usage.Cpu().Value())
 		if item.Usage.Cpu().Value() < 12 {
 			return false, nil
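
Note on the polling fix above: wait.PollUntilContextCancel retries for as long as the context lives, and returning (false, nil) from the condition means "not done yet, keep polling", which is why the metrics error is now logged and swallowed rather than returned. A minimal standalone sketch (the failing-then-succeeding probe is illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	attempts := 0
	err := wait.PollUntilContextCancel(ctx, 500*time.Millisecond, true,
		func(ctx context.Context) (bool, error) {
			attempts++
			if attempts < 3 {
				// Transient failure: report "not done" with a nil error so
				// polling continues instead of bubbling the error up.
				fmt.Println("transient failure, retrying:", errors.New("not ready"))
				return false, nil
			}
			return true, nil
		})
	fmt.Println("attempts:", attempts, "err:", err)
}
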
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index fbf5fc82d..6629b4739 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -47,7 +47,6 @@ import (
 	"sigs.k8s.io/yaml"
 
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
-	"sigs.k8s.io/descheduler/pkg/api"
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
 	deschedulerapiv1alpha2 "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
 	"sigs.k8s.io/descheduler/pkg/descheduler"
@@ -459,7 +458,7 @@ func runPodLifetimePlugin(
 		defaultevictor.DefaultEvictorArgs{
 			EvictSystemCriticalPods: evictCritical,
 			EvictDaemonSetPods:      evictDaemonSet,
-			PriorityThreshold: &api.PriorityThreshold{
+			PriorityThreshold: &deschedulerapi.PriorityThreshold{
 				Value: &thresholdPriority,
 			},
 		},
@@ -640,10 +639,10 @@ func TestLowNodeUtilization(t *testing.T) {
 
 	t.Log("Running LowNodeUtilization plugin")
 	plugin, err := nodeutilization.NewLowNodeUtilization(ctx, &nodeutilization.LowNodeUtilizationArgs{
-		Thresholds: api.ResourceThresholds{
+		Thresholds: deschedulerapi.ResourceThresholds{
 			v1.ResourceCPU: 70,
 		},
-		TargetThresholds: api.ResourceThresholds{
+		TargetThresholds: deschedulerapi.ResourceThresholds{
 			v1.ResourceCPU: 80,
 		},
 	}, handle)
@@ -1340,7 +1339,7 @@ func TestPodLifeTimeOldestEvicted(t *testing.T) {
 	}
 
 	waitForRCPodsRunning(ctx, t, clientSet, rc)
-	podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
+	_, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
 	if err != nil {
 		t.Fatalf("Unable to list pods: %v", err)
 	}
@@ -1584,7 +1583,7 @@ func createBalancedPodForNodes(
 	}
 
 	// find the max, if the node has the max,use the one, if not,use the ratio parameter
-	var maxCPUFraction, maxMemFraction float64 = ratio, ratio
+	maxCPUFraction, maxMemFraction := ratio, ratio
 	cpuFractionMap := make(map[string]float64)
 	memFractionMap := make(map[string]float64)
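
Note on the podList change above: the list result was never read again, so assigning it tripped the ineffectual-assignment check; discarding it with the blank identifier keeps only the error check. A minimal sketch of the pattern (listPods is a hypothetical stand-in for the client call):

package main

import (
	"errors"
	"fmt"
)

// listPods is a hypothetical stand-in for a client call whose result this
// caller does not need, but whose error must still be checked.
func listPods() ([]string, error) {
	return nil, errors.New("boom")
}

func main() {
	// Assigning the result to a variable that is never read afterwards would
	// trip ineffassign; the blank identifier keeps only the error check.
	_, err := listPods()
	if err != nil {
		fmt.Println("unable to list pods:", err)
	}
}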