Fix lint issues in files

Signed-off-by: 7h3-3mp7y-m4n <emailtorash@gmail.com>
This commit is contained in:
7h3-3mp7y-m4n
2025-06-23 18:04:25 +05:30
committed by Jean-Philippe Evrard
parent 8093053275
commit d677b436a0
11 changed files with 102 additions and 73 deletions

View File

@@ -26,6 +26,9 @@ linters-settings:
- name: indent-error-flow
- name: var-naming
- name: import-shadowing
# https://github.com/mgechev/revive/blob/HEAD/RULES_DESCRIPTIONS.md#package-comments
- name: package-comments # Disabled: this rule does not work as expected
disabled: true
output:
format: colored-line-number
print-issued-lines: true

View File

@@ -19,7 +19,7 @@ bootstrap-tools: $(HACKDIR)
command -v $(HACKDIR)/cosign || curl -sSfL https://github.com/sigstore/cosign/releases/download/v2.2.3/cosign-linux-amd64 -o $(HACKDIR)/cosign
command -v $(HACKDIR)/shellcheck || (curl -sSfL https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz | tar -J -v -x shellcheck-stable/shellcheck && mv shellcheck-stable/shellcheck $(HACKDIR)/shellcheck && rmdir shellcheck-stable)
chmod +x $(HACKDIR)/goreleaser $(HACKDIR)/cosign $(HACKDIR)/syft $(HACKDIR)/shellcheck
command -v staticcheck || go install honnef.co/go/tools/cmd/staticcheck@latest
command -v $(HACKDIR)/golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(HACKDIR) v2.1.6
clean:
rm -rf ./dist
@@ -66,16 +66,12 @@ manifest:
sed -i "s#image: ghcr.io/.*kured.*#image: ghcr.io/$(DH_ORG)/kured:$(VERSION)#g" kured-ds-signal.yaml
echo "Please generate combined manifest if necessary"
test: bootstrap-tools
test: lint
echo "Running short go tests"
go test -test.short -json ./... > test.json
lint: bootstrap-tools
echo "Running shellcheck"
find . -name '*.sh' | xargs -n1 $(HACKDIR)/shellcheck
staticcheck ./...
lint:
@echo "Ensuring golangci-lint is installed..."
@if ! command -v $(HACKDIR)/golangci-lint > /dev/null; then \
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(HACKDIR) v2.1.6; \
fi
@echo "Running golangci-lint..."
$(HACKDIR)/golangci-lint run ./...

View File

@@ -286,7 +286,7 @@ func main() {
go maintainRebootRequiredMetric(nodeID, rebootChecker)
http.Handle("/metrics", promhttp.Handler())
log.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), nil))
log.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), nil)) // #nosec G114
}
func validateNodeLabels(preRebootNodeLabels []string, postRebootNodeLabels []string) error {
@@ -320,11 +320,11 @@ func validateNotificationURL(notifyURL string, slackHookURL string) string {
log.Errorf("slack-hook-url is not properly formatted... no notification will be sent: %v\n", err)
return ""
}
if len(strings.Split(strings.Replace(parsedURL.Path, "/services/", "", -1), "/")) != 3 {
if len(strings.Split(strings.ReplaceAll(parsedURL.Path, "/services/", ""), "/")) != 3 {
log.Errorf("slack-hook-url is not properly formatted... no notification will be sent: unexpected number of / in URL\n")
return ""
}
return fmt.Sprintf("slack://%s", strings.Replace(parsedURL.Path, "/services/", "", -1))
return fmt.Sprintf("slack://%s", strings.ReplaceAll(parsedURL.Path, "/services/", ""))
}
return ""
}
@@ -368,14 +368,22 @@ func LoadFromEnv() {
case "duration":
// Set duration from the environment variable (e.g., "1h30m")
if _, err := time.ParseDuration(envValue); err == nil {
flag.Set(f.Name, envValue)
err = flag.Set(f.Name, envValue)
if err != nil {
fmt.Printf("cannot set flag %s from env{%s}: %s\n", f.Name, envVarName, envValue)
os.Exit(1)
}
} else {
fmt.Printf("Invalid duration for %s: %s\n", envVarName, envValue)
os.Exit(1)
}
case "regexp":
// For regexp, set it from the environment variable
flag.Set(f.Name, envValue)
err := flag.Set(f.Name, envValue)
if err != nil {
fmt.Printf("cannot set flag %s from env{%s}: %s\n", f.Name, envVarName, envValue)
os.Exit(1)
}
case "stringSlice":
// For stringSlice, split the environment variable by commas and set it
err := flag.Set(f.Name, envValue)
@@ -574,11 +582,11 @@ func rebootAsRequired(nodeID string, rebooter reboot.Rebooter, checker checkers.
if err != nil {
log.Errorf("Unable to uncordon %s: %v, will continue to hold lock and retry uncordon", node.GetName(), err)
continue
} else {
if notifyURL != "" {
if err := shoutrrr.Send(notifyURL, fmt.Sprintf(messageTemplateUncordon, nodeID)); err != nil {
log.Warnf("Error notifying: %v", err)
}
}
if notifyURL != "" {
if err := shoutrrr.Send(notifyURL, fmt.Sprintf(messageTemplateUncordon, nodeID)); err != nil {
log.Warnf("Error notifying: %v", err)
}
}
}
@@ -602,8 +610,6 @@ func rebootAsRequired(nodeID string, rebooter reboot.Rebooter, checker checkers.
continue
}
break
} else {
break
}
}
@@ -686,7 +692,10 @@ func rebootAsRequired(nodeID string, rebooter reboot.Rebooter, checker checkers.
log.Errorf("Error releasing lock: %v", err)
}
log.Infof("Performing a best-effort uncordon after failed cordon and drain")
uncordon(client, node)
err := uncordon(client, node)
if err != nil {
log.Errorf("Failed to uncordon %s: %v", node.GetName(), err)
}
continue
}
}

View File

@@ -2,6 +2,7 @@ package internal
import (
"fmt"
"github.com/kubereboot/kured/pkg/checkers"
"github.com/kubereboot/kured/pkg/reboot"
log "github.com/sirupsen/logrus"
@@ -10,11 +11,11 @@ import (
// NewRebooter validates the rebootMethod, rebootCommand, and rebootSignal input,
// then chains to the right constructor.
func NewRebooter(rebootMethod string, rebootCommand string, rebootSignal int) (reboot.Rebooter, error) {
switch {
case rebootMethod == "command":
switch rebootMethod {
case "command":
log.Infof("Reboot command: %s", rebootCommand)
return reboot.NewCommandRebooter(rebootCommand)
case rebootMethod == "signal":
case "signal":
log.Infof("Reboot signal: %d", rebootSignal)
return reboot.NewSignalRebooter(rebootSignal)
default:

View File

@@ -3,6 +3,7 @@ package blockers
import (
"context"
"fmt"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -23,6 +24,8 @@ type KubernetesBlockingChecker struct {
filter []string
}
// NewKubernetesBlockingChecker creates a new KubernetesBlockingChecker using the provided Kubernetes client,
// node name, and pod selectors.
func NewKubernetesBlockingChecker(client *kubernetes.Clientset, nodename string, podSelectors []string) *KubernetesBlockingChecker {
return &KubernetesBlockingChecker{
client: client,

View File

@@ -3,13 +3,14 @@ package blockers
import (
"context"
"fmt"
"regexp"
"sort"
"time"
papi "github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
log "github.com/sirupsen/logrus"
"regexp"
"sort"
"time"
)
// Compile-time checks to ensure the type implements the interface
@@ -31,6 +32,8 @@ type PrometheusBlockingChecker struct {
promClient papi.Client
}
// NewPrometheusBlockingChecker creates a new PrometheusBlockingChecker using the given
// Prometheus API config, alert filter, and filtering options.
func NewPrometheusBlockingChecker(config papi.Config, alertFilter *regexp.Regexp, firingOnly bool, filterMatchOnly bool) PrometheusBlockingChecker {
promClient, _ := papi.NewClient(config)

View File

@@ -3,11 +3,12 @@ package checkers
import (
"bytes"
"fmt"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
"os"
"os/exec"
"strings"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
)
// Checker is the standard interface to use to check
@@ -59,6 +60,7 @@ type CommandChecker struct {
func (rc CommandChecker) RebootRequired() bool {
bufStdout := new(bytes.Buffer)
bufStderr := new(bytes.Buffer)
// #nosec G204 -- CheckCommand is controlled and validated internally
cmd := exec.Command(rc.CheckCommand[0], rc.CheckCommand[1:]...)
cmd.Stdout = bufStdout
cmd.Stderr = bufStderr

View File

@@ -4,10 +4,11 @@ import (
"context"
"encoding/json"
"fmt"
log "github.com/sirupsen/logrus"
"strings"
"time"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -20,17 +21,21 @@ const (
k8sAPICallRetryTimeout = 5 * time.Minute // How long to wait until we determine that the k8s API is definitively unavailable
)
// Lock defines the interface for acquiring, releasing, and checking
// the status of a reboot coordination lock.
type Lock interface {
Acquire(NodeMeta) (bool, string, error)
Release() error
Holding() (bool, LockAnnotationValue, error)
}
// GenericLock holds the configuration for lock TTL and the delay before releasing it.
type GenericLock struct {
TTL time.Duration
releaseDelay time.Duration
}
// NodeMeta contains metadata about a node relevant to scheduling decisions.
type NodeMeta struct {
Unschedulable bool `json:"unschedulable"`
}
@@ -94,20 +99,19 @@ func New(client *kubernetes.Clientset, nodeID, namespace, name, annotation strin
},
maxOwners: concurrency,
}
} else {
return &DaemonSetSingleLock{
GenericLock: GenericLock{
TTL: TTL,
releaseDelay: lockReleaseDelay,
},
DaemonSetLock: DaemonSetLock{
client: client,
nodeID: nodeID,
namespace: namespace,
name: name,
annotation: annotation,
},
}
}
return &DaemonSetSingleLock{
GenericLock: GenericLock{
TTL: TTL,
releaseDelay: lockReleaseDelay,
},
DaemonSetLock: DaemonSetLock{
client: client,
nodeID: nodeID,
namespace: namespace,
name: name,
annotation: annotation,
},
}
}
@@ -135,7 +139,7 @@ func (dsl *DaemonSetSingleLock) Acquire(nodeMetadata NodeMeta) (bool, string, er
return false, "", fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
valueString, exists := ds.Annotations[dsl.annotation]
if exists {
value := LockAnnotationValue{}
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
@@ -147,15 +151,22 @@ func (dsl *DaemonSetSingleLock) Acquire(nodeMetadata NodeMeta) (bool, string, er
}
}
if ds.ObjectMeta.Annotations == nil {
ds.ObjectMeta.Annotations = make(map[string]string)
if ds.Annotations == nil {
ds.Annotations = make(map[string]string)
}
value := LockAnnotationValue{NodeID: dsl.nodeID, Metadata: nodeMetadata, Created: time.Now().UTC(), TTL: dsl.TTL}
value := LockAnnotationValue{
NodeID: dsl.nodeID,
Metadata: nodeMetadata,
Created: time.Now().UTC(),
TTL: dsl.TTL,
}
valueBytes, err := json.Marshal(&value)
if err != nil {
return false, "", err
}
ds.ObjectMeta.Annotations[dsl.annotation] = string(valueBytes)
ds.Annotations[dsl.annotation] = string(valueBytes)
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
if err != nil {
@@ -163,15 +174,15 @@ func (dsl *DaemonSetSingleLock) Acquire(nodeMetadata NodeMeta) (bool, string, er
// Something else updated the resource between us reading and writing - try again soon
time.Sleep(time.Second)
continue
} else {
return false, "", err
}
return false, "", err
}
return true, dsl.nodeID, nil
}
}
// Test attempts to check the kured daemonset lock status (existence, expiry) from instantiated DaemonSetLock using client-go
// Holding checks if the current node still holds the lock based on the DaemonSet annotation.
func (dsl *DaemonSetSingleLock) Holding() (bool, LockAnnotationValue, error) {
var lockData LockAnnotationValue
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
@@ -179,7 +190,7 @@ func (dsl *DaemonSetSingleLock) Holding() (bool, LockAnnotationValue, error) {
return false, lockData, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
valueString, exists := ds.Annotations[dsl.annotation]
if exists {
value := LockAnnotationValue{}
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
@@ -206,7 +217,7 @@ func (dsl *DaemonSetSingleLock) Release() error {
return fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
valueString, exists := ds.Annotations[dsl.annotation]
if exists {
value := LockAnnotationValue{}
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
@@ -220,7 +231,7 @@ func (dsl *DaemonSetSingleLock) Release() error {
return fmt.Errorf("lock not held")
}
delete(ds.ObjectMeta.Annotations, dsl.annotation)
delete(ds.Annotations, dsl.annotation)
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
if err != nil {
@@ -228,9 +239,8 @@ func (dsl *DaemonSetSingleLock) Release() error {
// Something else updated the resource between us reading and writing - try again soon
time.Sleep(time.Second)
continue
} else {
return err
}
return err
}
return nil
}
@@ -295,7 +305,7 @@ func (dsl *DaemonSetMultiLock) Acquire(nodeMetaData NodeMeta) (bool, string, err
}
annotation := multiLockAnnotationValue{}
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
valueString, exists := ds.Annotations[dsl.annotation]
if exists {
if err := json.Unmarshal([]byte(valueString), &annotation); err != nil {
return false, "", fmt.Errorf("error getting multi lock: %w", err)
@@ -307,29 +317,29 @@ func (dsl *DaemonSetMultiLock) Acquire(nodeMetaData NodeMeta) (bool, string, err
return false, strings.Join(nodeIDsFromMultiLock(newAnnotation), ","), nil
}
if ds.ObjectMeta.Annotations == nil {
ds.ObjectMeta.Annotations = make(map[string]string)
if ds.Annotations == nil {
ds.Annotations = make(map[string]string)
}
newAnnotationBytes, err := json.Marshal(&newAnnotation)
if err != nil {
return false, "", fmt.Errorf("error marshalling new annotation lock: %w", err)
}
ds.ObjectMeta.Annotations[dsl.annotation] = string(newAnnotationBytes)
ds.Annotations[dsl.annotation] = string(newAnnotationBytes)
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.Background(), ds, metav1.UpdateOptions{})
if err != nil {
if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
time.Sleep(time.Second)
continue
} else {
return false, "", fmt.Errorf("error updating daemonset with multi lock: %w", err)
}
return false, "", fmt.Errorf("error updating daemonset with multi lock: %w", err)
}
return true, strings.Join(nodeIDsFromMultiLock(newAnnotation), ","), nil
}
}
// TestMultiple attempts to check the kured daemonset lock status for multi locks
// Holding checks whether the current node is holding a valid lock for the DaemonSetMultiLock.
func (dsl *DaemonSetMultiLock) Holding() (bool, LockAnnotationValue, error) {
var lockdata LockAnnotationValue
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
@@ -337,7 +347,7 @@ func (dsl *DaemonSetMultiLock) Holding() (bool, LockAnnotationValue, error) {
return false, lockdata, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
valueString, exists := ds.Annotations[dsl.annotation]
if exists {
value := multiLockAnnotationValue{}
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
@@ -366,7 +376,7 @@ func (dsl *DaemonSetMultiLock) Release() error {
return fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
valueString, exists := ds.Annotations[dsl.annotation]
modified := false
value := multiLockAnnotationValue{}
if exists {
@@ -391,7 +401,7 @@ func (dsl *DaemonSetMultiLock) Release() error {
if err != nil {
return fmt.Errorf("error marshalling new annotation on release: %v", err)
}
ds.ObjectMeta.Annotations[dsl.annotation] = string(newAnnotationBytes)
ds.Annotations[dsl.annotation] = string(newAnnotationBytes)
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
if err != nil {
@@ -399,9 +409,8 @@ func (dsl *DaemonSetMultiLock) Release() error {
// Something else updated the resource between us reading and writing - try again soon
time.Sleep(time.Second)
continue
} else {
return err
}
return err
}
return nil
}

View File

@@ -10,6 +10,7 @@ func New(s rand.Source, d time.Duration) <-chan time.Time {
c := make(chan time.Time)
go func() {
// #nosec G404 -- math/rand is used here for non-security timing jitter
random := rand.New(s)
time.Sleep(time.Duration(float64(d)/2 + float64(d)*random.Float64()))
c <- time.Now()

View File

@@ -3,10 +3,11 @@ package reboot
import (
"bytes"
"fmt"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
"os/exec"
"strings"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
)
// CommandRebooter holds context-information for a reboot with command
@@ -20,7 +21,7 @@ func (c CommandRebooter) Reboot() error {
bufStdout := new(bytes.Buffer)
bufStderr := new(bytes.Buffer)
cmd := exec.Command(c.RebootCommand[0], c.RebootCommand[1:]...)
cmd := exec.Command(c.RebootCommand[0], c.RebootCommand[1:]...) // #nosec G204
cmd.Stdout = bufStdout
cmd.Stderr = bufStderr

View File

@@ -50,7 +50,7 @@ func parseWeekdays(days []string) (weekdays, error) {
if err != nil {
return weekdays(0), err
}
// #nosec G115 -- weekday is guaranteed to be between 0 and 6 by parseWeekday()
result |= 1 << uint32(weekday)
}
@@ -59,6 +59,7 @@ func parseWeekdays(days []string) (weekdays, error) {
// Contains returns true if the specified weekday is a member of this set.
func (w weekdays) Contains(day time.Weekday) bool {
// #nosec G115 -- day is time.Weekday [0-6], shift safe within uint32
return uint32(w)&(1<<uint32(day)) != 0
}