From d3d648283ef0a50c54d50fd35bfe784acbd1c742 Mon Sep 17 00:00:00 2001 From: Jian Zhu <36154065+zhujian7@users.noreply.github.com> Date: Tue, 13 Jun 2023 15:51:48 +0800 Subject: [PATCH] :seedling: Configure the golangci lint (#180) * :seedling: Configure the golangci lint Signed-off-by: zhujian * :seedling: Fix lint issues Signed-off-by: zhujian --------- Signed-off-by: zhujian --- .golangci.yaml | 234 ++++++++++++++++++ Makefile | 4 +- pkg/common/options/options.go | 2 +- pkg/operator/certrotation/cabundle.go | 3 +- pkg/operator/helpers/helpers.go | 13 +- pkg/operator/helpers/sa_syncer.go | 13 +- .../clustermanager_controller.go | 26 +- .../clustermanager_crd_reconcile.go | 6 +- .../clustermanager_hub_reconcile.go | 6 +- .../clustermanager_runtime_reconcile.go | 9 +- .../clustermanager_webhook_reconcile.go | 6 +- .../migration_controller.go | 10 +- .../clustermanager_status_controller.go | 18 +- .../operators/clustermanager/options.go | 3 +- pkg/operator/operators/crdmanager/manager.go | 2 +- .../addonsecretcontroller/controller.go | 11 +- .../klusterlet_cleanup_controller.go | 2 +- .../klusterlet_controller.go | 10 +- .../klusterlet_crd_reconcile.go | 6 +- .../klusterlet_managed_reconcile.go | 9 +- .../klusterlet_management_recocile.go | 6 +- .../klusterlet_runtime_reconcile.go | 20 +- .../klusterlet_status_controller.go | 3 +- .../controllers/scheduling/schedule.go | 16 +- .../scheduling/scheduling_controller.go | 2 - pkg/placement/debugger/debugger.go | 4 +- pkg/placement/helpers/testing/informer.go | 12 +- pkg/placement/plugins/addon/addon.go | 5 +- pkg/placement/plugins/balance/balance.go | 5 +- pkg/placement/plugins/resource/resource.go | 8 +- .../tainttoleration/taint_toleration.go | 8 +- .../clientcert/cert_controller.go | 13 +- .../clientcert/certficate_beta.go | 3 +- pkg/registration/clientcert/certificate.go | 3 +- pkg/registration/helpers/testing/assertion.go | 4 +- .../helpers/testing/testinghelpers.go | 6 +- .../hub/clusterrole/controller.go | 2 +- pkg/registration/hub/csr/controller.go | 12 +- .../default_managedclusterset_controller.go | 3 +- .../global_managedclusterset_controller.go | 3 +- .../spoke/managedcluster/claim_reconcile.go | 3 +- .../spoke/registration/creating_controller.go | 3 +- .../spoke/registration/registration.go | 3 +- pkg/registration/spoke/spokeagent.go | 9 +- pkg/registration/webhook/option.go | 3 +- pkg/registration/webhook/start.go | 4 +- .../webhook/v1/managedcluster_mutating.go | 3 +- .../v1beta2/managedclusterset_conversion.go | 2 +- pkg/work/helper/helpers.go | 6 +- ...tworkreplicaset_add_finalizer_reconcile.go | 3 +- .../manifestworkreplicaset_controller.go | 6 +- ...manifestworkreplicaset_deploy_reconcile.go | 3 +- ...nifestworkreplicaset_finalize_reconcile.go | 3 +- ...manifestworkreplicaset_status_reconcile.go | 3 +- pkg/work/hub/test/helper.go | 4 +- .../appliedmanifestwork_controller.go | 8 +- ...appliedmanifestwork_finalize_controller.go | 3 +- .../manifestwork_controller.go | 8 +- .../availablestatus_controller.go | 14 +- pkg/work/spoke/spokeagent.go | 8 +- pkg/work/webhook/option.go | 3 +- .../manifestworkreplicaset_validating.go | 4 +- test/e2e/common.go | 8 +- test/integration/util/assertion.go | 13 +- test/integration/util/authentication.go | 15 +- test/integration/util/recorder.go | 9 +- 66 files changed, 512 insertions(+), 170 deletions(-) create mode 100644 .golangci.yaml diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 000000000..bfe407f6e --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,234 @@ +# Copyright 
Contributors to the Open Cluster Management project
+
+run:
+  # default concurrency is an available CPU number
+  concurrency: 10
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  deadline: 5m
+
+  # which dirs to skip: they won't be analyzed;
+  # can use regexp here: generated.*, regexp is applied on full path;
+  # default value is empty list, but next dirs are always skipped independently
+  # from this option's value:
+  # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+  skip-dirs:
+    - genfiles$
+    - vendor$
+    - bindata$
+
+  # which files to skip: they will be analyzed, but issues from them
+  # won't be reported. Default value is empty list, but there is
+  # no need to include all autogenerated files, we confidently recognize
+  # autogenerated files. If it's not, please let us know.
+  skip-files:
+    - ".*\\.pb\\.go"
+    - ".*\\.gen\\.go"
+    - ".*_test\\.go"
+
+linters:
+  # please, do not use `enable-all`: it's deprecated and will be removed soon.
+  # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
+  disable-all: true
+  enable:
+    - unused
+    - errcheck
+    - goconst
+    - gocritic
+    - gofmt
+    - goimports
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - lll
+    - misspell
+    - typecheck
+    - unconvert
+    - gci
+  # don't enable:
+  # - gocyclo
+  # - bodyclose
+  # - depguard
+  # - dogsled
+  # - dupl
+  # - funlen
+  # - gochecknoglobals
+  # - gochecknoinits
+  # - gocognit
+  # - godox
+  # - maligned
+  # - nakedret
+  # - prealloc
+  # - scopelint
+  # - whitespace
+
+linters-settings:
+  errcheck:
+    # report about not checking of errors in type assertions: `a := b.(MyStruct)`;
+    # default is false: such cases aren't reported by default.
+    check-type-assertions: false
+
+    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
+    # default is false: such cases aren't reported by default.
+    check-blank: false
+  govet:
+    # report about shadowed variables
+    check-shadowing: false
+  golint:
+    # minimal confidence for issues, default is 0.8
+    min-confidence: 0.0
+  gofmt:
+    # simplify code: gofmt with `-s` option, true by default
+    simplify: true
+  # goimports:
+  #   # put imports beginning with prefix after 3rd-party packages;
+  #   # it's a comma-separated list of prefixes
+  #   local-prefixes: open-cluster-management.io/ocm/
+  maligned:
+    # print struct with more effective memory layout or not, false by default
+    suggest-new: true
+  misspell:
+    # Correct spellings using locale preferences for US or UK.
+    # Default is to use a neutral variety of English.
+    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
+    locale: US
+    ignore-words:
+      - cancelled
+      - cancelling
+  lll:
+    # max line length, lines longer will be reported. Default is 120.
+    # '\t' is counted as 1 character by default, and can be changed with the tab-width option
+    line-length: 160
+    # tab width in spaces. Defaults to 1.
+    tab-width: 1
+  unused:
+    # treat code as a program (not a library) and report unused exported identifiers; default is false.
+    # XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
+    # if it's called for subdir of a project it can't find funcs usages. All text editor integrations
+    # with golangci-lint call it on a directory with the changed file.
+    check-exported: false
+  unparam:
+    # call graph construction algorithm (cha, rta). In general, use cha for libraries,
+    # and rta for programs with main packages. Default is cha.
+ algo: cha + + # Inspect exported functions, default is false. Set to true if no external program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + gocritic: + enabled-checks: + - appendCombine + - argOrder + # - assignOp + - badCond + - boolExprSimplify + # - builtinShadow + - captLocal + - caseOrder + - codegenComment + # - commentedOutCode + - commentedOutImport + - defaultCaseOrder + - deprecatedComment + - docStub + - dupArg + - dupBranchBody + - dupCase + - dupSubExpr + - elseif + - emptyFallthrough + - equalFold + - flagDeref + - flagName + - hexLiteral + - indexAlloc + - initClause + - methodExprCall + - nilValReturn + # - octalLiteral + - offBy1 + - rangeExprCopy + - regexpMust + - sloppyLen + - stringXbytes + - switchTrue + - typeAssertChain + - typeSwitchVar + - typeUnparen + - underef + - unlambda + - unnecessaryBlock + - unslice + - valSwap + - weakCond + + # Unused + # - yodaStyleExpr + # - appendAssign + # - commentFormatting + # - emptyStringTest + # - exitAfterDefer + # - ifElseChain + # - hugeParam + # - importShadow + # - nestingReduce + # - paramTypeCombine + # - ptrToRefParam + # - rangeValCopy + # - singleCaseSwitch + # - sloppyReassign + # - unlabelStmt + # - unnamedResult + # - wrapperFunc + gci: + # Section configuration to compare against. + # Section names are case-insensitive and may contain parameters in (). + # The default order of sections is `standard > default > custom > blank > dot`, + # If `custom-order` is `true`, it follows the order of `sections` option. + # Default: ["standard", "default"] + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(open-cluster-management.io) # Custom section: groups all imports with the specified Prefix. + - prefix(open-cluster-management.io/ocm) + # - blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled. + # - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled. + # Skip generated files. + # Default: true + skip-generated: true + # Enable custom order of sections. + # If `true`, make the section order the same as the order of `sections`. + # Default: false + custom-order: true +issues: + # We want to make sure we get a full report every time. Setting these + # to zero disables the limit. + max-issues-per-linter: 0 + + # List of regexps of issue texts to exclude, empty list by default. + # But independently from this option we use default exclude patterns, + # it can be disabled by `exclude-use-default: false`. To list all + # excluded by default patterns execute `golangci-lint run --help` + exclude: + - composite literal uses unkeyed fields + + exclude-rules: + # Exclude some linters from running on test files. + - path: _test\.go$|^tests/|^samples/ + linters: + - errcheck + - maligned + + # Independently from option `exclude` we use default exclude patterns, + # it can be disabled by this option. To list all + # excluded by default patterns execute `golangci-lint run --help`. + # Default value for this option is true. + exclude-use-default: true + + # Maximum issues count per one linter. Set to 0 to disable. 
Default is 50. + max-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 diff --git a/Makefile b/Makefile index 4d3b5828a..640063b38 100644 --- a/Makefile +++ b/Makefile @@ -65,7 +65,7 @@ verify-crds: patch-crd bash -x hack/verify-crds.sh verify-gocilint: - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.45.2 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2 golangci-lint run --timeout=3m --modules-download-mode vendor ./... install-golang-gci: @@ -83,7 +83,7 @@ verify-fmt-imports: install-golang-gci echo "Diff output is empty"; \ fi -verify: verify-crds +verify: verify-crds verify-gocilint ensure-operator-sdk: ifeq "" "$(wildcard $(OPERATOR_SDK))" diff --git a/pkg/common/options/options.go b/pkg/common/options/options.go index cd8dece03..1c6d1b6eb 100644 --- a/pkg/common/options/options.go +++ b/pkg/common/options/options.go @@ -30,7 +30,7 @@ func (o *AgentOptions) AddFlags(flags *pflag.FlagSet) { flags.StringVar(&o.SpokeKubeconfigFile, "spoke-kubeconfig", o.SpokeKubeconfigFile, "Location of kubeconfig file to connect to spoke cluster. If this is not set, will use '--kubeconfig' to build client to connect to the managed cluster.") flags.StringVar(&o.SpokeClusterName, "spoke-cluster-name", o.SpokeClusterName, "Name of the spoke cluster.") - flags.MarkDeprecated("cluster-name", "use spoke-cluster-name flag") + _ = flags.MarkDeprecated("cluster-name", "use spoke-cluster-name flag") flags.StringVar(&o.SpokeClusterName, "cluster-name", o.SpokeClusterName, "Name of the spoke cluster.") flags.Float32Var(&o.QPS, "spoke-kube-api-qps", o.QPS, "QPS to use while talking with apiserver on spoke cluster.") diff --git a/pkg/operator/certrotation/cabundle.go b/pkg/operator/certrotation/cabundle.go index a110c1ba8..517d3e851 100644 --- a/pkg/operator/certrotation/cabundle.go +++ b/pkg/operator/certrotation/cabundle.go @@ -42,7 +42,8 @@ func (c CABundleRotation) EnsureConfigMapCABundle(ctx context.Context, signingCe if _, err = manageCABundleConfigMap(caBundleConfigMap, signingCertKeyPair.Config.Certs[0]); err != nil { return nil, err } - if originalCABundleConfigMap == nil || originalCABundleConfigMap.Data == nil || !equality.Semantic.DeepEqual(originalCABundleConfigMap.Data, caBundleConfigMap.Data) { + if originalCABundleConfigMap == nil || originalCABundleConfigMap.Data == nil || + !equality.Semantic.DeepEqual(originalCABundleConfigMap.Data, caBundleConfigMap.Data) { c.EventRecorder.Eventf("CABundleUpdateRequired", "%q in %q requires update", c.Name, c.Namespace) actualCABundleConfigMap, _, err := resourceapply.ApplyConfigMap(ctx, c.Client, c.EventRecorder, caBundleConfigMap) if err != nil { diff --git a/pkg/operator/helpers/helpers.go b/pkg/operator/helpers/helpers.go index 4415378f0..db2c9402c 100644 --- a/pkg/operator/helpers/helpers.go +++ b/pkg/operator/helpers/helpers.go @@ -554,7 +554,7 @@ func LoadClientConfigFromSecret(secret *corev1.Secret) (*rest.Config, error) { } if authInfo, ok := config.AuthInfos[context.AuthInfo]; ok { - // use embeded cert/key data instead of references to external cert/key files + // use embedded cert/key data instead of references to external cert/key files if certData, ok := secret.Data["tls.crt"]; ok && len(authInfo.ClientCertificateData) == 0 { authInfo.ClientCertificateData = certData authInfo.ClientCertificate = "" @@ -780,7 +780,8 @@ func AgentNamespace(klusterlet *operatorapiv1.Klusterlet) string { return KlusterletNamespace(klusterlet) } -// SyncSecret 
forked from https://github.com/openshift/library-go/blob/d9cdfbd844ea08465b938c46a16bed2ea23207e4/pkg/operator/resource/resourceapply/core.go#L357, +// SyncSecret forked from: +// https://github.com/openshift/library-go/blob/d9cdfbd844ea08465b938c46a16bed2ea23207e4/pkg/operator/resource/resourceapply/core.go#L357, // add an addition targetClient parameter to support sync secret to another cluster. func SyncSecret(ctx context.Context, client, targetClient coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) { @@ -864,7 +865,7 @@ func BuildFeatureCondition(invalidMsgs ...string) metav1.Condition { Type: FeatureGatesTypeValid, Status: metav1.ConditionTrue, Reason: FeatureGatesReasonAllValid, - Message: fmt.Sprintf("Feature gates are all valid"), + Message: "Feature gates are all valid", } } @@ -877,7 +878,8 @@ func BuildFeatureCondition(invalidMsgs ...string) metav1.Condition { } } -func ConvertToFeatureGateFlags(component string, features []operatorapiv1.FeatureGate, defaultFeatureGates map[featuregate.Feature]featuregate.FeatureSpec) ([]string, string) { +func ConvertToFeatureGateFlags(component string, features []operatorapiv1.FeatureGate, + defaultFeatureGates map[featuregate.Feature]featuregate.FeatureSpec) ([]string, string) { var flags, invalidFeatures []string for _, feature := range features { @@ -904,7 +906,8 @@ func ConvertToFeatureGateFlags(component string, features []operatorapiv1.Featur // FeatureGateEnabled checks if a feature is enabled or disabled in operator API, or fallback to use the // the default setting -func FeatureGateEnabled(features []operatorapiv1.FeatureGate, defaultFeatures map[featuregate.Feature]featuregate.FeatureSpec, featureName featuregate.Feature) bool { +func FeatureGateEnabled(features []operatorapiv1.FeatureGate, + defaultFeatures map[featuregate.Feature]featuregate.FeatureSpec, featureName featuregate.Feature) bool { for _, feature := range features { if feature.Feature == string(featureName) { return feature.Mode == operatorapiv1.FeatureGateModeTypeEnable diff --git a/pkg/operator/helpers/sa_syncer.go b/pkg/operator/helpers/sa_syncer.go index 35641137a..bf0b7c158 100644 --- a/pkg/operator/helpers/sa_syncer.go +++ b/pkg/operator/helpers/sa_syncer.go @@ -94,11 +94,14 @@ func SATokenCreater(ctx context.Context, saName, saNamespace string, saClient ku } } -func SyncKubeConfigSecret(ctx context.Context, secretName, secretNamespace, kubeconfigPath string, templateKubeconfig *rest.Config, secretClient coreclientv1.SecretsGetter, tokenGetter TokenGetterFunc, recorder events.Recorder) error { +func SyncKubeConfigSecret(ctx context.Context, secretName, secretNamespace, kubeconfigPath string, + templateKubeconfig *rest.Config, secretClient coreclientv1.SecretsGetter, + tokenGetter TokenGetterFunc, recorder events.Recorder) error { secret, err := secretClient.Secrets(secretNamespace).Get(ctx, secretName, metav1.GetOptions{}) switch { case errors.IsNotFound(err): - return applyKubeconfigSecret(ctx, templateKubeconfig, secretName, secretNamespace, kubeconfigPath, secretClient, tokenGetter, recorder) + return applyKubeconfigSecret(ctx, templateKubeconfig, secretName, secretNamespace, + kubeconfigPath, secretClient, tokenGetter, recorder) case err != nil: return err } @@ -162,7 +165,7 @@ func clusterInfoNotChanged(secret *corev1.Secret, templateKubeconfig *rest.Confi klog.Infof("Cluster host changed from %s to %s", cluster.Server, 
templateCluster.Server) return false } - if bytes.Compare(cluster.CertificateAuthorityData, templateCluster.CertificateAuthorityData) != 0 { + if !bytes.Equal(cluster.CertificateAuthorityData, templateCluster.CertificateAuthorityData) { klog.Infof("Cluster certificate authority data changed") return false } @@ -176,7 +179,9 @@ func clusterInfoNotChanged(secret *corev1.Secret, templateKubeconfig *rest.Confi } // applyKubeconfigSecret would render saToken to a secret. -func applyKubeconfigSecret(ctx context.Context, templateKubeconfig *rest.Config, secretName, secretNamespace, kubeconfigPath string, secretClient coreclientv1.SecretsGetter, tokenGetter TokenGetterFunc, recorder events.Recorder) error { +func applyKubeconfigSecret(ctx context.Context, templateKubeconfig *rest.Config, secretName, secretNamespace, + kubeconfigPath string, secretClient coreclientv1.SecretsGetter, tokenGetter TokenGetterFunc, + recorder events.Recorder) error { token, expiration, err := tokenGetter() if err != nil { diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go index 9452575d7..714bbff51 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go @@ -53,9 +53,11 @@ type clusterManagerController struct { recorder events.Recorder cache resourceapply.ResourceCache // For testcases which don't need these functions, we could set fake funcs - ensureSAKubeconfigs func(ctx context.Context, clusterManagerName, clusterManagerNamespace string, hubConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error - generateHubClusterClients func(hubConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, migrationclient.StorageVersionMigrationsGetter, error) - skipRemoveCRDs bool + ensureSAKubeconfigs func(ctx context.Context, clusterManagerName, clusterManagerNamespace string, + hubConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error + generateHubClusterClients func(hubConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, + migrationclient.StorageVersionMigrationsGetter, error) + skipRemoveCRDs bool } type clusterManagerReconcile interface { @@ -156,7 +158,8 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f registrationFeatureGates = clusterManager.Spec.RegistrationConfiguration.FeatureGates config.AutoApproveUsers = strings.Join(clusterManager.Spec.RegistrationConfiguration.AutoApproveUsers, ",") } - config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration", registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates) + config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration", + registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates) workFeatureGates := []operatorapiv1.FeatureGate{} if clusterManager.Spec.WorkConfiguration != nil { @@ -212,9 +215,11 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f var errs []error reconcilers := []clusterManagerReconcile{ - &crdReconcile{cache: n.cache, recorder: n.recorder, hubAPIExtensionClient: hubApiExtensionClient, 
hubMigrationClient: hubMigrationClient, skipRemoveCRDs: n.skipRemoveCRDs}, + &crdReconcile{cache: n.cache, recorder: n.recorder, hubAPIExtensionClient: hubApiExtensionClient, + hubMigrationClient: hubMigrationClient, skipRemoveCRDs: n.skipRemoveCRDs}, &hubReoncile{cache: n.cache, recorder: n.recorder, hubKubeClient: hubClient}, - &runtimeReconcile{cache: n.cache, recorder: n.recorder, hubKubeConfig: hubKubeConfig, hubKubeClient: hubClient, kubeClient: managementClient, ensureSAKubeconfigs: n.ensureSAKubeconfigs}, + &runtimeReconcile{cache: n.cache, recorder: n.recorder, hubKubeConfig: hubKubeConfig, hubKubeClient: hubClient, + kubeClient: managementClient, ensureSAKubeconfigs: n.ensureSAKubeconfigs}, &webhookReconcile{cache: n.cache, recorder: n.recorder, hubKubeClient: hubClient, kubeClient: managementClient}, } @@ -314,7 +319,8 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f return utilerrors.NewAggregate(errs) } -func removeClusterManagerFinalizer(ctx context.Context, clusterManagerClient operatorv1client.ClusterManagerInterface, deploy *operatorapiv1.ClusterManager) error { +func removeClusterManagerFinalizer(ctx context.Context, + clusterManagerClient operatorv1client.ClusterManagerInterface, deploy *operatorapiv1.ClusterManager) error { copiedFinalizers := []string{} for i := range deploy.Finalizers { if deploy.Finalizers[i] == clusterManagerFinalizer { @@ -332,7 +338,8 @@ func removeClusterManagerFinalizer(ctx context.Context, clusterManagerClient ope return nil } -func generateHubClients(hubKubeConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, migrationclient.StorageVersionMigrationsGetter, error) { +func generateHubClients(hubKubeConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, + migrationclient.StorageVersionMigrationsGetter, error) { hubClient, err := kubernetes.NewForConfig(hubKubeConfig) if err != nil { return nil, nil, nil, err @@ -389,7 +396,8 @@ func convertWebhookConfiguration(webhookConfiguration operatorapiv1.WebhookConfi } // clean specified resources -func cleanResources(ctx context.Context, kubeClient kubernetes.Interface, cm *operatorapiv1.ClusterManager, config manifests.HubConfig, resources ...string) (*operatorapiv1.ClusterManager, reconcileState, error) { +func cleanResources(ctx context.Context, kubeClient kubernetes.Interface, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig, resources ...string) (*operatorapiv1.ClusterManager, reconcileState, error) { for _, file := range resources { err := helpers.CleanUpStaticObject( ctx, diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go index 5edfba03f..35ceed422 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go @@ -60,7 +60,8 @@ type crdReconcile struct { recorder events.Recorder } -func (c *crdReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { +func (c *crdReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { crdManager := 
crdmanager.NewManager[*apiextensionsv1.CustomResourceDefinition]( c.hubAPIExtensionClient.ApiextensionsV1().CustomResourceDefinitions(), crdmanager.EqualV1, @@ -89,7 +90,8 @@ func (c *crdReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterM return cm, reconcileContinue, nil } -func (c *crdReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { +func (c *crdReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { crdManager := crdmanager.NewManager[*apiextensionsv1.CustomResourceDefinition]( c.hubAPIExtensionClient.ApiextensionsV1().CustomResourceDefinitions(), crdmanager.EqualV1, diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go index 74f15ce68..93e50fbd5 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go @@ -80,7 +80,8 @@ type hubReoncile struct { recorder events.Recorder } -func (c *hubReoncile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { +func (c *hubReoncile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { // If AddOnManager is not enabled, remove related resources if !config.AddOnManagerEnabled { _, _, err := cleanResources(ctx, c.hubKubeClient, cm, config, hubAddOnManagerRbacResourceFiles...) @@ -136,7 +137,8 @@ func (c *hubReoncile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterMa return cm, reconcileContinue, nil } -func (c *hubReoncile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { +func (c *hubReoncile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { hubResources := getHubResources(cm.Spec.DeployOption.Mode, config) return cleanResources(ctx, c.hubKubeClient, cm, config, hubResources...) 
} diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go index 58e70d58d..659bbf299 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go @@ -47,13 +47,15 @@ type runtimeReconcile struct { hubKubeClient kubernetes.Interface hubKubeConfig *rest.Config - ensureSAKubeconfigs func(ctx context.Context, clusterManagerName, clusterManagerNamespace string, hubConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error + ensureSAKubeconfigs func(ctx context.Context, clusterManagerName, clusterManagerNamespace string, + hubConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error cache resourceapply.ResourceCache recorder events.Recorder } -func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { +func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { // If AddOnManager is not enabled, remove related resources if !config.AddOnManagerEnabled { _, _, err := cleanResources(ctx, c.kubeClient, cm, config, addOnManagerDeploymentFiles...) @@ -180,7 +182,8 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus return cm, reconcileContinue, nil } -func (c *runtimeReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { +func (c *runtimeReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { // Remove All Static files managementResources := []string{namespaceResource} // because namespace is removed, we don't need to remove deployments explicitly return cleanResources(ctx, c.kubeClient, cm, config, managementResources...) 
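
Many hunks above apply the same two mechanical fixes: errcheck findings are silenced by explicitly discarding results with `_ =` (as done for `flags.MarkDeprecated` in pkg/common/options/options.go), and `bytes.Compare(a, b) != 0` is replaced by `!bytes.Equal(a, b)` (as in `clusterInfoNotChanged` in pkg/operator/helpers/sa_syncer.go). A minimal standalone sketch of both patterns, using hypothetical names rather than code from this repo:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// caBundleChanged reports whether two CA bundles differ.
// gosimple-style fix: bytes.Equal instead of bytes.Compare(...) != 0.
func caBundleChanged(oldCA, newCA []byte) bool {
	return !bytes.Equal(oldCA, newCA)
}

func main() {
	// errcheck-style fix: make a deliberate discard explicit with `_, _ =`
	// rather than silently dropping the returned (n, err).
	_, _ = fmt.Fprintln(io.Discard, "intentionally ignored")

	fmt.Println(caBundleChanged([]byte("old"), []byte("new"))) // prints: true
}
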
diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go index bf48e478b..ee3d7b2d6 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go @@ -44,7 +44,8 @@ type webhookReconcile struct { recorder events.Recorder } -func (c *webhookReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { +func (c *webhookReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { var appliedErrs []error if !meta.IsStatusConditionFalse(cm.Status.Conditions, clusterManagerProgressing) { @@ -91,7 +92,8 @@ func (c *webhookReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus return cm, reconcileContinue, nil } -func (c *webhookReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { +func (c *webhookReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { // Remove All webhook files webhookResources := hubRegistrationWebhookResourceFiles webhookResources = append(webhookResources, hubWorkWebhookResourceFiles...) diff --git a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go index 6c77e0e5a..4cac86857 100644 --- a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go @@ -134,7 +134,7 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac Type: MigrationSucceeded, Status: metav1.ConditionFalse, Reason: "StorageVersionMigrationFailed", - Message: fmt.Sprintf("Do not support StorageVersionMigration"), + Message: "Do not support StorageVersionMigration", } _, _, err = helpers.UpdateClusterManagerStatus(ctx, c.clusterManagerClient, clusterManagerName, helpers.UpdateClusterManagerConditionFn(migrationCond), @@ -212,7 +212,8 @@ func removeStorageVersionMigrations( return nil } -func applyStorageVersionMigrations(ctx context.Context, migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter, recorder events.Recorder) error { +func applyStorageVersionMigrations(ctx context.Context, + migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter, recorder events.Recorder) error { errs := []error{} for _, file := range migrationRequestFiles { required, err := parseStorageVersionMigrationFile( @@ -242,7 +243,8 @@ func applyStorageVersionMigrations(ctx context.Context, migrationClient migratio // syncStorageVersionMigrationsCondition sync the migration condition based on all the StorageVersionMigrations status // 1. migrationSucceeded is true only when all the StorageVersionMigrations resources succeed. // 2. 
migrationSucceeded is false when any of the StorageVersionMigrations resources failed or running -func syncStorageVersionMigrationsCondition(ctx context.Context, migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter) (metav1.Condition, error) { +func syncStorageVersionMigrationsCondition(ctx context.Context, + migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter) (metav1.Condition, error) { for _, file := range migrationRequestFiles { required, err := parseStorageVersionMigrationFile( func(name string) ([]byte, error) { @@ -292,7 +294,7 @@ func syncStorageVersionMigrationsCondition(ctx context.Context, migrationClient Type: MigrationSucceeded, Status: metav1.ConditionTrue, Reason: "StorageVersionMigrationSucceed", - Message: fmt.Sprintf("All StorageVersionMigrations Succeed"), + Message: "All StorageVersionMigrations Succeed", }, nil } diff --git a/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go index c753a211e..812273a04 100644 --- a/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go @@ -100,10 +100,11 @@ func (s *clusterManagerStatusController) updateStatusOfRegistration(ctx context. if unavailablePod := helpers.NumOfUnavailablePod(registrationDeployment); unavailablePod > 0 { return metav1.Condition{ - Type: registrationDegraded, - Status: metav1.ConditionTrue, - Reason: "UnavailableRegistrationPod", - Message: fmt.Sprintf("%v of requested instances are unavailable of registration deployment %q %q", unavailablePod, clusterManagerNamespace, registrationDeploymentName), + Type: registrationDegraded, + Status: metav1.ConditionTrue, + Reason: "UnavailableRegistrationPod", + Message: fmt.Sprintf("%v of requested instances are unavailable of registration deployment %q %q", + unavailablePod, clusterManagerNamespace, registrationDeploymentName), } } @@ -131,10 +132,11 @@ func (s *clusterManagerStatusController) updateStatusOfPlacement(ctx context.Con if unavailablePod := helpers.NumOfUnavailablePod(placementDeployment); unavailablePod > 0 { return metav1.Condition{ - Type: placementDegraded, - Status: metav1.ConditionTrue, - Reason: "UnavailablePlacementPod", - Message: fmt.Sprintf("%v of requested instances are unavailable of placement deployment %q %q", unavailablePod, clusterManagerNamespace, placementDeploymentName), + Type: placementDegraded, + Status: metav1.ConditionTrue, + Reason: "UnavailablePlacementPod", + Message: fmt.Sprintf("%v of requested instances are unavailable of placement deployment %q %q", + unavailablePod, clusterManagerNamespace, placementDeploymentName), } } diff --git a/pkg/operator/operators/clustermanager/options.go b/pkg/operator/operators/clustermanager/options.go index a0c1568e4..d929596e6 100644 --- a/pkg/operator/operators/clustermanager/options.go +++ b/pkg/operator/operators/clustermanager/options.go @@ -35,7 +35,8 @@ func (o *Options) RunClusterManagerOperator(ctx context.Context, controllerConte } // kubeInformer is for 3 usages: configmapInformer, secretInformer, deploynmentInformer - // After we introduced hosted mode, the hub components could be installed in a customized namespace.(Before that, it only inform from "open-cluster-management-hub" namespace) + // After we introduced hosted mode, the hub 
components could be installed in a customized
+	// namespace. (Before that, it only informed from the "open-cluster-management-hub" namespace)
 	// It requires us to add filter for each Informer respectively.
 	// TODO: Watch all namespace may cause performance issue.
 	kubeInformer := informers.NewSharedInformerFactoryWithOptions(kubeClient, 5*time.Minute)
diff --git a/pkg/operator/operators/crdmanager/manager.go b/pkg/operator/operators/crdmanager/manager.go
index 59732f765..afc6566b4 100644
--- a/pkg/operator/operators/crdmanager/manager.go
+++ b/pkg/operator/operators/crdmanager/manager.go
@@ -259,7 +259,7 @@ func (m *Manager[T]) shouldUpdate(old, new T) (bool, error) {
 		existingVersion = accessor.GetAnnotations()[versionAnnotationKey]
 	}
 
-	// alwasy update if existing doest not have version annotation
+	// always update if existing does not have version annotation
 	if len(existingVersion) == 0 {
 		return true, nil
 	}
diff --git a/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go
index 902954aa5..7a30de352 100644
--- a/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go
+++ b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go
@@ -20,7 +20,8 @@ const (
 	addonInstallNamespaceLabelKey = "addon.open-cluster-management.io/namespace"
 )
 
-// AddonPullImageSecretController is used to sync pull image secret from operator namespace to addon namespaces(with label "addon.open-cluster-management.io/namespace":"true")
+// AddonPullImageSecretController is used to sync pull image secret from operator namespace
+// to addon namespaces (with label "addon.open-cluster-management.io/namespace":"true")
 // Note:
 // 1. AddonPullImageSecretController only handles namespace events within the same cluster.
 // 2. If the lable is remove from namespace, controller now would not remove the secret.
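
The next hunk in this file collapses an `if cond { return true }; return false` block into a direct boolean return, one of the gosimple-style cleanups the new configuration enforces. A self-contained sketch of the pattern, reusing the label key from the hunk above but with otherwise hypothetical names:

package main

import "fmt"

const addonInstallNamespaceLabelKey = "addon.open-cluster-management.io/namespace"

// hasAddonLabel returns the comparison result directly; the pre-fix form
// (`if labels[...] == "true" { return true }; return false`) says the same
// thing in four lines and is flagged by the linter.
func hasAddonLabel(labels map[string]string) bool {
	return labels[addonInstallNamespaceLabelKey] == "true"
}

func main() {
	ns := map[string]string{addonInstallNamespaceLabelKey: "true"}
	fmt.Println(hasAddonLabel(ns)) // prints: true
}
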
@@ -31,7 +32,8 @@ type addonPullImageSecretController struct {
 	recorder events.Recorder
 }
 
-func NewAddonPullImageSecretController(kubeClient kubernetes.Interface, operatorNamespace string, namespaceInformer coreinformer.NamespaceInformer, recorder events.Recorder) factory.Controller {
+func NewAddonPullImageSecretController(kubeClient kubernetes.Interface, operatorNamespace string,
+	namespaceInformer coreinformer.NamespaceInformer, recorder events.Recorder) factory.Controller {
 	ac := &addonPullImageSecretController{
 		operatorNamespace: operatorNamespace,
 		namespaceInformer: namespaceInformer,
@@ -44,10 +46,7 @@ func NewAddonPullImageSecretController(kubeClient kubernetes.Interface, operator
 		},
 		func(obj interface{}) bool { // if obj has the label, return true
 			namespace := obj.(*corev1.Namespace)
-			if namespace.Labels[addonInstallNamespaceLabelKey] == "true" {
-				return true
-			}
-			return false
+			return namespace.Labels[addonInstallNamespaceLabelKey] == "true"
 		}, namespaceInformer.Informer()).WithSync(ac.sync).ToController("AddonPullImageSecretController", recorder)
 }
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go
index 34deb9fc3..9e7f82507 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go
@@ -137,7 +137,7 @@ func (n *klusterletCleanupController) sync(ctx context.Context, controllerContex
 		withKubeConfigSecret(config.AgentNamespace, config.ExternalManagedKubeConfigSecret).
 		build(ctx)
 	// stop when hosted kubeconfig is not found. the klustelet controller will monitor the secret and retrigger
-	// reconcilation of cleanup controller when secret is created again.
+	// reconciliation of cleanup controller when secret is created again.
 	if errors.IsNotFound(err) {
 		return nil
 	}
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
index bc3f323a0..82772afbe 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
@@ -180,7 +180,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
 		build(ctx)
 
 	// update klusterletReadyToApply condition at first in hosted mode
-	// this conditions should be updated even when klusterlet is in deleteing state.
+	// this condition should be updated even when klusterlet is in deleting state.
if config.InstallMode == operatorapiv1.InstallModeHosted { cond := metav1.Condition{ Type: klusterletReadyToApply, Status: metav1.ConditionTrue, Reason: "KlusterletPrepared", @@ -225,7 +225,8 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto registrationFeatureGates = klusterlet.Spec.RegistrationConfiguration.FeatureGates config.ClientCertExpirationSeconds = klusterlet.Spec.RegistrationConfiguration.ClientCertExpirationSeconds } - config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration", registrationFeatureGates, ocmfeature.DefaultSpokeRegistrationFeatureGates) + config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration", + registrationFeatureGates, ocmfeature.DefaultSpokeRegistrationFeatureGates) workFeatureGates := []operatorapiv1.FeatureGate{} if klusterlet.Spec.WorkConfiguration != nil { @@ -324,7 +325,7 @@ func getServersFromKlusterlet(klusterlet *operatorapiv1.Klusterlet) string { return strings.Join(serverString, ",") } -// getManagedKubeConfig is a helper func for Hosted mode, it will retrive managed cluster +// getManagedKubeConfig is a helper func for Hosted mode, it will retrieve managed cluster // kubeconfig from "external-managed-kubeconfig" secret. func getManagedKubeConfig(ctx context.Context, kubeClient kubernetes.Interface, namespace, secretName string) (*rest.Config, error) { managedKubeconfigSecret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) @@ -353,7 +354,8 @@ func ensureAgentNamespace(ctx context.Context, kubeClient kubernetes.Interface, } // syncPullSecret will sync pull secret from the sourceClient cluster to the targetClient cluster in desired namespace. -func syncPullSecret(ctx context.Context, sourceClient, targetClient kubernetes.Interface, klusterlet *operatorapiv1.Klusterlet, operatorNamespace, namespace string, recorder events.Recorder) error { +func syncPullSecret(ctx context.Context, sourceClient, targetClient kubernetes.Interface, + klusterlet *operatorapiv1.Klusterlet, operatorNamespace, namespace string, recorder events.Recorder) error { _, _, err := helpers.SyncSecret( ctx, sourceClient.CoreV1(), diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go index 64bf34011..b963f0f81 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go @@ -43,7 +43,8 @@ type crdReconcile struct { cache resourceapply.ResourceCache } -func (r *crdReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { +func (r *crdReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, + config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { var applyErr error if cnt, err := r.kubeVersion.Compare("v1.16.0"); err == nil && cnt < 0 { @@ -96,7 +97,8 @@ func (r *crdReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1. // no longer remove the CRDs (AppliedManifestWork & ClusterClaim), because they might be shared // by multiple klusterlets. Consequently, the CRs of those CRDs will not be deleted as well when deleting a klusterlet. 
// Only clean the version label on crds, so another klusterlet can update crds later.
-func (r *crdReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
+func (r *crdReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
+	config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
 	var deleteErr error
 	if cnt, err := r.kubeVersion.Compare("v1.16.0"); err == nil && cnt < 0 {
 		crdManager := crdmanager.NewManager[*apiextensionsv1beta1.CustomResourceDefinition](
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go
index 9041bb30c..cd9d63a91 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go
@@ -55,7 +55,8 @@ type managedReconcile struct {
 	cache resourceapply.ResourceCache
 }
 
-func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
+func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
+	config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
 	// For now, whether in Default or Hosted mode, the addons will be deployed on the managed cluster.
 	// sync image pull secret from management cluster to managed cluster for addon namespace
 	// TODO(zhujian7): In the future, we may consider deploy addons on the management cluster in Hosted mode.
@@ -68,7 +69,8 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
 	// Sync pull secret to the klusterlet addon namespace
 	// The reason we keep syncing secret instead of adding a label to trigger addonsecretcontroller to sync is:
 	// addonsecretcontroller only watch namespaces in the same cluster klusterlet is running on.
-	// And if addons are deployed in default mode on the managed cluster, but klusterlet is deployed in hosted on management cluster, then we still need to sync the secret here in klusterlet-controller using `managedClusterClients.kubeClient`.
+	// And if addons are deployed in default mode on the managed cluster, but klusterlet is deployed in hosted mode
+	// on the management cluster, then we still need to sync the secret here in klusterlet-controller using `managedClusterClients.kubeClient`.
 	err = syncPullSecret(ctx, r.kubeClient, r.managedClusterClients.kubeClient, klusterlet, r.opratorNamespace, addonNamespace, r.recorder)
 	if err != nil {
 		return klusterlet, reconcileStop, err
@@ -127,7 +129,8 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
 	return klusterlet, reconcileContinue, nil
 }
 
-func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
+func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
+	config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
 	// nothing should be done when deploy mode is hosted and hosted finalizer is not added.
if klusterlet.Spec.DeployOption.Mode == operatorapiv1.InstallModeHosted && !hasFinalizer(klusterlet, klusterletHostedFinalizer) { return klusterlet, reconcileContinue, nil diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go index cbafdd951..7a9a049fd 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go @@ -46,7 +46,8 @@ type managementReconcile struct { cache resourceapply.ResourceCache } -func (r *managementReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { +func (r *managementReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, + config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { err := ensureNamespace(ctx, r.kubeClient, klusterlet, config.AgentNamespace) if err != nil { return klusterlet, reconcileStop, err @@ -95,7 +96,8 @@ func (r *managementReconcile) reconcile(ctx context.Context, klusterlet *operato return klusterlet, reconcileContinue, nil } -func (r *managementReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { +func (r *managementReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, + config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { // Remove secrets secrets := []string{config.HubKubeConfigSecret} if config.InstallMode == operatorapiv1.InstallModeHosted { diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go index 3fa9a0e7a..1341340de 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go @@ -31,13 +31,18 @@ type runtimeReconcile struct { cache resourceapply.ResourceCache } -func (r *runtimeReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { +func (r *runtimeReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, + config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { if config.InstallMode == operatorapiv1.InstallModeHosted { // Create managed config secret for registration and work. 
- if err := r.createManagedClusterKubeconfig(ctx, klusterlet, config.KlusterletNamespace, config.AgentNamespace, registrationServiceAccountName(klusterlet.Name), config.ExternalManagedKubeConfigRegistrationSecret, r.recorder); err != nil { + if err := r.createManagedClusterKubeconfig(ctx, klusterlet, config.KlusterletNamespace, config.AgentNamespace, + registrationServiceAccountName(klusterlet.Name), config.ExternalManagedKubeConfigRegistrationSecret, + r.recorder); err != nil { return klusterlet, reconcileStop, err } - if err := r.createManagedClusterKubeconfig(ctx, klusterlet, config.KlusterletNamespace, config.AgentNamespace, workServiceAccountName(klusterlet.Name), config.ExternalManagedKubeConfigWorkSecret, r.recorder); err != nil { + if err := r.createManagedClusterKubeconfig(ctx, klusterlet, config.KlusterletNamespace, config.AgentNamespace, + workServiceAccountName(klusterlet.Name), config.ExternalManagedKubeConfigWorkSecret, + r.recorder); err != nil { return klusterlet, reconcileStop, err } } @@ -86,7 +91,8 @@ func (r *runtimeReconcile) reconcile(ctx context.Context, klusterlet *operatorap hubConnectionDegradedCondition := meta.FindStatusCondition(klusterlet.Status.Conditions, hubConnectionDegraded) if hubConnectionDegradedCondition == nil { workConfig.Replica = 0 - } else if hubConnectionDegradedCondition.Status == metav1.ConditionTrue && strings.Contains(hubConnectionDegradedCondition.Reason, hubKubeConfigSecretMissing) { + } else if hubConnectionDegradedCondition.Status == metav1.ConditionTrue && + strings.Contains(hubConnectionDegradedCondition.Reason, hubKubeConfigSecretMissing) { workConfig.Replica = 0 } @@ -126,7 +132,8 @@ func (r *runtimeReconcile) createManagedClusterKubeconfig( klusterletNamespace, agentNamespace, saName, secretName string, recorder events.Recorder) error { tokenGetter := helpers.SATokenGetter(ctx, saName, klusterletNamespace, r.managedClusterClients.kubeClient) - err := helpers.SyncKubeConfigSecret(ctx, secretName, agentNamespace, "/spoke/config/kubeconfig", r.managedClusterClients.kubeconfig, r.kubeClient.CoreV1(), tokenGetter, recorder) + err := helpers.SyncKubeConfigSecret(ctx, secretName, agentNamespace, "/spoke/config/kubeconfig", + r.managedClusterClients.kubeconfig, r.kubeClient.CoreV1(), tokenGetter, recorder) if err != nil { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "KlusterletApplyFailed", @@ -157,7 +164,8 @@ func (r *runtimeReconcile) getClusterNameFromHubKubeConfigSecret(ctx context.Con return string(clusterName), nil } -func (r *runtimeReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { +func (r *runtimeReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, + config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) { deployments := []string{ fmt.Sprintf("%s-registration-agent", config.KlusterletName), fmt.Sprintf("%s-work-agent", config.KlusterletName), diff --git a/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go index 75e768b39..189b1e1d1 100644 --- a/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go @@ -87,7 +87,8 @@ 
func (k *klusterletStatusController) sync(ctx context.Context, controllerContext ) availableCondition.ObservedGeneration = klusterlet.Generation - registrationDesiredCondition := checkAgentDeploymentDesired(ctx, k.kubeClient, agentNamespace, registrationDeploymentName, klusterletRegistrationDesiredDegraded) + registrationDesiredCondition := checkAgentDeploymentDesired(ctx, + k.kubeClient, agentNamespace, registrationDeploymentName, klusterletRegistrationDesiredDegraded) registrationDesiredCondition.ObservedGeneration = klusterlet.Generation workDesiredCondition := checkAgentDeploymentDesired(ctx, k.kubeClient, agentNamespace, workDeploymentName, klusterletWorkDesiredDegraded) diff --git a/pkg/placement/controllers/scheduling/schedule.go b/pkg/placement/controllers/scheduling/schedule.go index a257f32ff..7c530d9a2 100644 --- a/pkg/placement/controllers/scheduling/schedule.go +++ b/pkg/placement/controllers/scheduling/schedule.go @@ -100,7 +100,11 @@ type schedulerHandler struct { } func NewSchedulerHandler( - clusterClient clusterclient.Interface, placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister, scoreLister clusterlisterv1alpha1.AddOnPlacementScoreLister, clusterLister clusterlisterv1.ManagedClusterLister, recorder kevents.EventRecorder) plugins.Handle { + clusterClient clusterclient.Interface, + placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister, + scoreLister clusterlisterv1alpha1.AddOnPlacementScoreLister, + clusterLister clusterlisterv1.ManagedClusterLister, + recorder kevents.EventRecorder) plugins.Handle { return &schedulerHandler{ recorder: recorder, @@ -325,7 +329,8 @@ func setRequeueAfter(requeueAfter, newRequeueAfter *time.Duration) *time.Duratio // Get prioritizer weight for the placement. // In Additive and "" mode, will override defaultWeight with what placement has defined and return. // In Exact mode, will return the name and weight defined in placement. -func getWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, placement *clusterapiv1beta1.Placement) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) { +func getWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, + placement *clusterapiv1beta1.Placement) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) { mode := placement.Spec.PrioritizerPolicy.Mode switch { case mode == clusterapiv1beta1.PrioritizerPolicyModeExact: @@ -338,7 +343,9 @@ func getWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, place } } -func mergeWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, customizedWeight []clusterapiv1beta1.PrioritizerConfig) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) { +func mergeWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, + customizedWeight []clusterapiv1beta1.PrioritizerConfig, +) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) { weights := make(map[clusterapiv1beta1.ScoreCoordinate]int32) status := framework.NewStatus("", framework.Success, "") // copy the default weight @@ -358,7 +365,8 @@ func mergeWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, cus } // Generate prioritizers for the placement. 
diff --git a/pkg/placement/controllers/scheduling/schedule.go b/pkg/placement/controllers/scheduling/schedule.go
index a257f32ff..7c530d9a2 100644
--- a/pkg/placement/controllers/scheduling/schedule.go
+++ b/pkg/placement/controllers/scheduling/schedule.go
@@ -100,7 +100,11 @@ type schedulerHandler struct {
 }
 func NewSchedulerHandler(
-	clusterClient clusterclient.Interface, placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister, scoreLister clusterlisterv1alpha1.AddOnPlacementScoreLister, clusterLister clusterlisterv1.ManagedClusterLister, recorder kevents.EventRecorder) plugins.Handle {
+	clusterClient clusterclient.Interface,
+	placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister,
+	scoreLister clusterlisterv1alpha1.AddOnPlacementScoreLister,
+	clusterLister clusterlisterv1.ManagedClusterLister,
+	recorder kevents.EventRecorder) plugins.Handle {
 	return &schedulerHandler{
 		recorder: recorder,
@@ -325,7 +329,8 @@ func setRequeueAfter(requeueAfter, newRequeueAfter *time.Duration) *time.Duratio
 // Get prioritizer weight for the placement.
 // In Additive and "" mode, will override defaultWeight with what placement has defined and return.
 // In Exact mode, will return the name and weight defined in placement.
-func getWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, placement *clusterapiv1beta1.Placement) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) {
+func getWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32,
+	placement *clusterapiv1beta1.Placement) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) {
 	mode := placement.Spec.PrioritizerPolicy.Mode
 	switch {
 	case mode == clusterapiv1beta1.PrioritizerPolicyModeExact:
@@ -338,7 +343,9 @@ func getWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, place
 	}
 }
-func mergeWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, customizedWeight []clusterapiv1beta1.PrioritizerConfig) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) {
+func mergeWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32,
+	customizedWeight []clusterapiv1beta1.PrioritizerConfig,
+) (map[clusterapiv1beta1.ScoreCoordinate]int32, *framework.Status) {
 	weights := make(map[clusterapiv1beta1.ScoreCoordinate]int32)
 	status := framework.NewStatus("", framework.Success, "")
 	// copy the default weight
@@ -358,7 +365,8 @@ func mergeWeights(defaultWeight map[clusterapiv1beta1.ScoreCoordinate]int32, cus
 	}
 // Generate prioritizers for the placement.
-func getPrioritizers(weights map[clusterapiv1beta1.ScoreCoordinate]int32, handle plugins.Handle) (map[clusterapiv1beta1.ScoreCoordinate]plugins.Prioritizer, *framework.Status) {
+func getPrioritizers(weights map[clusterapiv1beta1.ScoreCoordinate]int32, handle plugins.Handle,
+) (map[clusterapiv1beta1.ScoreCoordinate]plugins.Prioritizer, *framework.Status) {
 	result := make(map[clusterapiv1beta1.ScoreCoordinate]plugins.Prioritizer)
 	status := framework.NewStatus("", framework.Success, "")
 	for k, v := range weights {
diff --git a/pkg/placement/controllers/scheduling/scheduling_controller.go b/pkg/placement/controllers/scheduling/scheduling_controller.go
index f4252f9c3..8fde71cf9 100644
--- a/pkg/placement/controllers/scheduling/scheduling_controller.go
+++ b/pkg/placement/controllers/scheduling/scheduling_controller.go
@@ -51,8 +51,6 @@ const (
 var ResyncInterval = time.Minute * 5
-type enqueuePlacementFunc func(namespace, name string)
 // schedulingController schedules cluster decisions for Placements
 type schedulingController struct {
 	clusterClient clusterclient.Interface
diff --git a/pkg/placement/debugger/debugger.go b/pkg/placement/debugger/debugger.go
index f814f6c33..4c3caedc9 100644
--- a/pkg/placement/debugger/debugger.go
+++ b/pkg/placement/debugger/debugger.go
@@ -68,7 +68,7 @@ func (d *Debugger) Handler(w http.ResponseWriter, r *http.Request) {
 	resultByte, _ := json.Marshal(result)
-	w.Write(resultByte)
+	_, _ = w.Write(resultByte)
 }
 func (d *Debugger) parsePath(path string) (string, string, error) {
@@ -81,5 +81,5 @@ func (d *Debugger) reportErr(w http.ResponseWriter, err error) {
 	resultByte, _ := json.Marshal(result)
-	w.Write(resultByte)
+	_, _ = w.Write(resultByte)
 }
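Editor's note: the debugger.go hunks above silence errcheck by explicitly discarding the return values of http.ResponseWriter.Write. A self-contained sketch of the idiom:

package example

import (
	"encoding/json"
	"net/http"
)

func handler(w http.ResponseWriter, _ *http.Request) {
	resultByte, _ := json.Marshal(map[string]string{"status": "ok"})
	// Once a response body write fails there is no useful recovery path,
	// so the error is deliberately and visibly discarded rather than
	// silently dropped; errcheck accepts the explicit blank assignment.
	_, _ = w.Write(resultByte)
}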
diff --git a/pkg/placement/helpers/testing/informer.go b/pkg/placement/helpers/testing/informer.go
index 259dbc164..6627cecfe 100644
--- a/pkg/placement/helpers/testing/informer.go
+++ b/pkg/placement/helpers/testing/informer.go
@@ -25,17 +25,17 @@ func NewClusterInformerFactory(clusterClient clusterclient.Interface, objects ..
 	for _, obj := range objects {
 		switch obj.(type) {
 		case *clusterapiv1.ManagedCluster:
-			clusterStore.Add(obj)
+			_ = clusterStore.Add(obj)
 		case *clusterapiv1beta2.ManagedClusterSet:
-			clusterSetStore.Add(obj)
+			_ = clusterSetStore.Add(obj)
 		case *clusterapiv1beta2.ManagedClusterSetBinding:
-			clusterSetBindingStore.Add(obj)
+			_ = clusterSetBindingStore.Add(obj)
 		case *clusterapiv1beta1.Placement:
-			placementStore.Add(obj)
+			_ = placementStore.Add(obj)
 		case *clusterapiv1beta1.PlacementDecision:
-			placementDecisionStore.Add(obj)
+			_ = placementDecisionStore.Add(obj)
 		case *clusterapiv1alpha1.AddOnPlacementScore:
-			addOnPlacementStore.Add(obj)
+			_ = addOnPlacementStore.Add(obj)
 		}
 	}
diff --git a/pkg/placement/plugins/addon/addon.go b/pkg/placement/plugins/addon/addon.go
index 64c0c529e..f1e5eeb38 100644
--- a/pkg/placement/plugins/addon/addon.go
+++ b/pkg/placement/plugins/addon/addon.go
@@ -24,7 +24,7 @@ const (
 )
 var _ plugins.Prioritizer = &AddOn{}
-var AddOnClock = (clock.Clock)(clock.RealClock{})
+var AddOnClock = clock.Clock(clock.RealClock{})
 type AddOn struct {
 	handle plugins.Handle
@@ -68,7 +68,8 @@ func (c *AddOn) Description() string {
 	return description
 }
-func (c *AddOn) Score(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) {
+func (c *AddOn) Score(ctx context.Context, placement *clusterapiv1beta1.Placement,
+	clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) {
 	scores := map[string]int64{}
 	expiredScores := ""
 	status := framework.NewStatus(c.Name(), framework.Success, "")
diff --git a/pkg/placement/plugins/balance/balance.go b/pkg/placement/plugins/balance/balance.go
index 1a47ac5d9..9ac965605 100644
--- a/pkg/placement/plugins/balance/balance.go
+++ b/pkg/placement/plugins/balance/balance.go
@@ -42,7 +42,8 @@ func (b *Balance) Description() string {
 	return description
 }
-func (b *Balance) Score(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) {
+func (b *Balance) Score(ctx context.Context, placement *clusterapiv1beta1.Placement,
+	clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) {
 	scores := map[string]int64{}
 	for _, cluster := range clusters {
 		scores[cluster.Name] = plugins.MaxClusterScore
@@ -76,7 +77,7 @@ func (b *Balance) Score(ctx context.Context, placement *clusterapiv1beta1.Placem
 		if count, ok := decisionCount[clusterName]; ok {
 			usage := float64(count) / float64(maxCount)
-			// Negate the usage and substracted by 0.5, then we double it and muliply by maxCount,
+			// Subtract the usage from 0.5, then double it and multiply by MaxClusterScore,
 			// which normalize the score to value between 100 and -100
 			scores[clusterName] = 2 * int64(float64(plugins.MaxClusterScore)*(0.5-usage))
 		}
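Editor's note: the balance prioritizer above maps a usage ratio in [0, 1] onto a score in [-100, 100] via 2 * MaxClusterScore * (0.5 - usage). A worked sketch of that arithmetic (MaxClusterScore = 100 is inferred from the visible code; the helper name is hypothetical):

package example

const maxClusterScore = 100

// balanceScore returns 100 for an unused cluster (usage 0), 0 at usage 0.5,
// and -100 for the most loaded cluster (usage 1).
func balanceScore(count, maxCount int) int64 {
	usage := float64(count) / float64(maxCount)
	return 2 * int64(float64(maxClusterScore)*(0.5-usage))
}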
diff --git a/pkg/placement/plugins/resource/resource.go b/pkg/placement/plugins/resource/resource.go
index 88bd7a945..931f17654 100644
--- a/pkg/placement/plugins/resource/resource.go
+++ b/pkg/placement/plugins/resource/resource.go
@@ -79,7 +79,8 @@ func (r *ResourcePrioritizer) Description() string {
 	return description
 }
-func (r *ResourcePrioritizer) Score(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) {
+func (r *ResourcePrioritizer) Score(ctx context.Context, placement *clusterapiv1beta1.Placement,
+	clusters []*clusterapiv1.ManagedCluster) (plugins.PluginScoreResult, *framework.Status) {
 	status := framework.NewStatus(r.Name(), framework.Success, "")
 	if r.algorithm == "Allocatable" {
 		return mostResourceAllocatableScores(r.resource, clusters), status
@@ -114,7 +115,7 @@ func mostResourceAllocatableScores(resourceName clusterapiv1.ResourceName, clust
 		// score = ((resource_x_allocatable - min(resource_x_allocatable)) / (max(resource_x_allocatable) - min(resource_x_allocatable)) - 0.5) * 2 * 100
 		if (maxAllocatable - minAllocatable) != 0 {
-			ratio := float64(allocatable-minAllocatable) / float64(maxAllocatable-minAllocatable)
+			ratio := (allocatable - minAllocatable) / (maxAllocatable - minAllocatable)
 			scores[cluster.Name] = int64((ratio - 0.5) * 2.0 * 100.0)
 		} else {
 			scores[cluster.Name] = 100.0
@@ -144,7 +145,8 @@ func getClusterResource(cluster *clusterapiv1.ManagedCluster, resourceName clust
 }
 // Go through all the cluster resources and return the min and max allocatable value of the resourceName.
-func getClustersMinMaxAllocatableResource(clusters []*clusterapiv1.ManagedCluster, resourceName clusterapiv1.ResourceName) (minAllocatable, maxAllocatable float64, err error) {
+func getClustersMinMaxAllocatableResource(clusters []*clusterapiv1.ManagedCluster,
+	resourceName clusterapiv1.ResourceName) (minAllocatable, maxAllocatable float64, err error) {
 	allocatable := sort.Float64Slice{}
 	// get allocatable resources
diff --git a/pkg/placement/plugins/tainttoleration/taint_toleration.go b/pkg/placement/plugins/tainttoleration/taint_toleration.go
index 04baadcc2..52222cbe1 100644
--- a/pkg/placement/plugins/tainttoleration/taint_toleration.go
+++ b/pkg/placement/plugins/tainttoleration/taint_toleration.go
@@ -20,7 +20,7 @@ import (
 )
 var _ plugins.Filter = &TaintToleration{}
-var TolerationClock = (clock.Clock)(clock.RealClock{})
+var TolerationClock = clock.Clock(clock.RealClock{})
 const (
 	placementLabel = "cluster.open-cluster-management.io/placement"
@@ -45,7 +45,8 @@ func (pl *TaintToleration) Description() string {
 	return description
 }
-func (pl *TaintToleration) Filter(ctx context.Context, placement *clusterapiv1beta1.Placement, clusters []*clusterapiv1.ManagedCluster) (plugins.PluginFilterResult, *framework.Status) {
+func (pl *TaintToleration) Filter(ctx context.Context, placement *clusterapiv1beta1.Placement,
+	clusters []*clusterapiv1.ManagedCluster) (plugins.PluginFilterResult, *framework.Status) {
 	status := framework.NewStatus(pl.Name(), framework.Success, "")
 	if len(clusters) == 0 {
@@ -113,7 +114,8 @@ func (pl *TaintToleration) RequeueAfter(ctx context.Context, placement *clustera
 }
 // isClusterTolerated returns true if a cluster is tolerated by the given toleration array
-func isClusterTolerated(cluster *clusterapiv1.ManagedCluster, tolerations []clusterapiv1beta1.Toleration, inDecision bool) (bool, *plugins.PluginRequeueResult, string) {
+func isClusterTolerated(cluster *clusterapiv1.ManagedCluster, tolerations []clusterapiv1beta1.Toleration,
+	inDecision bool) (bool, *plugins.PluginRequeueResult, string) {
 	var minRequeue *plugins.PluginRequeueResult
 	for _, taint := range cluster.Spec.Taints {
 		tolerated, requeue, message := isTaintTolerated(taint, tolerations, inDecision)
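Editor's note: both plugins above rewrite `(clock.Clock)(clock.RealClock{})` to the plainer conversion `clock.Clock(clock.RealClock{})`, which gocritic prefers. The package-level clock variable exists so tests can substitute a fake; a hedged sketch of the pattern with k8s.io/utils/clock (the expiry helper is hypothetical):

package example

import (
	"time"

	"k8s.io/utils/clock"
)

// TolerationClock can be swapped for a fake clock (k8s.io/utils/clock/testing)
// in unit tests that need deterministic time.
var TolerationClock = clock.Clock(clock.RealClock{})

func tolerationExpired(addedAt time.Time, tolerationSeconds int64) bool {
	deadline := addedAt.Add(time.Duration(tolerationSeconds) * time.Second)
	return TolerationClock.Now().After(deadline)
}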
diff --git a/pkg/registration/clientcert/cert_controller.go b/pkg/registration/clientcert/cert_controller.go
index 13008206b..544ba6978 100644
--- a/pkg/registration/clientcert/cert_controller.go
+++ b/pkg/registration/clientcert/cert_controller.go
@@ -368,9 +368,11 @@ func shouldCreateCSR(
 	additionalSecretData map[string][]byte) (bool, error) {
 	switch {
 	case !hasValidClientCertificate(subject, secret):
-		recorder.Eventf("NoValidCertificateFound", "No valid client certificate for %s is found. Bootstrap is required", controllerName)
+		recorder.Eventf("NoValidCertificateFound",
+			"No valid client certificate for %s is found. Bootstrap is required", controllerName)
 	case additionalSecretDataSensitive && !hasAdditionalSecretData(additionalSecretData, secret):
-		recorder.Eventf("AdditonalSecretDataChanged", "The additonal secret data is changed. Re-create the client certificate for %s", controllerName)
+		recorder.Eventf("AdditonalSecretDataChanged",
+			"The additional secret data is changed. Re-create the client certificate for %s", controllerName)
 	default:
 		notBefore, notAfter, err := getCertValidityPeriod(secret)
 		if err != nil {
@@ -379,14 +381,17 @@ func shouldCreateCSR(
 		total := notAfter.Sub(*notBefore)
 		remaining := time.Until(*notAfter)
-		klog.V(4).Infof("Client certificate for %s: time total=%v, remaining=%v, remaining/total=%v", controllerName, total, remaining, remaining.Seconds()/total.Seconds())
+		klog.V(4).Infof("Client certificate for %s: time total=%v, remaining=%v, remaining/total=%v",
+			controllerName, total, remaining, remaining.Seconds()/total.Seconds())
 		threshold := jitter(0.2, 0.25)
 		if remaining.Seconds()/total.Seconds() > threshold {
 			// Do nothing if the client certificate is valid and has more than a random percentage range from 20% to 25% of its life remaining
 			klog.V(4).Infof("Client certificate for %s is valid and has more than %.2f%% of its life remaining", controllerName, threshold*100)
 			return false, nil
 		}
-		recorder.Eventf("CertificateRotationStarted", "The current client certificate for %s expires in %v. Start certificate rotation", controllerName, remaining.Round(time.Second))
+		recorder.Eventf("CertificateRotationStarted",
+			"The current client certificate for %s expires in %v. Start certificate rotation",
Start certificate rotation", + controllerName, remaining.Round(time.Second)) } return true, nil } diff --git a/pkg/registration/clientcert/certficate_beta.go b/pkg/registration/clientcert/certficate_beta.go index c9fbf8643..e3374e98d 100644 --- a/pkg/registration/clientcert/certficate_beta.go +++ b/pkg/registration/clientcert/certficate_beta.go @@ -52,7 +52,8 @@ func (v *v1beta1CSRControl) getIssuedCertificate(name string) ([]byte, error) { return v1beta1CSR.Status.Certificate, nil } -func (v *v1beta1CSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, csrData []byte, signerName string, expirationSeconds *int32) (string, error) { +func (v *v1beta1CSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, + csrData []byte, signerName string, expirationSeconds *int32) (string, error) { csr := &certificates.CertificateSigningRequest{ ObjectMeta: objMeta, Spec: certificates.CertificateSigningRequestSpec{ diff --git a/pkg/registration/clientcert/certificate.go b/pkg/registration/clientcert/certificate.go index 6265879c7..8e26c8421 100644 --- a/pkg/registration/clientcert/certificate.go +++ b/pkg/registration/clientcert/certificate.go @@ -215,7 +215,8 @@ func (v *v1CSRControl) getIssuedCertificate(name string) ([]byte, error) { return v1CSR.Status.Certificate, nil } -func (v *v1CSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, csrData []byte, signerName string, expirationSeconds *int32) (string, error) { +func (v *v1CSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, csrData []byte, + signerName string, expirationSeconds *int32) (string, error) { csr := &certificates.CertificateSigningRequest{ ObjectMeta: objMeta, Spec: certificates.CertificateSigningRequestSpec{ diff --git a/pkg/registration/helpers/testing/assertion.go b/pkg/registration/helpers/testing/assertion.go index 7dfc9f1e6..bdcfa3c18 100644 --- a/pkg/registration/helpers/testing/assertion.go +++ b/pkg/registration/helpers/testing/assertion.go @@ -32,7 +32,7 @@ func AssertFinalizers(t *testing.T, obj runtime.Object, finalizers []string) { } // AssertManagedClusterClientConfigs asserts the actual managed cluster client configs are the -// same wiht the expected +// same with the expected func AssertManagedClusterClientConfigs(t *testing.T, actual, expected []clusterv1.ClientConfig) { if len(actual) == 0 && len(expected) == 0 { return @@ -43,7 +43,7 @@ func AssertManagedClusterClientConfigs(t *testing.T, actual, expected []clusterv } // AssertManagedClusterStatus sserts the actual managed cluster status is the same -// wiht the expected +// with the expected func AssertManagedClusterStatus(t *testing.T, actual, expected clusterv1.ManagedClusterStatus) { if !reflect.DeepEqual(actual.Version, expected.Version) { t.Errorf("expected version %#v but got: %#v", expected.Version, actual.Version) diff --git a/pkg/registration/helpers/testing/testinghelpers.go b/pkg/registration/helpers/testing/testinghelpers.go index 7cbe3e61b..00c26a416 100644 --- a/pkg/registration/helpers/testing/testinghelpers.go +++ b/pkg/registration/helpers/testing/testinghelpers.go @@ -277,7 +277,7 @@ type CSRHolder struct { } func NewCSR(holder CSRHolder) *certv1.CertificateSigningRequest { - insecureRand := rand.New(rand.NewSource(0)) + insecureRand := rand.New(rand.NewSource(0)) //nolint:gosec pk, err := ecdsa.GenerateKey(elliptic.P256(), insecureRand) if err != nil { panic(err) @@ -310,7 +310,7 @@ func 
diff --git a/pkg/registration/clientcert/certficate_beta.go b/pkg/registration/clientcert/certficate_beta.go
index c9fbf8643..e3374e98d 100644
--- a/pkg/registration/clientcert/certficate_beta.go
+++ b/pkg/registration/clientcert/certficate_beta.go
@@ -52,7 +52,8 @@ func (v *v1beta1CSRControl) getIssuedCertificate(name string) ([]byte, error) {
 	return v1beta1CSR.Status.Certificate, nil
 }
-func (v *v1beta1CSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, csrData []byte, signerName string, expirationSeconds *int32) (string, error) {
+func (v *v1beta1CSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta,
+	csrData []byte, signerName string, expirationSeconds *int32) (string, error) {
 	csr := &certificates.CertificateSigningRequest{
 		ObjectMeta: objMeta,
 		Spec: certificates.CertificateSigningRequestSpec{
diff --git a/pkg/registration/clientcert/certificate.go b/pkg/registration/clientcert/certificate.go
index 6265879c7..8e26c8421 100644
--- a/pkg/registration/clientcert/certificate.go
+++ b/pkg/registration/clientcert/certificate.go
@@ -215,7 +215,8 @@ func (v *v1CSRControl) getIssuedCertificate(name string) ([]byte, error) {
 	return v1CSR.Status.Certificate, nil
 }
-func (v *v1CSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, csrData []byte, signerName string, expirationSeconds *int32) (string, error) {
+func (v *v1CSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, csrData []byte,
+	signerName string, expirationSeconds *int32) (string, error) {
 	csr := &certificates.CertificateSigningRequest{
 		ObjectMeta: objMeta,
 		Spec: certificates.CertificateSigningRequestSpec{
diff --git a/pkg/registration/helpers/testing/assertion.go b/pkg/registration/helpers/testing/assertion.go
index 7dfc9f1e6..bdcfa3c18 100644
--- a/pkg/registration/helpers/testing/assertion.go
+++ b/pkg/registration/helpers/testing/assertion.go
@@ -32,7 +32,7 @@ func AssertFinalizers(t *testing.T, obj runtime.Object, finalizers []string) {
 }
 // AssertManagedClusterClientConfigs asserts the actual managed cluster client configs are the
-// same wiht the expected
+// same with the expected
 func AssertManagedClusterClientConfigs(t *testing.T, actual, expected []clusterv1.ClientConfig) {
 	if len(actual) == 0 && len(expected) == 0 {
 		return
@@ -43,7 +43,7 @@ func AssertManagedClusterClientConfigs(t *testing.T, actual, expected []clusterv
 }
 // AssertManagedClusterStatus sserts the actual managed cluster status is the same
-// wiht the expected
+// with the expected
 func AssertManagedClusterStatus(t *testing.T, actual, expected clusterv1.ManagedClusterStatus) {
 	if !reflect.DeepEqual(actual.Version, expected.Version) {
 		t.Errorf("expected version %#v but got: %#v", expected.Version, actual.Version)
diff --git a/pkg/registration/helpers/testing/testinghelpers.go b/pkg/registration/helpers/testing/testinghelpers.go
index 7cbe3e61b..00c26a416 100644
--- a/pkg/registration/helpers/testing/testinghelpers.go
+++ b/pkg/registration/helpers/testing/testinghelpers.go
@@ -277,7 +277,7 @@ type CSRHolder struct {
 }
 func NewCSR(holder CSRHolder) *certv1.CertificateSigningRequest {
-	insecureRand := rand.New(rand.NewSource(0))
+	insecureRand := rand.New(rand.NewSource(0)) //nolint:gosec
 	pk, err := ecdsa.GenerateKey(elliptic.P256(), insecureRand)
 	if err != nil {
 		panic(err)
@@ -310,7 +310,7 @@ func NewCSR(holder CSRHolder) *certv1.CertificateSigningRequest {
 }
 func NewV1beta1CSR(holder CSRHolder) *certv1beta1.CertificateSigningRequest {
-	insecureRand := rand.New(rand.NewSource(0))
+	insecureRand := rand.New(rand.NewSource(0)) //nolint:gosec
 	pk, err := ecdsa.GenerateKey(elliptic.P256(), insecureRand)
 	if err != nil {
 		panic(err)
@@ -499,7 +499,7 @@ func NewTestCert(commonName string, duration time.Duration) *TestCert {
 }
 func WriteFile(filename string, data []byte) {
-	if err := ioutil.WriteFile(filename, data, 0644); err != nil {
+	if err := ioutil.WriteFile(filename, data, 0600); err != nil {
 		panic(err)
 	}
 }
diff --git a/pkg/registration/hub/clusterrole/controller.go b/pkg/registration/hub/clusterrole/controller.go
index a6617b4c0..9811d8de1 100644
--- a/pkg/registration/hub/clusterrole/controller.go
+++ b/pkg/registration/hub/clusterrole/controller.go
@@ -34,7 +34,7 @@ var clusterRoleFiles = []string{
 //go:embed manifests
 var manifestFiles embed.FS
-// clusterroleController maintains the necessary clusterroles for registraion and work agent on hub cluster.
+// clusterroleController maintains the necessary clusterroles for registration and work agent on hub cluster.
 type clusterroleController struct {
 	kubeClient kubernetes.Interface
 	clusterLister clusterv1listers.ManagedClusterLister
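Editor's note: the csr controller hunks below pair //nolint:unused markers with compile-time guards; the `var _ Iface = &Impl{}` form makes the compiler verify interface compliance even when the methods are only invoked through a generic type parameter. A minimal illustration with hypothetical types:

package example

type approver[T any] interface {
	approve(obj T) error
}

type v1Approver struct{}

func (a *v1Approver) approve(obj string) error { return nil }

// This declaration fails to compile if *v1Approver ever stops
// satisfying approver[string]; the blank identifier costs nothing at runtime.
var _ approver[string] = &v1Approver{}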
diff --git a/pkg/registration/hub/csr/controller.go b/pkg/registration/hub/csr/controller.go
index 5e1033b88..6115917aa 100644
--- a/pkg/registration/hub/csr/controller.go
+++ b/pkg/registration/hub/csr/controller.go
@@ -91,6 +91,8 @@ func (c *csrApprovingController[T]) sync(ctx context.Context, syncCtx factory.Sy
 	return nil
 }
+var _ CSRApprover[*certificatesv1.CertificateSigningRequest] = &CSRV1Approver{}
 // CSRV1Approver implement CSRApprover interface
 type CSRV1Approver struct {
 	kubeClient kubernetes.Interface
@@ -100,11 +102,11 @@ func NewCSRV1Approver(client kubernetes.Interface) *CSRV1Approver {
 	return &CSRV1Approver{kubeClient: client}
 }
-func (c *CSRV1Approver) isInTerminalState(csr *certificatesv1.CertificateSigningRequest) bool {
+func (c *CSRV1Approver) isInTerminalState(csr *certificatesv1.CertificateSigningRequest) bool { //nolint:unused
 	return helpers.IsCSRInTerminalState(&csr.Status)
 }
-func (c *CSRV1Approver) approve(ctx context.Context, csr *certificatesv1.CertificateSigningRequest) approveCSRFunc {
+func (c *CSRV1Approver) approve(ctx context.Context, csr *certificatesv1.CertificateSigningRequest) approveCSRFunc { //nolint:unused
 	return func(kubeClient kubernetes.Interface) error {
 		csrCopy := csr.DeepCopy()
 		// Auto approve the spoke cluster csr
@@ -119,6 +121,8 @@ func (c *CSRV1Approver) approve(ctx context.Context, csr *certificatesv1.Certifi
 	}
 }
+var _ CSRApprover[*certificatesv1beta1.CertificateSigningRequest] = &CSRV1beta1Approver{}
 type CSRV1beta1Approver struct {
 	kubeClient kubernetes.Interface
 }
@@ -127,11 +131,11 @@ func NewCSRV1beta1Approver(client kubernetes.Interface) *CSRV1beta1Approver {
 	return &CSRV1beta1Approver{kubeClient: client}
 }
-func (c *CSRV1beta1Approver) isInTerminalState(csr *certificatesv1beta1.CertificateSigningRequest) bool {
+func (c *CSRV1beta1Approver) isInTerminalState(csr *certificatesv1beta1.CertificateSigningRequest) bool { //nolint:unused
 	return helpers.Isv1beta1CSRInTerminalState(&csr.Status)
 }
-func (c *CSRV1beta1Approver) approve(ctx context.Context, csr *certificatesv1beta1.CertificateSigningRequest) approveCSRFunc {
+func (c *CSRV1beta1Approver) approve(ctx context.Context, csr *certificatesv1beta1.CertificateSigningRequest) approveCSRFunc { //nolint:unused
 	return func(kubeClient kubernetes.Interface) error {
 		csrCopy := csr.DeepCopy()
 		// Auto approve the spoke cluster csr
diff --git a/pkg/registration/hub/managedclusterset/default_managedclusterset_controller.go b/pkg/registration/hub/managedclusterset/default_managedclusterset_controller.go
index 1ad80eef7..e9fb842fb 100644
--- a/pkg/registration/hub/managedclusterset/default_managedclusterset_controller.go
+++ b/pkg/registration/hub/managedclusterset/default_managedclusterset_controller.go
@@ -86,7 +86,8 @@ func (c *defaultManagedClusterSetController) sync(ctx context.Context, syncCtx f
 	if errors.IsNotFound(err) {
 		_, err := c.clusterSetClient.ManagedClusterSets().Create(ctx, DefaultManagedClusterSet, metav1.CreateOptions{})
 		if err == nil {
-			c.eventRecorder.Eventf("DefaultManagedClusterSetCreated", "Set the DefaultManagedClusterSet name to %+v. spec to %+v", DefaultManagedClusterSetName, DefaultManagedClusterSet.Spec)
+			c.eventRecorder.Eventf("DefaultManagedClusterSetCreated",
+				"Set the DefaultManagedClusterSet name to %+v. spec to %+v", DefaultManagedClusterSetName, DefaultManagedClusterSet.Spec)
 		}
 		return err
 	}
diff --git a/pkg/registration/hub/managedclusterset/global_managedclusterset_controller.go b/pkg/registration/hub/managedclusterset/global_managedclusterset_controller.go
index 204474a4e..a698df99d 100644
--- a/pkg/registration/hub/managedclusterset/global_managedclusterset_controller.go
+++ b/pkg/registration/hub/managedclusterset/global_managedclusterset_controller.go
@@ -85,7 +85,8 @@ func (c *globalManagedClusterSetController) sync(ctx context.Context, syncCtx fa
 	if errors.IsNotFound(err) {
 		_, err := c.clusterSetClient.ManagedClusterSets().Create(ctx, GlobalManagedClusterSet, metav1.CreateOptions{})
 		if err == nil {
-			c.eventRecorder.Eventf("GlobalManagedClusterSetCreated", "Set the GlobalManagedClusterSet name to %+v. spec to %+v", GlobalManagedClusterSetName, GlobalManagedClusterSet.Spec)
+			c.eventRecorder.Eventf("GlobalManagedClusterSetCreated",
+				"Set the GlobalManagedClusterSet name to %+v. spec to %+v", GlobalManagedClusterSetName, GlobalManagedClusterSet.Spec)
 		}
 		return err
 	}
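Editor's note: both clusterset controllers above follow the same ensure pattern: on a NotFound read, recreate the default object and emit an event. A hedged sketch of the skeleton; the function-valued parameters are hypothetical stand-ins for the typed client calls:

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/api/errors"
)

func ensureDefault(ctx context.Context,
	get func(context.Context) error, create func(context.Context) error) error {
	err := get(ctx)
	switch {
	case errors.IsNotFound(err):
		// The managed default object is missing; recreate it. Callers
		// typically emit an event when the create succeeds.
		return create(ctx)
	default:
		return err
	}
}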
diff --git a/pkg/registration/spoke/managedcluster/claim_reconcile.go b/pkg/registration/spoke/managedcluster/claim_reconcile.go
index 2e86bef1e..c6ad756b8 100644
--- a/pkg/registration/spoke/managedcluster/claim_reconcile.go
+++ b/pkg/registration/spoke/managedcluster/claim_reconcile.go
@@ -81,7 +81,8 @@ func (r *claimReconcile) exposeClaims(ctx context.Context, cluster *clusterv1.Ma
 	// truncate custom claims if the number exceeds `max-custom-cluster-claims`
 	if n := len(customClaims); n > r.maxCustomClusterClaims {
 		customClaims = customClaims[:r.maxCustomClusterClaims]
-		r.recorder.Eventf("CustomClusterClaimsTruncated", "%d cluster claims are found. It exceeds the max number of custom cluster claims (%d). %d custom cluster claims are not exposed.",
+		r.recorder.Eventf("CustomClusterClaimsTruncated",
+			"%d cluster claims are found. It exceeds the max number of custom cluster claims (%d). %d custom cluster claims are not exposed.",
 			n, r.maxCustomClusterClaims, n-r.maxCustomClusterClaims)
 	}
diff --git a/pkg/registration/spoke/registration/creating_controller.go b/pkg/registration/spoke/registration/creating_controller.go
index b23b45c1f..4199de2f4 100644
--- a/pkg/registration/spoke/registration/creating_controller.go
+++ b/pkg/registration/spoke/registration/creating_controller.go
@@ -119,7 +119,8 @@ func (c *managedClusterCreatingController) sync(ctx context.Context, syncCtx fac
 	clusterCopy := existingCluster.DeepCopy()
 	clusterCopy.Spec.ManagedClusterClientConfigs = managedClusterClientConfigs
 	_, err = c.hubClusterClient.ClusterV1().ManagedClusters().Update(ctx, clusterCopy, metav1.UpdateOptions{})
-	// ManagedClusterClientConfigs in ManagedCluster is only allowed updated during bootstrap. After bootstrap secret expired, an unauthorized error will be got, skip it
+	// ManagedClusterClientConfigs in ManagedCluster may only be updated during bootstrap.
+	// After the bootstrap secret expires, an unauthorized error is returned; skip it.
 	if skipUnauthorizedError(err) != nil {
 		return fmt.Errorf("unable to update ManagedClusterClientConfigs of managed cluster %q in hub: %w", c.clusterName, err)
 	}
diff --git a/pkg/registration/spoke/registration/registration.go b/pkg/registration/spoke/registration/registration.go
index 8ddc63134..2dcca692d 100644
--- a/pkg/registration/spoke/registration/registration.go
+++ b/pkg/registration/spoke/registration/registration.go
@@ -145,7 +145,8 @@ func GenerateBootstrapStatusUpdater() clientcert.StatusUpdateFunc {
 }
 // GenerateStatusUpdater generates status update func for the certificate management
-func GenerateStatusUpdater(hubClusterClient clientset.Interface, hubClusterLister clusterv1listers.ManagedClusterLister, clusterName string) clientcert.StatusUpdateFunc {
+func GenerateStatusUpdater(hubClusterClient clientset.Interface,
+	hubClusterLister clusterv1listers.ManagedClusterLister, clusterName string) clientcert.StatusUpdateFunc {
 	return func(ctx context.Context, cond metav1.Condition) error {
 		cluster, err := hubClusterLister.Get(clusterName)
 		if errors.IsNotFound(err) {
diff --git a/pkg/registration/spoke/spokeagent.go b/pkg/registration/spoke/spokeagent.go
index feeecc7a8..9a260850f 100644
--- a/pkg/registration/spoke/spokeagent.go
+++ b/pkg/registration/spoke/spokeagent.go
@@ -168,7 +168,8 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
 	}
 	// create a shared informer factory with specific namespace for the management cluster.
-	namespacedManagementKubeInformerFactory := informers.NewSharedInformerFactoryWithOptions(managementKubeClient, 10*time.Minute, informers.WithNamespace(o.ComponentNamespace))
+	namespacedManagementKubeInformerFactory := informers.NewSharedInformerFactoryWithOptions(
+		managementKubeClient, 10*time.Minute, informers.WithNamespace(o.ComponentNamespace))
 	// load bootstrap client config and create bootstrap clients
 	bootstrapClientConfig, err := clientcmd.BuildConfigFromFlags("", o.BootstrapKubeconfig)
@@ -218,7 +219,8 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
 	// create a ClientCertForHubController for spoke agent bootstrap
 	// the bootstrap informers are supposed to be terminated after completing the bootstrap process.
 	bootstrapInformerFactory := informers.NewSharedInformerFactory(bootstrapKubeClient, 10*time.Minute)
-	bootstrapNamespacedManagementKubeInformerFactory := informers.NewSharedInformerFactoryWithOptions(managementKubeClient, 10*time.Minute, informers.WithNamespace(o.ComponentNamespace))
+	bootstrapNamespacedManagementKubeInformerFactory := informers.NewSharedInformerFactoryWithOptions(
+		managementKubeClient, 10*time.Minute, informers.WithNamespace(o.ComponentNamespace))
 	// create a kubeconfig with references to the key/cert files in the same secret
 	kubeconfig := clientcert.BuildKubeconfig(bootstrapClientConfig, clientcert.TLSCertFile, clientcert.TLSKeyFile)
@@ -428,7 +430,8 @@ func (o *SpokeAgentOptions) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&o.MaxCustomClusterClaims, "max-custom-cluster-claims", o.MaxCustomClusterClaims,
 		"The max number of custom cluster claims to expose.")
 	fs.Int32Var(&o.ClientCertExpirationSeconds, "client-cert-expiration-seconds", o.ClientCertExpirationSeconds,
-		"The requested duration in seconds of validity of the issued client certificate. If this is not set, the value of --cluster-signing-duration command-line flag of the kube-controller-manager will be used.")
+		"The requested duration in seconds of validity of the issued client certificate. If this is not set, "+
+			"the value of --cluster-signing-duration command-line flag of the kube-controller-manager will be used.")
 }
 // Validate verifies the inputs.
diff --git a/pkg/registration/webhook/option.go b/pkg/registration/webhook/option.go
index f8dbd282e..82ab745ae 100644
--- a/pkg/registration/webhook/option.go
+++ b/pkg/registration/webhook/option.go
@@ -19,5 +19,6 @@ func (c *Options) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&c.Port, "port", c.Port, "Port is the port that the webhook server serves at.")
 	fs.StringVar(&c.CertDir, "certdir", c.CertDir,
-		"CertDir is the directory that contains the server key and certificate. If not set, webhook server would look up the server key and certificate in {TempDir}/k8s-webhook-server/serving-certs")
+		"CertDir is the directory that contains the server key and certificate. If not set, "+
+			"webhook server would look up the server key and certificate in {TempDir}/k8s-webhook-server/serving-certs")
 }
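Editor's note: several flag registrations in this patch split long help text with `+` concatenation rather than exceed lll's limit; Go joins adjacent string literals at compile time, so there is no runtime cost. A sketch with hypothetical flag names:

package example

import "github.com/spf13/pflag"

func addFlags(fs *pflag.FlagSet, certDir *string) {
	fs.StringVar(certDir, "certdir", *certDir,
		"CertDir is the directory that contains the server key and certificate. If not set, "+
			"the webhook server falls back to {TempDir}/k8s-webhook-server/serving-certs")
}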
"k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" diff --git a/pkg/registration/webhook/v1/managedcluster_mutating.go b/pkg/registration/webhook/v1/managedcluster_mutating.go index 41ccfe190..4eff161a9 100644 --- a/pkg/registration/webhook/v1/managedcluster_mutating.go +++ b/pkg/registration/webhook/v1/managedcluster_mutating.go @@ -110,7 +110,8 @@ func (r *ManagedClusterWebhook) processTaints(managedCluster, oldManagedCluster return apierrors.NewBadRequest(fmt.Sprintf("It is not allowed to set TimeAdded of Taint %q.", strings.Join(invalidTaints, ","))) } -// addDefaultClusterSetLabel add label "cluster.open-cluster-management.io/clusterset:default" for ManagedCluster if the managedCluster has no ManagedClusterSet label +// addDefaultClusterSetLabel add label "cluster.open-cluster-management.io/clusterset:default" for +// ManagedCluster if the managedCluster has no ManagedClusterSet label func (a *ManagedClusterWebhook) addDefaultClusterSetLabel(managedCluster *clusterv1.ManagedCluster) { if len(managedCluster.Labels) == 0 { managedCluster.Labels = map[string]string{ diff --git a/pkg/registration/webhook/v1beta2/managedclusterset_conversion.go b/pkg/registration/webhook/v1beta2/managedclusterset_conversion.go index 986fe3cc5..5cfbcfcad 100644 --- a/pkg/registration/webhook/v1beta2/managedclusterset_conversion.go +++ b/pkg/registration/webhook/v1beta2/managedclusterset_conversion.go @@ -21,7 +21,7 @@ func (src *ManagedClusterSet) ConvertTo(dstRaw conversion.Hub) error { dst.ObjectMeta = src.ObjectMeta if len(src.Spec.ClusterSelector.SelectorType) == 0 || src.Spec.ClusterSelector.SelectorType == v1beta2.ExclusiveClusterSetLabel { - dst.Spec.ClusterSelector.SelectorType = v1beta1.SelectorType(v1beta1.LegacyClusterSetLabel) + dst.Spec.ClusterSelector.SelectorType = v1beta1.LegacyClusterSetLabel } else { dst.Spec.ClusterSelector.SelectorType = v1beta1.SelectorType(src.Spec.ClusterSelector.SelectorType) dst.Spec.ClusterSelector.LabelSelector = src.Spec.ClusterSelector.LabelSelector diff --git a/pkg/work/helper/helpers.go b/pkg/work/helper/helpers.go index 5faac4454..8cfd703d9 100644 --- a/pkg/work/helper/helpers.go +++ b/pkg/work/helper/helpers.go @@ -432,7 +432,8 @@ func FindManifestConiguration(resourceMeta workapiv1.ManifestResourceMeta, manif return nil } -func ApplyOwnerReferences(ctx context.Context, dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, existing runtime.Object, requiredOwner metav1.OwnerReference) error { +func ApplyOwnerReferences(ctx context.Context, dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, + existing runtime.Object, requiredOwner metav1.OwnerReference) error { accessor, err := meta.Accessor(existing) if err != nil { return fmt.Errorf("type %t cannot be accessed: %v", existing, err) @@ -557,7 +558,8 @@ func (pdl PlacementDecisionGetter) List(selector labels.Selector, namespace stri } // Get added and deleted clusters names -func GetClusters(client clusterlister.PlacementDecisionLister, placement *clusterv1beta1.Placement, existingClusters sets.Set[string]) (sets.Set[string], sets.Set[string], error) { +func GetClusters(client clusterlister.PlacementDecisionLister, placement *clusterv1beta1.Placement, + existingClusters sets.Set[string]) (sets.Set[string], sets.Set[string], error) { pdtracker := clusterv1beta1.NewPlacementDecisionClustersTracker(placement, PlacementDecisionGetter{Client: client}, existingClusters) return pdtracker.Get() diff --git 
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_reconcile.go
index 3554b19b3..c9de16888 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_reconcile.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_reconcile.go
@@ -14,7 +14,8 @@ type addFinalizerReconciler struct {
 	workClient workclientset.Interface
 }
-func (a *addFinalizerReconciler) reconcile(ctx context.Context, pw *workapiv1alpha1.ManifestWorkReplicaSet) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
+func (a *addFinalizerReconciler) reconcile(ctx context.Context, pw *workapiv1alpha1.ManifestWorkReplicaSet,
+) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
 	// Do not need to add finalizer if it is in delete state already.
 	if !pw.DeletionTimestamp.IsZero() {
 		return pw, reconcileStop, nil
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go
index 47e918a67..2b15c6d61 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go
@@ -201,11 +201,13 @@ func (m *ManifestWorkReplicaSetController) patchPlaceManifestStatus(ctx context.
 		return fmt.Errorf("failed to create patch for work %s: %w", old.Name, err)
 	}
-	_, err = m.workClient.WorkV1alpha1().ManifestWorkReplicaSets(old.Namespace).Patch(ctx, old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	_, err = m.workClient.WorkV1alpha1().ManifestWorkReplicaSets(old.Namespace).Patch(ctx,
+		old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
 	return err
 }
-func listManifestWorksByManifestWorkReplicaSet(mwrs *workapiv1alpha1.ManifestWorkReplicaSet, manifestWorkLister worklisterv1.ManifestWorkLister) ([]*workapiv1.ManifestWork, error) {
+func listManifestWorksByManifestWorkReplicaSet(mwrs *workapiv1alpha1.ManifestWorkReplicaSet,
+	manifestWorkLister worklisterv1.ManifestWorkLister) ([]*workapiv1.ManifestWork, error) {
 	req, err := labels.NewRequirement(ManifestWorkReplicaSetControllerNameLabelKey, selection.Equals, []string{manifestWorkReplicaSetKey(mwrs)})
 	if err != nil {
 		return nil, err
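Editor's note: the reconcile methods being rewrapped in these files all share one shape: each reconciler returns the (possibly mutated) object plus a continue/stop state, and the controller runs them in sequence. A hedged sketch of that chain with simplified stand-in types:

package example

import "context"

type reconcileState int

const (
	reconcileContinue reconcileState = iota
	reconcileStop
)

type object struct{}

type reconciler interface {
	reconcile(ctx context.Context, obj *object) (*object, reconcileState, error)
}

// runChain applies each reconciler in order, stopping early on error
// or when a reconciler signals reconcileStop.
func runChain(ctx context.Context, obj *object, reconcilers []reconciler) (*object, error) {
	for _, r := range reconcilers {
		var state reconcileState
		var err error
		obj, state, err = r.reconcile(ctx, obj)
		if err != nil || state == reconcileStop {
			return obj, err
		}
	}
	return obj, nil
}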
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go
index 5f3750e7e..24b294091 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go
@@ -28,7 +28,8 @@ type deployReconciler struct {
 	placementLister clusterlister.PlacementLister
 }
-func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
+func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet,
+) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
 	// Manifestwork create/update/delete logic.
 	var placements []*clusterv1beta1.Placement
 	for _, placementRef := range mwrSet.Spec.PlacementRefs {
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go
index 191a5201e..67c99f740 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go
@@ -21,7 +21,8 @@ type finalizeReconciler struct {
 	manifestWorkLister worklisterv1.ManifestWorkLister
 }
-func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
+func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet,
+) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
 	if mwrSet.DeletionTimestamp.IsZero() {
 		return mwrSet, reconcileContinue, nil
 	}
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go
index 22d2cbcb8..a54eb8a81 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go
@@ -15,7 +15,8 @@ type statusReconciler struct {
 	manifestWorkLister worklisterv1.ManifestWorkLister
 }
-func (d *statusReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
+func (d *statusReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet,
+) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
 	// The logic for update manifestWorkReplicaSet status
 	if mwrSet.Status.Summary.Total == 0 {
 		condition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)
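Editor's note: the test-helper hunk below collapses an element-by-element copy loop into a single variadic append, the simplification gosimple reports as S1011. A distilled example of the equivalence:

package example

// namesToValues copies all elements in one variadic call; the linting pass
// prefers this over `for _, c := range clusters { values = append(values, c) }`.
func namesToValues(clusters []string) []string {
	var values []string
	values = append(values, clusters...)
	return values
}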
diff --git a/pkg/work/hub/test/helper.go b/pkg/work/hub/test/helper.go
index 5f9bef049..a66e9cd56 100644
--- a/pkg/work/hub/test/helper.go
+++ b/pkg/work/hub/test/helper.go
@@ -34,9 +34,7 @@ func CreateTestPlacement(name string, ns string, clusters ...string) (*clusterv1
 	namereq := metav1.LabelSelectorRequirement{}
 	namereq.Key = "name"
 	namereq.Operator = metav1.LabelSelectorOpIn
-	for _, cls := range clusters {
-		namereq.Values = append(namereq.Values, cls)
-	}
+	namereq.Values = append(namereq.Values, clusters...)
 	labelSelector := &metav1.LabelSelector{
 		MatchExpressions: []metav1.LabelSelectorRequirement{namereq},
diff --git a/pkg/work/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller.go b/pkg/work/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller.go
index 718104c64..7bfe084d0 100644
--- a/pkg/work/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller.go
+++ b/pkg/work/spoke/controllers/appliedmanifestcontroller/appliedmanifestwork_controller.go
@@ -28,7 +28,7 @@ import (
 )
 // AppliedManifestWorkController is to sync the applied resources of appliedmanifestwork with related
-// manifestwork and delete any resouce which is no longer maintained by the manifestwork
+// manifestwork and delete any resource which is no longer maintained by the manifestwork
 type AppliedManifestWorkController struct {
 	manifestWorkClient workv1client.ManifestWorkInterface
 	manifestWorkLister worklister.ManifestWorkNamespaceLister
@@ -121,7 +121,11 @@ func (m *AppliedManifestWorkController) syncManifestWork(
 	var appliedResources []workapiv1.AppliedManifestResourceMeta
 	var errs []error
 	for _, resourceStatus := range manifestWork.Status.ResourceStatus.Manifests {
-		gvr := schema.GroupVersionResource{Group: resourceStatus.ResourceMeta.Group, Version: resourceStatus.ResourceMeta.Version, Resource: resourceStatus.ResourceMeta.Resource}
+		gvr := schema.GroupVersionResource{
+			Group:    resourceStatus.ResourceMeta.Group,
+			Version:  resourceStatus.ResourceMeta.Version,
+			Resource: resourceStatus.ResourceMeta.Resource,
+		}
 		if len(gvr.Resource) == 0 || len(gvr.Version) == 0 || len(resourceStatus.ResourceMeta.Name) == 0 {
 			continue
 		}
diff --git a/pkg/work/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go b/pkg/work/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go
index b84615021..2a8d55ac5 100644
--- a/pkg/work/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go
+++ b/pkg/work/spoke/controllers/finalizercontroller/appliedmanifestwork_finalize_controller.go
@@ -75,7 +75,8 @@ func (m *AppliedManifestWorkFinalizeController) sync(ctx context.Context, contro
 // syncAppliedManifestWork ensures that when a appliedmanifestwork has been deleted, everything it created is also deleted.
 // Foreground deletion is implemented, which means all resources created will be deleted and finalized
 // before removing finalizer from appliedmanifestwork
-func (m *AppliedManifestWorkFinalizeController) syncAppliedManifestWork(ctx context.Context, controllerContext factory.SyncContext, originalManifestWork *workapiv1.AppliedManifestWork) error {
+func (m *AppliedManifestWorkFinalizeController) syncAppliedManifestWork(ctx context.Context,
+	controllerContext factory.SyncContext, originalManifestWork *workapiv1.AppliedManifestWork) error {
 	appliedManifestWork := originalManifestWork.DeepCopy()
 	// no work to do until we're deleted
diff --git a/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go b/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go
index bfa5594c8..3bc5224fe 100644
--- a/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go
+++ b/pkg/work/spoke/controllers/manifestcontroller/manifestwork_controller.go
@@ -282,7 +282,7 @@ func (m *ManifestWorkController) applyManifests(
 			// Apply if there is no result.
 			existingResults[index] = m.applyOneManifest(ctx, index, manifest, workSpec, recorder, owner)
 		case apierrors.IsConflict(existingResults[index].Error):
-			// Apply if there is a resource confilct error.
+			// Apply if there is a resource conflict error.
 			existingResults[index] = m.applyOneManifest(ctx, index, manifest, workSpec, recorder, owner)
 		}
 	}
@@ -364,10 +364,12 @@ func manageOwnerRef(
 // Rules to generate work status conditions from manifest conditions
 // #1: Applied - work status condition (with type Applied) is applied if all manifest conditions (with type Applied) are applied
 // TODO: add rules for other condition types, like Progressing, Available, Degraded
-func (m *ManifestWorkController) generateUpdateStatusFunc(generation int64, newManifestConditions []workapiv1.ManifestCondition) helper.UpdateManifestWorkStatusFunc {
+func (m *ManifestWorkController) generateUpdateStatusFunc(generation int64,
+	newManifestConditions []workapiv1.ManifestCondition) helper.UpdateManifestWorkStatusFunc {
 	return func(oldStatus *workapiv1.ManifestWorkStatus) error {
 		// merge the new manifest conditions with the existing manifest conditions
-		oldStatus.ResourceStatus.Manifests = helper.MergeManifestConditions(oldStatus.ResourceStatus.Manifests, newManifestConditions)
+		oldStatus.ResourceStatus.Manifests = helper.MergeManifestConditions(
+			oldStatus.ResourceStatus.Manifests, newManifestConditions)
 		// aggregate manifest condition to generate work condition
 		newConditions := []metav1.Condition{}
diff --git a/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller.go b/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller.go
index fc0acd3a2..ec26c5291 100644
--- a/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller.go
+++ b/pkg/work/spoke/controllers/statuscontroller/availablestatus_controller.go
@@ -169,7 +169,7 @@ func aggregateManifestConditions(generation int64, manifests []workapiv1.Manifes
 	switch {
 	case unavailable > 0:
 		return metav1.Condition{
-			Type: string(workapiv1.WorkAvailable),
+			Type: workapiv1.WorkAvailable,
 			Status: metav1.ConditionFalse,
 			Reason: "ResourcesNotAvailable",
 			ObservedGeneration: generation,
@@ -177,7 +177,7 @@ func aggregateManifestConditions(generation int64, manifests []workapiv1.Manifes
 		}
 	case unknown > 0:
 		return metav1.Condition{
-			Type: string(workapiv1.WorkAvailable),
+			Type: workapiv1.WorkAvailable,
 			Status: metav1.ConditionUnknown,
 			Reason: "ResourcesStatusUnknown",
 			ObservedGeneration: generation,
@@ -185,7 +185,7 @@ func aggregateManifestConditions(generation int64, manifests []workapiv1.Manifes
 		}
 	case available == 0:
 		return metav1.Condition{
-			Type: string(workapiv1.WorkAvailable),
+			Type: workapiv1.WorkAvailable,
 			Status: metav1.ConditionUnknown,
 			Reason: "ResourcesStatusUnknown",
 			ObservedGeneration: generation,
@@ -193,7 +193,7 @@ func aggregateManifestConditions(generation int64, manifests []workapiv1.Manifes
 		}
 	default:
 		return metav1.Condition{
-			Type: string(workapiv1.WorkAvailable),
+			Type: workapiv1.WorkAvailable,
 			Status: metav1.ConditionTrue,
 			Reason: "ResourcesAvailable",
 			ObservedGeneration: generation,
@@ -203,7 +203,8 @@ func aggregateManifestConditions(generation int64, manifests []workapiv1.Manifes
 }
 func (c *AvailableStatusController) getFeedbackValues(
-	resourceMeta workapiv1.ManifestResourceMeta, obj *unstructured.Unstructured, manifestOptions []workapiv1.ManifestConfigOption) ([]workapiv1.FeedbackValue, metav1.Condition) {
+	resourceMeta workapiv1.ManifestResourceMeta, obj *unstructured.Unstructured,
+	manifestOptions []workapiv1.ManifestConfigOption) ([]workapiv1.FeedbackValue, metav1.Condition) {
 	errs := []error{}
 	values := []workapiv1.FeedbackValue{}
@@ -246,7 +247,8 @@ func (c *AvailableStatusController) getFeedbackValues(
 }
 // buildAvailableStatusCondition returns a StatusCondition with type Available for a given manifest resource
-func buildAvailableStatusCondition(resourceMeta workapiv1.ManifestResourceMeta, dynamicClient dynamic.Interface) (*unstructured.Unstructured, metav1.Condition, error) {
+func buildAvailableStatusCondition(resourceMeta workapiv1.ManifestResourceMeta,
+	dynamicClient dynamic.Interface) (*unstructured.Unstructured, metav1.Condition, error) {
 	conditionType := string(workapiv1.ManifestAvailable)
 	if len(resourceMeta.Resource) == 0 || len(resourceMeta.Version) == 0 || len(resourceMeta.Name) == 0 {
diff --git a/pkg/work/spoke/spokeagent.go b/pkg/work/spoke/spokeagent.go
index e78342e64..7840195fe 100644
--- a/pkg/work/spoke/spokeagent.go
+++ b/pkg/work/spoke/spokeagent.go
@@ -29,7 +29,7 @@ import (
 const (
 	// If a controller queue size is too large (>500), the processing speed of the controller will drop significantly
-	// with one worker, increasing the work numbers can imporve the processing speed.
+	// with one worker, increasing the work numbers can improve the processing speed.
 	// We compared the two situations where the worker is set to 1 and 10, when the worker is 10, the resource
 	// utilization of the kubeapi-server and work agent do not increase significantly.
 	//
@@ -66,7 +66,8 @@ func (o *WorkloadAgentOptions) AddFlags(cmd *cobra.Command) {
 	flags.StringVar(&o.HubKubeconfigFile, "hub-kubeconfig", o.HubKubeconfigFile, "Location of kubeconfig file to connect to hub cluster.")
 	flags.StringVar(&o.AgentID, "agent-id", o.AgentID, "ID of the work agent to identify the work this agent should handle after restart/recovery.")
 	flags.DurationVar(&o.StatusSyncInterval, "status-sync-interval", o.StatusSyncInterval, "Interval to sync resource status to hub.")
-	flags.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period", o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction")
+	flags.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period",
+		o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction")
 }
 // RunWorkloadAgent starts the controllers on agent to process work from hub.
@@ -88,7 +89,8 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
 		return err
 	}
 	// Only watch the cluster namespace on hub
-	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute, workinformers.WithNamespace(o.AgentOptions.SpokeClusterName))
+	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute,
+		workinformers.WithNamespace(o.AgentOptions.SpokeClusterName))
 	// load spoke client config and create spoke clients,
 	// the work agent may not running in the spoke/managed cluster.
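Editor's note: the agents above restrict their informer caches to a single namespace rather than watching the whole cluster. A sketch of the client-go pattern (client construction elided; the function name is hypothetical):

package example

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newNamespacedFactory watches only the given namespace, resyncing the
// local cache every 10 minutes; this bounds both memory use and the
// watch load placed on the API server.
func newNamespacedFactory(client kubernetes.Interface, namespace string) informers.SharedInformerFactory {
	return informers.NewSharedInformerFactoryWithOptions(
		client, 10*time.Minute, informers.WithNamespace(namespace))
}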
diff --git a/pkg/work/webhook/option.go b/pkg/work/webhook/option.go
index ea46acc08..ae1090a45 100644
--- a/pkg/work/webhook/option.go
+++ b/pkg/work/webhook/option.go
@@ -21,7 +21,8 @@ func (c *Options) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&c.Port, "port", c.Port, "Port is the port that the webhook server serves at.")
 	fs.StringVar(&c.CertDir, "certdir", c.CertDir,
-		"CertDir is the directory that contains the server key and certificate. If not set, webhook server would look up the server key and certificate in {TempDir}/k8s-webhook-server/serving-certs")
+		"CertDir is the directory that contains the server key and certificate. If not set, "+
+			"webhook server would look up the server key and certificate in {TempDir}/k8s-webhook-server/serving-certs")
 	fs.IntVar(&c.ManifestLimit, "manifestLimit", c.ManifestLimit, "ManifestLimit is the max size of manifests in a manifestWork. If not set, the default is 500k.")
 }
diff --git a/pkg/work/webhook/v1alpha1/manifestworkreplicaset_validating.go b/pkg/work/webhook/v1alpha1/manifestworkreplicaset_validating.go
index b2d0dfa6d..9bb170787 100644
--- a/pkg/work/webhook/v1alpha1/manifestworkreplicaset_validating.go
+++ b/pkg/work/webhook/v1alpha1/manifestworkreplicaset_validating.go
@@ -54,7 +54,9 @@ func (r *ManifestWorkReplicaSetWebhook) ValidateDelete(_ context.Context, obj ru
 	return nil, nil
 }
-func (r *ManifestWorkReplicaSetWebhook) validateRequest(newmwrSet *workv1alpha1.ManifestWorkReplicaSet, oldmwrSet *workv1alpha1.ManifestWorkReplicaSet, ctx context.Context) error {
+func (r *ManifestWorkReplicaSetWebhook) validateRequest(
+	newmwrSet *workv1alpha1.ManifestWorkReplicaSet, oldmwrSet *workv1alpha1.ManifestWorkReplicaSet,
+	ctx context.Context) error {
 	if err := checkFeatureEnabled(); err != nil {
 		return err
 	}
diff --git a/test/e2e/common.go b/test/e2e/common.go
index 330fcb897..56ed542b4 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -599,11 +599,13 @@ func (t *Tester) CheckHubReady() error {
 	var hubWorkControllerEnabled, addonManagerControllerEnabled bool
 	if cm.Spec.WorkConfiguration != nil {
-		hubWorkControllerEnabled = helpers.FeatureGateEnabled(cm.Spec.WorkConfiguration.FeatureGates, ocmfeature.DefaultHubWorkFeatureGates, ocmfeature.ManifestWorkReplicaSet)
+		hubWorkControllerEnabled = helpers.FeatureGateEnabled(cm.Spec.WorkConfiguration.FeatureGates,
+			ocmfeature.DefaultHubWorkFeatureGates, ocmfeature.ManifestWorkReplicaSet)
 	}
 	if cm.Spec.AddOnManagerConfiguration != nil {
-		addonManagerControllerEnabled = helpers.FeatureGateEnabled(cm.Spec.AddOnManagerConfiguration.FeatureGates, ocmfeature.DefaultHubAddonManagerFeatureGates, ocmfeature.AddonManagement)
+		addonManagerControllerEnabled = helpers.FeatureGateEnabled(cm.Spec.AddOnManagerConfiguration.FeatureGates,
+			ocmfeature.DefaultHubAddonManagerFeatureGates, ocmfeature.AddonManagement)
 	}
 	if hubWorkControllerEnabled {
@@ -1072,7 +1074,7 @@ func (t *Tester) DeleteManageClusterAndRelatedNamespace(clusterName string) erro
 		return fmt.Errorf("delete managed cluster %q failed: %v", clusterName, err)
 	}
-	// delete namespace created by hub automaticly
+	// delete namespace created by hub automatically
 	if err := wait.Poll(1*time.Second, 5*time.Second, func() (bool, error) {
 		err := t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), clusterName, metav1.DeleteOptions{})
 		// some managed cluster just created, but the csr is not approved,
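Editor's note: the integration assertions below poll with gomega.Eventually until a condition settles or a timeout elapses. A minimal sketch of that pattern; the timeouts are illustrative, and a gomega fail handler is assumed to be registered (as Ginkgo suites do):

package example

import (
	"errors"
	"time"

	"github.com/onsi/gomega"
)

// assertEventuallyReady retries the probe every second for up to 30 seconds,
// failing the test if it never returns nil.
func assertEventuallyReady(isReady func() bool) {
	gomega.Eventually(func() error {
		if !isReady() {
			return errors.New("not ready yet")
		}
		return nil
	}, 30*time.Second, time.Second).Should(gomega.Succeed())
}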
diff --git a/test/integration/util/assertion.go b/test/integration/util/assertion.go
index 5bdf1c6c2..dd2f65678 100644
--- a/test/integration/util/assertion.go
+++ b/test/integration/util/assertion.go
@@ -103,7 +103,9 @@ func AssertWorkGeneration(namespace, name string, workClient workclientset.Inter
 }
 // AssertWorkDeleted check if work is deleted
-func AssertWorkDeleted(namespace, name, hubhash string, manifests []workapiv1.Manifest, workClient workclientset.Interface, kubeClient kubernetes.Interface, eventuallyTimeout, eventuallyInterval int) {
+func AssertWorkDeleted(namespace, name, hubhash string, manifests []workapiv1.Manifest,
+	workClient workclientset.Interface, kubeClient kubernetes.Interface,
+	eventuallyTimeout, eventuallyInterval int) {
 	// wait for deletion of manifest work
 	gomega.Eventually(func() error {
 		_, err := workClient.WorkV1().ManifestWorks(namespace).Get(context.Background(), name, metav1.GetOptions{})
@@ -193,7 +195,8 @@ func AssertNonexistenceOfConfigMaps(manifests []workapiv1.Manifest, kubeClient k
 }
 // AssertExistenceOfResources check the existence of resource with GVR, namespace and name
-func AssertExistenceOfResources(gvrs []schema.GroupVersionResource, namespaces, names []string, dynamicClient dynamic.Interface, eventuallyTimeout, eventuallyInterval int) {
+func AssertExistenceOfResources(gvrs []schema.GroupVersionResource, namespaces, names []string,
+	dynamicClient dynamic.Interface, eventuallyTimeout, eventuallyInterval int) {
 	gomega.Expect(gvrs).To(gomega.HaveLen(len(namespaces)))
 	gomega.Expect(gvrs).To(gomega.HaveLen(len(names)))
@@ -210,7 +213,8 @@ func AssertExistenceOfResources(gvrs []schema.GroupVersionResource, namespaces,
 }
 // AssertNonexistenceOfResources check if resource with GVR, namespace and name does not exists
-func AssertNonexistenceOfResources(gvrs []schema.GroupVersionResource, namespaces, names []string, dynamicClient dynamic.Interface, eventuallyTimeout, eventuallyInterval int) {
+func AssertNonexistenceOfResources(gvrs []schema.GroupVersionResource, namespaces, names []string,
+	dynamicClient dynamic.Interface, eventuallyTimeout, eventuallyInterval int) {
 	gomega.Expect(gvrs).To(gomega.HaveLen(len(namespaces)))
 	gomega.Expect(gvrs).To(gomega.HaveLen(len(names)))
@@ -227,7 +231,8 @@ func AssertNonexistenceOfResources(gvrs []schema.GroupVersionResource, namespace
 }
 // AssertAppliedResources check if applied resources in work status are updated correctly
-func AssertAppliedResources(hubHash, workName string, gvrs []schema.GroupVersionResource, namespaces, names []string, workClient workclientset.Interface, eventuallyTimeout, eventuallyInterval int) {
+func AssertAppliedResources(hubHash, workName string, gvrs []schema.GroupVersionResource, namespaces, names []string,
+	workClient workclientset.Interface, eventuallyTimeout, eventuallyInterval int) {
 	gomega.Expect(gvrs).To(gomega.HaveLen(len(namespaces)))
 	gomega.Expect(gvrs).To(gomega.HaveLen(len(names)))
diff --git a/test/integration/util/authentication.go b/test/integration/util/authentication.go
index 664b377e7..11759e002 100644
--- a/test/integration/util/authentication.go
+++ b/test/integration/util/authentication.go
@@ -122,7 +122,7 @@ func (t *TestAuthn) Start() error {
 	if err := pem.Encode(&caCertBuffer, &pem.Block{Type: certutil.CertificateBlockType, Bytes: caDERBytes}); err != nil {
 		return err
 	}
-	if err := os.WriteFile(t.caFile, caCertBuffer.Bytes(), 0644); err != nil {
+	if err := os.WriteFile(t.caFile, caCertBuffer.Bytes(), 0600); err != nil {
 		return err
 	}
@@ -131,7 +131,7 @@ func (t *TestAuthn) Start() error {
 	if err := pem.Encode(
 		&caKeyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(caKey)}); err != nil {
 		return err
 	}
-	if err := os.WriteFile(t.caKeyFile, caKeyBuffer.Bytes(), 0644); err != nil {
+	if err := os.WriteFile(t.caKeyFile, caKeyBuffer.Bytes(), 0600); err != nil {
 		return err
 	}
@@ -181,14 +181,15 @@ func (t *TestAuthn) CreateBootstrapKubeConfig(configFileName, serverCertFile, se
 		}
 	}
-	if err := os.WriteFile(path.Join(configDir, "bootstrap.crt"), certData, 0644); err != nil {
"bootstrap.crt"), certData, 0600); err != nil { return err } - if err := os.WriteFile(path.Join(configDir, "bootstrap.key"), keyData, 0644); err != nil { + if err := os.WriteFile(path.Join(configDir, "bootstrap.key"), keyData, 0600); err != nil { return err } - config, err := createKubeConfigByClientCert(configFileName, securePort, serverCertFile, path.Join(configDir, "bootstrap.crt"), path.Join(configDir, "bootstrap.key")) + config, err := createKubeConfigByClientCert(configFileName, securePort, serverCertFile, + path.Join(configDir, "bootstrap.crt"), path.Join(configDir, "bootstrap.key")) if err != nil { return err } @@ -336,6 +337,7 @@ func FindAddOnCSRs(kubeClient kubernetes.Interface, spokeClusterName, addOnName csrs := []*certificates.CertificateSigningRequest{} for _, csr := range csrList.Items { + csr := csr csrs = append(csrs, &csr) } @@ -436,7 +438,8 @@ func (t *TestAuthn) ApproveCSR(kubeClient kubernetes.Interface, csr *certificate return err } -func (t *TestAuthn) FillCertificateToApprovedCSR(kubeClient kubernetes.Interface, csr *certificates.CertificateSigningRequest, notBefore, notAfter time.Time) error { +func (t *TestAuthn) FillCertificateToApprovedCSR(kubeClient kubernetes.Interface, + csr *certificates.CertificateSigningRequest, notBefore, notAfter time.Time) error { block, _ := pem.Decode(csr.Spec.Request) cr, err := x509.ParseCertificateRequest(block.Bytes) if err != nil { diff --git a/test/integration/util/recorder.go b/test/integration/util/recorder.go index 59053c485..82968c5b8 100644 --- a/test/integration/util/recorder.go +++ b/test/integration/util/recorder.go @@ -9,8 +9,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func NewIntegrationTestEventRecorder(componet string) events.Recorder { - return &IntegrationTestEventRecorder{component: componet} +func NewIntegrationTestEventRecorder(component string) events.Recorder { + return &IntegrationTestEventRecorder{component: component} } type IntegrationTestEventRecorder struct { @@ -58,12 +58,11 @@ func HasCondition( expectedType, expectedReason string, expectedStatus metav1.ConditionStatus, ) bool { - found := false + for _, condition := range conditions { if condition.Type != expectedType { continue } - found = true if condition.Status != expectedStatus { return false @@ -81,5 +80,5 @@ func HasCondition( return true } - return found + return false }