feat: use klog as the default logging library (#1008)

This commit is contained in:
Evans Mungai
2023-02-24 18:24:51 +00:00
committed by GitHub
parent 299258ef87
commit 546ffde14b
28 changed files with 144 additions and 113 deletions

View File

@@ -213,9 +213,9 @@ scan:
./
.PHONY: lint
lint:
lint: fmt vet
golangci-lint run --new -c .golangci.yaml ${BUILDPATHS}
.PHONY: lint-and-fix
lint-and-fix:
lint-and-fix: fmt vet
golangci-lint run --new --fix -c .golangci.yaml ${BUILDPATHS}

View File

@@ -4,7 +4,6 @@ import (
"os"
"strings"
"github.com/go-logr/logr"
"github.com/replicatedhq/troubleshoot/cmd/util"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
"github.com/replicatedhq/troubleshoot/pkg/logger"
@@ -24,13 +23,10 @@ func RootCmd() *cobra.Command {
v := viper.GetViper()
v.BindPFlags(cmd.Flags())
if !v.GetBool("debug") {
klog.SetLogger(logr.Discard())
}
logger.SetQuiet(v.GetBool("quiet"))
logger.SetupLogger(v)
if err := util.StartProfiling(); err != nil {
logger.Printf("Failed to start profiling: %v", err)
klog.Errorf("Failed to start profiling: %v", err)
}
},
RunE: func(cmd *cobra.Command, args []string) error {
@@ -40,7 +36,7 @@ func RootCmd() *cobra.Command {
},
PostRun: func(cmd *cobra.Command, args []string) {
if err := util.StopProfiling(); err != nil {
logger.Printf("Failed to stop profiling: %v", err)
klog.Errorf("Failed to stop profiling: %v", err)
}
},
}
@@ -54,6 +50,9 @@ func RootCmd() *cobra.Command {
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
// Initialize klog flags
logger.InitKlogFlags(cmd)
k8sutil.AddFlags(cmd.Flags())
// CPU and memory profiling flags

View File

@@ -4,7 +4,6 @@ import (
"os"
"strings"
"github.com/go-logr/logr"
"github.com/replicatedhq/troubleshoot/cmd/util"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
"github.com/replicatedhq/troubleshoot/pkg/logger"
@@ -24,23 +23,20 @@ func RootCmd() *cobra.Command {
v := viper.GetViper()
v.BindPFlags(cmd.Flags())
if !v.GetBool("debug") {
klog.SetLogger(logr.Discard())
}
logger.SetupLogger(v)
if err := util.StartProfiling(); err != nil {
logger.Printf("Failed to start profiling: %v", err)
klog.Errorf("Failed to start profiling: %v", err)
}
},
RunE: func(cmd *cobra.Command, args []string) error {
v := viper.GetViper()
logger.SetQuiet(v.GetBool("quiet"))
return runCollect(v, args[0])
},
PostRun: func(cmd *cobra.Command, args []string) {
if err := util.StopProfiling(); err != nil {
logger.Printf("Failed to stop profiling: %v", err)
klog.Errorf("Failed to stop profiling: %v", err)
}
},
}
@@ -68,6 +64,9 @@ func RootCmd() *cobra.Command {
k8sutil.AddFlags(cmd.Flags())
// Initialize klog flags
logger.InitKlogFlags(cmd)
// CPU and memory profiling flags
util.AddProfilingFlags(cmd)

View File

@@ -5,7 +5,6 @@ import (
"os"
"strings"
"github.com/go-logr/logr"
"github.com/replicatedhq/troubleshoot/cmd/util"
"github.com/replicatedhq/troubleshoot/internal/traces"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
@@ -29,12 +28,10 @@ that a cluster meets the requirements to run an application.`,
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
v.BindPFlags(cmd.Flags())
if !v.GetBool("debug") {
klog.SetLogger(logr.Discard())
}
logger.SetupLogger(v)
if err := util.StartProfiling(); err != nil {
logger.Printf("Failed to start profiling: %v", err)
klog.Errorf("Failed to start profiling: %v", err)
}
},
RunE: func(cmd *cobra.Command, args []string) error {
@@ -42,20 +39,20 @@ that a cluster meets the requirements to run an application.`,
closer, err := traces.ConfigureTracing("preflight")
if err != nil {
// Do not fail running preflights if tracing fails
logger.Printf("Failed to initialize open tracing provider: %v", err)
klog.Errorf("Failed to initialize open tracing provider: %v", err)
} else {
defer closer()
}
err = preflight.RunPreflights(v.GetBool("interactive"), v.GetString("output"), v.GetString("format"), args)
if v.GetBool("debug") {
if v.GetBool("debug") || v.IsSet("v") {
fmt.Printf("\n%s", traces.GetExporterInstance().GetSummary())
}
return err
},
PostRun: func(cmd *cobra.Command, args []string) {
if err := util.StopProfiling(); err != nil {
logger.Printf("Failed to stop profiling: %v", err)
klog.Errorf("Failed to stop profiling: %v", err)
}
},
}
@@ -67,6 +64,9 @@ that a cluster meets the requirements to run an application.`,
k8sutil.AddFlags(cmd.Flags())
// Initialize klog flags
logger.InitKlogFlags(cmd)
// CPU and memory profiling flags
util.AddProfilingFlags(cmd)

View File

@@ -10,7 +10,6 @@ import (
"github.com/replicatedhq/troubleshoot/cmd/util"
analyzer "github.com/replicatedhq/troubleshoot/pkg/analyze"
"github.com/replicatedhq/troubleshoot/pkg/convert"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
@@ -28,8 +27,6 @@ func Analyze() *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
v := viper.GetViper()
logger.SetQuiet(v.GetBool("quiet"))
specPath := args[0]
analyzerSpec, err := downloadAnalyzerSpec(specPath)
if err != nil {

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
analyzer "github.com/replicatedhq/troubleshoot/pkg/analyze"
"github.com/replicatedhq/troubleshoot/pkg/collect"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/replicatedhq/troubleshoot/pkg/supportbundle"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -35,8 +34,6 @@ For more information on redactors visit https://troubleshoot.sh/docs/redact/
RunE: func(cmd *cobra.Command, args []string) error {
v := viper.GetViper()
logger.SetQuiet(v.GetBool("quiet"))
// 1. Decode redactors from provided URLs
redactors, err := supportbundle.GetRedactorsFromURIs(args)
if err != nil {

View File

@@ -5,7 +5,6 @@ import (
"os"
"strings"
"github.com/go-logr/logr"
"github.com/replicatedhq/troubleshoot/cmd/util"
"github.com/replicatedhq/troubleshoot/internal/traces"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
@@ -28,30 +27,25 @@ from a server that can be used to assist when troubleshooting a Kubernetes clust
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
v.BindPFlags(cmd.Flags())
logger.SetupLogger(v)
if err := util.StartProfiling(); err != nil {
logger.Printf("Failed to start profiling: %v", err)
klog.Errorf("Failed to start profiling: %v", err)
}
},
PreRun: func(cmd *cobra.Command, args []string) {
v := viper.GetViper()
if !v.GetBool("debug") {
klog.SetLogger(logr.Discard())
}
logger.SetQuiet(!v.GetBool("debug"))
},
RunE: func(cmd *cobra.Command, args []string) error {
v := viper.GetViper()
closer, err := traces.ConfigureTracing("support-bundle")
if err != nil {
// Do not fail running support-bundle if tracing fails
logger.Printf("Failed to initialize open tracing provider: %v", err)
klog.Errorf("Failed to initialize open tracing provider: %v", err)
} else {
defer closer()
}
err = runTroubleshoot(v, args)
if v.GetBool("debug") {
if v.GetBool("debug") || v.IsSet("v") {
fmt.Printf("\n%s", traces.GetExporterInstance().GetSummary())
}
@@ -59,7 +53,7 @@ from a server that can be used to assist when troubleshooting a Kubernetes clust
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
if err := util.StopProfiling(); err != nil {
logger.Printf("Failed to stop profiling: %v", err)
klog.Errorf("Failed to stop profiling: %v", err)
}
},
}
@@ -79,7 +73,7 @@ from a server that can be used to assist when troubleshooting a Kubernetes clust
cmd.Flags().String("since-time", "", "force pod logs collectors to return logs after a specific date (RFC3339)")
cmd.Flags().String("since", "", "force pod logs collectors to return logs newer than a relative duration like 5s, 2m, or 3h.")
cmd.Flags().StringP("output", "o", "", "specify the output file path for the support bundle")
cmd.Flags().Bool("debug", false, "enable debug logging")
cmd.Flags().Bool("debug", false, "enable debug logging. This is equivalent to --v=0")
// hidden in favor of the `insecure-skip-tls-verify` flag
cmd.Flags().Bool("allow-insecure-connections", false, "when set, do not verify TLS certs when retrieving spec and reporting results")
@@ -91,6 +85,9 @@ from a server that can be used to assist when troubleshooting a Kubernetes clust
k8sutil.AddFlags(cmd.Flags())
// Initialize klog flags
logger.InitKlogFlags(cmd)
// CPU and memory profiling flags
util.AddProfilingFlags(cmd)

View File

@@ -23,7 +23,6 @@ import (
"github.com/replicatedhq/troubleshoot/pkg/convert"
"github.com/replicatedhq/troubleshoot/pkg/httputil"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/replicatedhq/troubleshoot/pkg/specs"
"github.com/replicatedhq/troubleshoot/pkg/supportbundle"
"github.com/spf13/viper"
@@ -31,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
func runTroubleshoot(v *viper.Viper, arg []string) error {
@@ -134,13 +134,13 @@ func runTroubleshoot(v *viper.Viper, arg []string) error {
// Search cluster for Troubleshoot objects in cluster
bundlesFromSecrets, err := specs.LoadFromSecretMatchingLabel(client, parsedSelector.String(), namespace, specs.SupportBundleKey)
if err != nil {
logger.Printf("failed to load support bundle spec from secrets: %s", err)
klog.Errorf("failed to load support bundle spec from secrets: %s", err)
}
bundlesFromCluster = append(bundlesFromCluster, bundlesFromSecrets...)
bundlesFromConfigMaps, err := specs.LoadFromConfigMapMatchingLabel(client, parsedSelector.String(), namespace, specs.SupportBundleKey)
if err != nil {
logger.Printf("failed to load support bundle spec from secrets: %s", err)
klog.Errorf("failed to load support bundle spec from secrets: %s", err)
}
bundlesFromCluster = append(bundlesFromCluster, bundlesFromConfigMaps...)
@@ -148,7 +148,7 @@ func runTroubleshoot(v *viper.Viper, arg []string) error {
multidocs := strings.Split(string(bundle), "\n---\n")
parsedBundleFromSecret, err := supportbundle.ParseSupportBundleFromDoc([]byte(multidocs[0]))
if err != nil {
logger.Printf("failed to parse support bundle spec: %s", err)
klog.Errorf("failed to parse support bundle spec: %s", err)
continue
}
@@ -160,7 +160,7 @@ func runTroubleshoot(v *viper.Viper, arg []string) error {
parsedRedactors, err := supportbundle.ParseRedactorsFromDocs(multidocs)
if err != nil {
logger.Printf("failed to parse redactors from doc: %s", err)
klog.Errorf("failed to parse redactors from doc: %s", err)
continue
}
@@ -172,13 +172,13 @@ func runTroubleshoot(v *viper.Viper, arg []string) error {
// Search cluster for Troubleshoot objects in ConfigMaps
redactorsFromSecrets, err := specs.LoadFromSecretMatchingLabel(client, parsedSelector.String(), namespace, specs.RedactorKey)
if err != nil {
logger.Printf("failed to load redactor specs from config maps: %s", err)
klog.Errorf("failed to load redactor specs from config maps: %s", err)
}
redactorsFromCluster = append(redactorsFromCluster, redactorsFromSecrets...)
redactorsFromConfigMaps, err := specs.LoadFromConfigMapMatchingLabel(client, parsedSelector.String(), namespace, specs.RedactorKey)
if err != nil {
logger.Printf("failed to load redactor specs from config maps: %s", err)
klog.Errorf("failed to load redactor specs from config maps: %s", err)
}
redactorsFromCluster = append(redactorsFromCluster, redactorsFromConfigMaps...)
@@ -186,7 +186,7 @@ func runTroubleshoot(v *viper.Viper, arg []string) error {
multidocs := strings.Split(string(redactor), "\n---\n")
parsedRedactors, err := supportbundle.ParseRedactorsFromDocs(multidocs)
if err != nil {
logger.Printf("failed to parse redactors from doc: %s", err)
klog.Errorf("failed to parse redactors from doc: %s", err)
}
additionalRedactors.Spec.Redactors = append(additionalRedactors.Spec.Redactors, parsedRedactors...)
@@ -229,7 +229,7 @@ func runTroubleshoot(v *viper.Viper, arg []string) error {
go func() {
defer wg.Done()
for msg := range progressChan {
logger.Printf("Collecting support bundle: %v", msg)
klog.Infof("Collecting support bundle: %v", msg)
}
}()
} else {

View File

@@ -4,6 +4,9 @@ import (
"net/url"
"os"
"strings"
"golang.org/x/text/cases"
"golang.org/x/text/language"
)
func HomeDir() string {
@@ -23,7 +26,7 @@ func IsURL(str string) bool {
}
func AppName(name string) string {
words := strings.Split(strings.Title(strings.Replace(name, "-", " ", -1)), " ")
words := strings.Split(cases.Title(language.English).String(strings.ReplaceAll(name, "-", " ")), " ")
casedWords := []string{}
for i, word := range words {
if strings.ToLower(word) == "ai" {

View File

@@ -3,13 +3,13 @@ package traces
import (
"context"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/replicatedhq/troubleshoot/pkg/version"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"k8s.io/klog/v2"
)
// ConfigureTracing configures the OpenTelemetry trace provider for CLI
@@ -54,7 +54,7 @@ func ConfigureTracing(processName string) (func(), error) {
return func() {
if err := tp.Shutdown(context.Background()); err != nil {
logger.Printf("Failed to shutdown trace provider: %v", err)
klog.Errorf("Failed to shutdown trace provider: %v", err)
}
}, nil
}

View File

@@ -9,12 +9,12 @@ import (
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/constants"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/replicatedhq/troubleshoot/pkg/multitype"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
)
type AnalyzeResult struct {
@@ -73,7 +73,7 @@ func HostAnalyze(
isExcluded, _ := analyzer.IsExcluded()
if isExcluded {
logger.Printf("Excluding %q analyzer", analyzer.Title())
klog.Infof("excluding %q analyzer", analyzer.Title())
span.SetAttributes(attribute.Bool(constants.EXCLUDED, true))
return nil
}
@@ -129,7 +129,7 @@ func Analyze(
return nil, err
}
if isExcluded {
logger.Printf("Excluding %q analyzer", analyzerInst.Title())
klog.Infof("excluding %q analyzer", analyzerInst.Title())
span.SetAttributes(attribute.Bool(constants.EXCLUDED, true))
return nil, nil
}

View File

@@ -15,9 +15,9 @@ import (
troubleshootscheme "github.com/replicatedhq/troubleshoot/pkg/client/troubleshootclientset/scheme"
"github.com/replicatedhq/troubleshoot/pkg/constants"
"github.com/replicatedhq/troubleshoot/pkg/docrewrite"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/replicatedhq/troubleshoot/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog/v2"
)
type fileContentProvider struct {
@@ -42,7 +42,7 @@ func AnalyzeLocal(
for _, analyzer := range analyzers {
analyzeResult, err := Analyze(ctx, analyzer, fcp.getFileContents, fcp.getChildFileContents)
if err != nil {
logger.Printf("An analyzer failed to run: %v", err)
klog.Errorf("An analyzer failed to run: %v", err)
continue
}

View File

@@ -9,10 +9,10 @@ import (
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/logger"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
const (
@@ -213,7 +213,7 @@ func findRookCephToolsPod(ctx context.Context, c *CollectCeph, namespace string)
return &pods[0], nil
}
logger.Printf("rook ceph tools pod not found")
klog.Info("rook ceph tools pod not found")
return nil, nil
}

View File

@@ -71,6 +71,7 @@ func CollectHost(c *troubleshootv1beta2.HostCollector, additionalRedactors *trou
for _, collector := range collectors {
isExcluded, _ := collector.IsExcluded()
if isExcluded {
opts.ProgressChan <- fmt.Sprintf("[%s] Excluding collector", collector.Title())
continue
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/segmentio/ksuid"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -24,6 +23,7 @@ import (
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/klog/v2"
)
type CollectCopyFromHost struct {
@@ -204,7 +204,7 @@ func copyFromHostCreateDaemonSet(ctx context.Context, client kubernetes.Interfac
cleanupFuncs = append(cleanupFuncs, func() {
err := client.CoreV1().Secrets(namespace).Delete(context.Background(), collector.ImagePullSecret.Name, metav1.DeleteOptions{})
if err != nil && !kuberneteserrors.IsNotFound(err) {
logger.Printf("Failed to delete secret %s: %v", collector.ImagePullSecret.Name, err)
klog.Errorf("Failed to delete secret %s: %v", collector.ImagePullSecret.Name, err)
}
})
}
@@ -215,7 +215,7 @@ func copyFromHostCreateDaemonSet(ctx context.Context, client kubernetes.Interfac
}
cleanupFuncs = append(cleanupFuncs, func() {
if err := client.AppsV1().DaemonSets(namespace).Delete(context.Background(), createdDS.Name, metav1.DeleteOptions{}); err != nil {
logger.Printf("Failed to delete daemonset %s: %v", createdDS.Name, err)
klog.Errorf("Failed to delete daemonset %s: %v", createdDS.Name, err)
}
})

View File

@@ -11,11 +11,11 @@ import (
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/constants"
"github.com/replicatedhq/troubleshoot/pkg/logger"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
type CollectLogs struct {
@@ -243,7 +243,7 @@ func savePodLogs(
func convertMaxAgeToTime(maxAge string) *metav1.Time {
parsedDuration, err := time.ParseDuration(maxAge)
if err != nil {
logger.Printf("Failed to parse time duration %s", maxAge)
klog.Errorf("Failed to parse time duration %s", maxAge)
return nil
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/logger"
longhornv1beta1types "github.com/replicatedhq/troubleshoot/pkg/longhorn/apis/longhorn/v1beta1"
longhornv1beta1 "github.com/replicatedhq/troubleshoot/pkg/longhorn/client/clientset/versioned/typed/longhorn/v1beta1"
longhorntypes "github.com/replicatedhq/troubleshoot/pkg/longhorn/types"
@@ -22,6 +21,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/klog/v2"
)
const (
@@ -69,8 +69,8 @@ func (c *CollectLonghorn) Collect(progressChan chan<- interface{}) (CollectorRes
if err != nil {
if apiErr, ok := err.(*apiErrors.StatusError); ok {
if apiErr.ErrStatus.Code == http.StatusNotFound {
logger.Printf("list nodes.longhorn.io not found")
return nil, nil
klog.Error("list nodes.longhorn.io not found")
return NewResult(), nil
}
}
return nil, errors.Wrap(err, "list nodes.longhorn.io")
@@ -280,7 +280,7 @@ func (c *CollectLonghorn) Collect(progressChan chan<- interface{}) (CollectorRes
defer wg.Done()
checksums, err := GetLonghornReplicaChecksum(c.ClientConfig, replica, podName)
if err != nil {
logger.Printf("Failed to get replica %s checksum: %v", replica.Name, err)
klog.Errorf("Failed to get replica %s checksum: %v", replica.Name, err)
return
}
volsDir := GetLonghornVolumesDirectory(ns)

View File

@@ -49,9 +49,9 @@ func RedactResult(bundlePath string, input CollectorResult, additionalRedactors
if err != nil {
return errors.Wrap(err, "failed to get relative path")
}
klog.V(4).Infof("Redacting %s (symlink => %s)\n", file, symlink)
klog.V(2).Infof("Redacting %s (symlink => %s)\n", file, symlink)
} else {
klog.V(4).Infof("Redacting %s\n", file)
klog.V(2).Infof("Redacting %s\n", file)
}
r, err := input.GetReader(bundlePath, file)
if err != nil {

View File

@@ -12,11 +12,11 @@ import (
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
"github.com/replicatedhq/troubleshoot/pkg/logger"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -54,7 +54,7 @@ func (c *CollectRunPod) Collect(progressChan chan<- interface{}) (CollectorResul
}
defer func() {
if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}); err != nil {
logger.Printf("Failed to delete pod %s: %v", pod.Name, err)
klog.Errorf("Failed to delete pod %s: %v", pod.Name, err)
}
}()
@@ -62,7 +62,7 @@ func (c *CollectRunPod) Collect(progressChan chan<- interface{}) (CollectorResul
defer func() {
for _, k := range pod.Spec.ImagePullSecrets {
if err := client.CoreV1().Secrets(pod.Namespace).Delete(context.Background(), k.Name, metav1.DeleteOptions{}); err != nil {
logger.Printf("Failed to delete secret %s: %v", k.Name, err)
klog.Errorf("Failed to delete secret %s: %v", k.Name, err)
}
}
}()
@@ -316,7 +316,7 @@ func RunPodsReadyNodes(ctx context.Context, client v1.CoreV1Interface, opts RunP
}
logs, err := RunPodLogs(ctx, client, pod)
if err != nil {
logger.Printf("Failed to run pod on node %s: %v", node, err)
klog.Errorf("Failed to run pod on node %s: %v", node, err)
return
}
@@ -341,7 +341,7 @@ func RunPodLogs(ctx context.Context, client v1.CoreV1Interface, podSpec *corev1.
defer func() {
err := client.Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
if err != nil && !kuberneteserrors.IsNotFound(err) {
logger.Printf("Failed to delete pod %s: %v\n", pod.Name, err)
klog.Errorf("Failed to delete pod %s: %v\n", pod.Name, err)
}
}()

View File

@@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/logger"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
@@ -18,6 +17,7 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
@@ -43,10 +43,10 @@ func (r *podRunner) run(ctx context.Context, collector *troubleshootv1beta2.Host
defer func() {
if err := r.client.CoreV1().Pods(namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}); err != nil {
logger.Printf("Failed to delete pod %s: %v\n", pod.Name, err)
klog.Errorf("Failed to delete pod %s: %v\n", pod.Name, err)
}
if err := r.client.CoreV1().ConfigMaps(namespace).Delete(context.Background(), cm.Name, metav1.DeleteOptions{}); err != nil {
logger.Printf("Failed to delete configmap %s: %v\n", pod.Name, err)
klog.Errorf("Failed to delete configmap %s: %v\n", pod.Name, err)
}
}()

View File

@@ -9,11 +9,11 @@ import (
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
"github.com/replicatedhq/troubleshoot/pkg/logger"
kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
type CollectSysctl struct {
@@ -82,7 +82,7 @@ find /proc/sys/vm -type f | while read f; do v=$(cat $f 2>/dev/null); echo "$f =
defer func() {
err := c.Client.CoreV1().Secrets(c.Collector.Namespace).Delete(context.Background(), c.Collector.ImagePullSecret.Name, metav1.DeleteOptions{})
if err != nil && !kuberneteserrors.IsNotFound(err) {
logger.Printf("Failed to delete secret %s: %v", c.Collector.ImagePullSecret.Name, err)
klog.Errorf("Failed to delete secret %s: %v", c.Collector.ImagePullSecret.Name, err)
}
}()

View File

@@ -1,26 +1,65 @@
/*
Logging library for the troubleshoot framework.
Logging levels:
TODO: Document me here => https://github.com/replicatedhq/troubleshoot/issues/1031
0: also the same as not using V(). Log progress related information within the framework. Logs within each component (collector/analyzers/etc) should not use this level.
1: High level logs within each component (collector/analyzers/etc) should use this level. A log such as "Ceph collector connected to the cluster" belongs here.
2: Everything else goes here. If you do not know which level to use, use this level.
The best approach is to always use V(2); then, after testing your code as a whole, you can elevate the log level of the messages you find useful to V(1) or V(0).
Do not log errors in functions that return an error. Instead, return the error and let the caller log it.
*/
package logger
import (
"log"
"os"
"flag"
"sync"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"k8s.io/klog/v2"
)
var (
logger *log.Logger
quiet = false
)
var lock sync.Mutex
func init() {
logger = log.New(os.Stderr, "", log.LstdFlags)
// InitKlogFlags initializes klog flags and adds them to the cobra command.
func InitKlogFlags(cmd *cobra.Command) {
// Initialize klog flags
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
klogFlags.VisitAll(func(f *flag.Flag) {
// Just the flags we want to expose in our CLI
if f.Name == "v" {
// If we ever want to expose the klog flags that have underscores ("_") in them
// we need to replace them with hyphens ("-") in the flag name using
// pflag.NormalizedName(strings.ReplaceAll(name, "_", "-")). Check how kubectl does it
cmd.Flags().AddGoFlag(f)
}
})
}
func SetQuiet(s bool) {
quiet = s
// SetupLogger sets up klog logger based on viper configuration.
func SetupLogger(v *viper.Viper) {
quiet := v.GetBool("debug") || v.IsSet("v")
SetQuiet(!quiet)
}
func Printf(format string, args ...interface{}) {
// SetQuiet enables or disables klog logger.
func SetQuiet(quiet bool) {
lock.Lock()
defer lock.Unlock()
if quiet {
return
klog.SetLogger(logr.Discard())
} else {
// Restore the default logger
klog.ClearLogger()
}
logger.Printf(format, args...)
}

View File

@@ -9,7 +9,7 @@ import (
analyze "github.com/replicatedhq/troubleshoot/pkg/analyze"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"k8s.io/klog/v2"
)
// Analyze runs the analyze phase of preflight checks
@@ -100,7 +100,7 @@ func doAnalyze(
if err != nil {
strict, strictErr := HasStrictAnalyzer(analyzer)
if strictErr != nil {
logger.Printf("failed to determine if analyzer %v is strict: %s", analyzer, strictErr)
klog.Errorf("failed to determine if analyzer %v is strict: %s", analyzer, strictErr)
}
analyzeResult = []*analyze.AnalyzeResult{

View File

@@ -12,13 +12,13 @@ import (
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/collect"
"github.com/replicatedhq/troubleshoot/pkg/constants"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/replicatedhq/troubleshoot/pkg/version"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
)
type CollectOpts struct {
@@ -123,7 +123,7 @@ func CollectHostWithContext(
isExcluded, _ := collector.IsExcluded()
if isExcluded {
logger.Printf("Excluding %q collector", collector.Title())
opts.ProgressChan <- fmt.Sprintf("[%s] Excluding collector", collector.Title())
span.SetAttributes(attribute.Bool(constants.EXCLUDED, true))
span.End()
continue
@@ -236,7 +236,7 @@ func CollectWithContext(ctx context.Context, opts CollectOpts, p *troubleshootv1
isExcluded, _ := collector.IsExcluded()
if isExcluded {
logger.Printf("Excluding %q collector", collector.Title())
klog.Infof("excluding %q collector", collector.Title())
span.SetAttributes(attribute.Bool(constants.EXCLUDED, true))
span.End()
continue

View File

@@ -9,8 +9,8 @@ import (
"strconv"
"strings"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"gopkg.in/yaml.v2"
"k8s.io/klog/v2"
)
type YamlRedactor struct {
@@ -30,7 +30,7 @@ func (r *YamlRedactor) Redact(input io.Reader, path string) io.Reader {
if r.filePath != "" {
match, err := filepath.Match(r.filePath, path)
if err != nil {
logger.Printf("Failed to match %q and %q: %v", r.filePath, path, err)
klog.Errorf("Failed to match %q and %q: %v", r.filePath, path, err)
return input
}
if !match {

View File

@@ -15,7 +15,6 @@ import (
"github.com/replicatedhq/troubleshoot/pkg/collect"
"github.com/replicatedhq/troubleshoot/pkg/constants"
"github.com/replicatedhq/troubleshoot/pkg/convert"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/replicatedhq/troubleshoot/pkg/version"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
@@ -45,7 +44,7 @@ func runHostCollectors(ctx context.Context, hostCollectors []*troubleshootv1beta
isExcluded, _ := collector.IsExcluded()
if isExcluded {
logger.Printf("Excluding %q collector", collector.Title())
opts.ProgressChan <- fmt.Sprintf("[%s] Excluding host collector", collector.Title())
span.SetAttributes(attribute.Bool(constants.EXCLUDED, true))
span.End()
continue
@@ -152,7 +151,8 @@ func runCollectors(ctx context.Context, collectors []*troubleshootv1beta2.Collec
isExcluded, _ := collector.IsExcluded()
if isExcluded {
logger.Printf("Excluding %q collector", collector.Title())
msg := fmt.Sprintf("excluding %q collector", collector.Title())
opts.CollectorProgressCallback(opts.ProgressChan, msg)
span.SetAttributes(attribute.Bool(constants.EXCLUDED, true))
span.End()
continue
@@ -161,7 +161,7 @@ func runCollectors(ctx context.Context, collectors []*troubleshootv1beta2.Collec
// skip collectors with RBAC errors unless its the ClusterResources collector
if collector.HasRBACErrors() {
if _, ok := collector.(*collect.CollectClusterResources); !ok {
msg := fmt.Sprintf("skipping collector %s with insufficient RBAC permissions", collector.Title())
msg := fmt.Sprintf("skipping collector %q with insufficient RBAC permissions", collector.Title())
opts.CollectorProgressCallback(opts.ProgressChan, msg)
span.SetStatus(codes.Error, "skipping collector, insufficient RBAC permissions")
span.End()

View File

@@ -15,10 +15,10 @@ import (
troubleshootclientsetscheme "github.com/replicatedhq/troubleshoot/pkg/client/troubleshootclientset/scheme"
"github.com/replicatedhq/troubleshoot/pkg/docrewrite"
"github.com/replicatedhq/troubleshoot/pkg/httputil"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/replicatedhq/troubleshoot/pkg/oci"
"github.com/replicatedhq/troubleshoot/pkg/specs"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
)
func GetSupportBundleFromURI(bundleURI string) (*troubleshootv1beta2.SupportBundle, error) {
@@ -78,10 +78,10 @@ func ParseSupportBundle(doc []byte, followURI bool) (*troubleshootv1beta2.Suppor
// use the upstream spec, otherwise fall back to
// what's defined in the current spec
if supportBundle.Spec.Uri != "" && followURI {
logger.Printf("using upstream reference: %+v\n", supportBundle.Spec.Uri)
klog.Infof("using upstream reference: %+v\n", supportBundle.Spec.Uri)
upstreamSupportBundleContent, err := LoadSupportBundleSpec(supportBundle.Spec.Uri)
if err != nil {
logger.Printf("failed to load upstream supportbundle, falling back")
klog.Errorf("failed to load upstream supportbundle, falling back")
return supportBundle, nil
}
@@ -89,7 +89,7 @@ func ParseSupportBundle(doc []byte, followURI bool) (*troubleshootv1beta2.Suppor
upstreamSupportBundle, err := ParseSupportBundle([]byte(multidocs[0]), false)
if err != nil {
logger.Printf("failed to parse upstream supportbundle, falling back")
klog.Errorf("failed to parse upstream supportbundle, falling back")
return supportBundle, nil
}
return upstreamSupportBundle, nil

View File

@@ -19,7 +19,6 @@ import (
"github.com/replicatedhq/troubleshoot/pkg/collect"
"github.com/replicatedhq/troubleshoot/pkg/constants"
"github.com/replicatedhq/troubleshoot/pkg/convert"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"go.opentelemetry.io/otel"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
@@ -181,7 +180,7 @@ func CollectSupportBundleFromSpec(
err = result.SaveResult(bundlePath, "execution-data/summary.txt", bytes.NewReader([]byte(summary)))
if err != nil {
// Don't fail the support bundle if we can't save the execution summary
logger.Printf("failed to save execution summary file in the support bundle: %v", err)
klog.Errorf("failed to save execution summary file in the support bundle: %v", err)
}
// Archive Support Bundle