Mirror of https://github.com/kubernetes/node-problem-detector.git (synced 2026-02-14 18:09:57 +00:00)

commit e43459d86d: Move glog/klog logging to klog/v2
committed by: Ciprian Hacman
parent commit: eeab0ab06f
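The diff below applies one mechanical substitution across the tree: the github.com/golang/glog import becomes k8s.io/klog/v2, call sites change only their package qualifier (glog.* to klog.*, including glog.Level to klog.Level), and each binary registers klog's flags via klog.InitFlags(nil) before flag parsing. A minimal sketch of the pattern, assuming an illustrative main package rather than code taken verbatim from the repository:

```go
package main

import (
	"flag"
	"fmt"
	"os"

	"k8s.io/klog/v2" // replaces "github.com/golang/glog"
)

func main() {
	// Register klog's flags (-v, -logtostderr, ...) with the standard flag set.
	klog.InitFlags(nil)

	// Keep logging on stderr rather than files, as the commands in this commit do.
	if err := flag.Set("logtostderr", "true"); err != nil {
		fmt.Printf("Failed to set logtostderr=true: %v", err)
		os.Exit(1)
	}
	flag.Parse()

	// Call sites keep their signatures; only the package qualifier changes.
	klog.Infof("starting up")        // was glog.Infof(...)
	klog.V(3).Info("verbose detail") // was glog.V(3).Info(...)
}
```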
@@ -23,18 +23,21 @@ import (
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"k8s.io/node-problem-detector/cmd/healthchecker/options"
"k8s.io/node-problem-detector/pkg/custompluginmonitor/types"
"k8s.io/node-problem-detector/pkg/healthchecker"
)

func main() {
// Set glog flag so that it does not log to files.
// Set klog flag so that it does not log to files.
klog.InitFlags(nil)
if err := flag.Set("logtostderr", "true"); err != nil {
fmt.Printf("Failed to set logtostderr=true: %v", err)
os.Exit(int(types.Unknown))
}

hco := options.NewHealthCheckerOptions()
hco.AddFlags(pflag.CommandLine)
pflag.Parse()
@@ -26,13 +26,16 @@ import (
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"k8s.io/node-problem-detector/cmd/logcounter/options"
"k8s.io/node-problem-detector/pkg/custompluginmonitor/types"
"k8s.io/node-problem-detector/pkg/logcounter"
)

func main() {
// Set glog flag so that it does not log to files.
// Set klog flag so that it does not log to files.
klog.InitFlags(nil)
if err := flag.Set("logtostderr", "true"); err != nil {
fmt.Printf("Failed to set logtostderr=true: %v", err)
os.Exit(int(types.Unknown))
@@ -19,7 +19,7 @@ package main
import (
"context"
"github.com/golang/glog"
"k8s.io/klog/v2"
_ "k8s.io/node-problem-detector/cmd/nodeproblemdetector/exporterplugins"
_ "k8s.io/node-problem-detector/cmd/nodeproblemdetector/problemdaemonplugins"

@@ -46,18 +46,18 @@ func npdMain(ctx context.Context, npdo *options.NodeProblemDetectorOptions) erro
// Initialize problem daemons.
problemDaemons := problemdaemon.NewProblemDaemons(npdo.MonitorConfigPaths)
if len(problemDaemons) == 0 {
glog.Fatalf("No problem daemon is configured")
klog.Fatalf("No problem daemon is configured")
}

// Initialize exporters.
defaultExporters := []types.Exporter{}
if ke := k8sexporter.NewExporterOrDie(ctx, npdo); ke != nil {
defaultExporters = append(defaultExporters, ke)
glog.Info("K8s exporter started.")
klog.Info("K8s exporter started.")
}
if pe := prometheusexporter.NewExporterOrDie(npdo); pe != nil {
defaultExporters = append(defaultExporters, pe)
glog.Info("Prometheus exporter started.")
klog.Info("Prometheus exporter started.")
}

plugableExporters := exporters.NewExporters()

@@ -67,7 +67,7 @@ func npdMain(ctx context.Context, npdo *options.NodeProblemDetectorOptions) erro
npdExporters = append(npdExporters, plugableExporters...)

if len(npdExporters) == 0 {
glog.Fatalf("No exporter is successfully setup")
klog.Fatalf("No exporter is successfully setup")
}

// Initialize NPD core.
@@ -19,8 +19,8 @@ package main
import (
"context"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"k8s.io/node-problem-detector/cmd/options"
)

@@ -30,6 +30,6 @@ func main() {
pflag.Parse()
if err := npdMain(context.Background(), npdo); err != nil {
glog.Fatalf("Problem detector failed with error: %v", err)
klog.Fatalf("Problem detector failed with error: %v", err)
}
}
@@ -22,11 +22,11 @@ import (
"sync"
"time"
"github.com/golang/glog"
"github.com/spf13/pflag"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/debug"
"golang.org/x/sys/windows/svc/eventlog"
"k8s.io/klog/v2"
"k8s.io/node-problem-detector/cmd/options"
)

@@ -62,7 +62,7 @@ func main() {
func isRunningAsWindowsService() bool {
runningAsService, err := svc.IsWindowsService()
if err != nil {
glog.Errorf("cannot determine if running as Windows Service assuming standalone, %v", err)
klog.Errorf("cannot determine if running as Windows Service assuming standalone, %v", err)
return false
}
return runningAsService

@@ -20,7 +20,6 @@ limitations under the License.
package main

import (
"context"
"testing"
"golang.org/x/sys/windows/svc"
go.mod

@@ -11,7 +11,6 @@ require (
github.com/avast/retry-go v3.0.0+incompatible
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/euank/go-kmsg-parser v2.0.0+incompatible
github.com/golang/glog v1.1.2
github.com/hpcloud/tail v1.0.0
github.com/prometheus/client_model v0.4.0
github.com/prometheus/common v0.44.0

@@ -25,7 +24,7 @@ require (
k8s.io/api v0.28.2
k8s.io/apimachinery v0.28.2
k8s.io/client-go v0.28.2
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.100.1
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
)

@@ -96,7 +95,6 @@ require (
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
go.sum

@@ -524,8 +524,7 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -21,7 +21,7 @@ import (
"os"
"time"
"github.com/golang/glog"
"k8s.io/klog/v2"
"k8s.io/node-problem-detector/pkg/custompluginmonitor/plugin"
cpmtypes "k8s.io/node-problem-detector/pkg/custompluginmonitor/types"

@@ -59,25 +59,25 @@ func NewCustomPluginMonitorOrDie(configPath string) types.Monitor {
}
f, err := os.ReadFile(configPath)
if err != nil {
glog.Fatalf("Failed to read configuration file %q: %v", configPath, err)
klog.Fatalf("Failed to read configuration file %q: %v", configPath, err)
}
err = json.Unmarshal(f, &c.config)
if err != nil {
glog.Fatalf("Failed to unmarshal configuration file %q: %v", configPath, err)
klog.Fatalf("Failed to unmarshal configuration file %q: %v", configPath, err)
}
// Apply configurations
err = (&c.config).ApplyConfiguration()
if err != nil {
glog.Fatalf("Failed to apply configuration for %q: %v", configPath, err)
klog.Fatalf("Failed to apply configuration for %q: %v", configPath, err)
}

// Validate configurations
err = c.config.Validate()
if err != nil {
glog.Fatalf("Failed to validate custom plugin config %+v: %v", c.config, err)
klog.Fatalf("Failed to validate custom plugin config %+v: %v", c.config, err)
}

glog.Infof("Finish parsing custom plugin monitor config file %s: %+v", c.configPath, c.config)
klog.Infof("Finish parsing custom plugin monitor config file %s: %+v", c.configPath, c.config)

c.plugin = plugin.NewPlugin(c.config)
// A 1000 size channel should be big enough.
@@ -96,26 +96,26 @@ func initializeProblemMetricsOrDie(rules []*cpmtypes.CustomRule) {
if rule.Type == types.Perm {
err := problemmetrics.GlobalProblemMetricsManager.SetProblemGauge(rule.Condition, rule.Reason, false)
if err != nil {
glog.Fatalf("Failed to initialize problem gauge metrics for problem %q, reason %q: %v",
klog.Fatalf("Failed to initialize problem gauge metrics for problem %q, reason %q: %v",
rule.Condition, rule.Reason, err)
}
}
err := problemmetrics.GlobalProblemMetricsManager.IncrementProblemCounter(rule.Reason, 0)
if err != nil {
glog.Fatalf("Failed to initialize problem counter metrics for %q: %v", rule.Reason, err)
klog.Fatalf("Failed to initialize problem counter metrics for %q: %v", rule.Reason, err)
}
}
}

func (c *customPluginMonitor) Start() (<-chan *types.Status, error) {
glog.Infof("Start custom plugin monitor %s", c.configPath)
klog.Infof("Start custom plugin monitor %s", c.configPath)
go c.plugin.Run()
go c.monitorLoop()
return c.statusChan, nil
}

func (c *customPluginMonitor) Stop() {
glog.Infof("Stop custom plugin monitor %s", c.configPath)
klog.Infof("Stop custom plugin monitor %s", c.configPath)
c.tomb.Stop()
}

@@ -133,16 +133,16 @@ func (c *customPluginMonitor) monitorLoop() {
select {
case result, ok := <-resultChan:
if !ok {
glog.Errorf("Result channel closed: %s", c.configPath)
klog.Errorf("Result channel closed: %s", c.configPath)
return
}
glog.V(3).Infof("Receive new plugin result for %s: %+v", c.configPath, result)
klog.V(3).Infof("Receive new plugin result for %s: %+v", c.configPath, result)
status := c.generateStatus(result)
glog.V(3).Infof("New status generated: %+v", status)
klog.V(3).Infof("New status generated: %+v", status)
c.statusChan <- status
case <-c.tomb.Stopping():
c.plugin.Stop()
glog.Infof("Custom plugin monitor stopped: %s", c.configPath)
klog.Infof("Custom plugin monitor stopped: %s", c.configPath)
c.tomb.Done()
return
}
@@ -256,7 +256,7 @@ func (c *customPluginMonitor) generateStatus(result cpmtypes.Result) *types.Stat
err := problemmetrics.GlobalProblemMetricsManager.IncrementProblemCounter(
event.Reason, 1)
if err != nil {
glog.Errorf("Failed to update problem counter metrics for %q: %v",
klog.Errorf("Failed to update problem counter metrics for %q: %v",
event.Reason, err)
}
}

@@ -264,7 +264,7 @@ func (c *customPluginMonitor) generateStatus(result cpmtypes.Result) *types.Stat
err := problemmetrics.GlobalProblemMetricsManager.SetProblemGauge(
condition.Type, condition.Reason, condition.Status == types.True)
if err != nil {
glog.Errorf("Failed to update problem gauge metrics for problem %q, reason %q: %v",
klog.Errorf("Failed to update problem gauge metrics for problem %q, reason %q: %v",
condition.Type, condition.Reason, err)
}
}

@@ -277,7 +277,7 @@ func (c *customPluginMonitor) generateStatus(result cpmtypes.Result) *types.Stat
}
// Log only if condition has changed
if len(activeProblemEvents) != 0 || len(inactiveProblemEvents) != 0 {
glog.V(0).Infof("New status generated: %+v", status)
klog.V(0).Infof("New status generated: %+v", status)
}
return status
}

@@ -297,7 +297,7 @@ func toConditionStatus(s cpmtypes.Status) types.ConditionStatus {
func (c *customPluginMonitor) initializeStatus() {
// Initialize the default node conditions
c.conditions = initialConditions(c.config.DefaultConditions)
glog.Infof("Initialize condition generated: %+v", c.conditions)
klog.Infof("Initialize condition generated: %+v", c.conditions)
// Update the initial status
c.statusChan <- &types.Status{
Source: c.config.Source,
@@ -26,7 +26,7 @@ import (
"syscall"
"time"
"github.com/golang/glog"
"k8s.io/klog/v2"
cpmtypes "k8s.io/node-problem-detector/pkg/custompluginmonitor/types"
"k8s.io/node-problem-detector/pkg/util"
"k8s.io/node-problem-detector/pkg/util/tomb"

@@ -60,7 +60,7 @@ func (p *Plugin) GetResultChan() <-chan cpmtypes.Result {
func (p *Plugin) Run() {
defer func() {
glog.Info("Stopping plugin execution")
klog.Info("Stopping plugin execution")
close(p.resultChan)
p.tomb.Done()
}()

@@ -89,7 +89,7 @@ func (p *Plugin) Run() {
// run each rule in parallel and wait for them to complete
func (p *Plugin) runRules() {
glog.V(3).Info("Start to run custom plugins")
klog.V(3).Info("Start to run custom plugins")

for _, rule := range p.config.Rules {
// syncChan limits concurrent goroutines to configured PluginGlobalConfig.Concurrency value

@@ -103,12 +103,12 @@ func (p *Plugin) runRules() {
start := time.Now()
exitStatus, message := p.run(*rule)
level := glog.Level(3)
level := klog.Level(3)
if exitStatus != 0 {
level = glog.Level(2)
level = klog.Level(2)
}

glog.V(level).Infof("Rule: %+v. Start time: %v. End time: %v. Duration: %v", rule, start, time.Now(), time.Since(start))
klog.V(level).Infof("Rule: %+v. Start time: %v. End time: %v. Duration: %v", rule, start, time.Now(), time.Since(start))

result := cpmtypes.Result{
Rule: rule,

@@ -120,12 +120,12 @@ func (p *Plugin) runRules() {
p.resultChan <- result

// Let the result be logged at a higher verbosity level. If there is a change in status it is logged later.
glog.V(level).Infof("Add check result %+v for rule %+v", result, rule)
klog.V(level).Infof("Add check result %+v for rule %+v", result, rule)
}(rule)
}

p.Wait()
glog.V(3).Info("Finish running custom plugins")
klog.V(3).Info("Finish running custom plugins")
}

// readFromReader reads the maxBytes from the reader and drains the rest.
@@ -157,16 +157,16 @@ func (p *Plugin) run(rule cpmtypes.CustomRule) (exitStatus cpmtypes.Status, outp
stdoutPipe, err := cmd.StdoutPipe()
if err != nil {
glog.Errorf("Error creating stdout pipe for plugin %q: error - %v", rule.Path, err)
klog.Errorf("Error creating stdout pipe for plugin %q: error - %v", rule.Path, err)
return cpmtypes.Unknown, "Error creating stdout pipe for plugin. Please check the error log"
}
stderrPipe, err := cmd.StderrPipe()
if err != nil {
glog.Errorf("Error creating stderr pipe for plugin %q: error - %v", rule.Path, err)
klog.Errorf("Error creating stderr pipe for plugin %q: error - %v", rule.Path, err)
return cpmtypes.Unknown, "Error creating stderr pipe for plugin. Please check the error log"
}
if err := cmd.Start(); err != nil {
glog.Errorf("Error in starting plugin %q: error - %v", rule.Path, err)
klog.Errorf("Error in starting plugin %q: error - %v", rule.Path, err)
return cpmtypes.Unknown, "Error in starting plugin. Please check the error log"
}

@@ -182,9 +182,9 @@ func (p *Plugin) run(rule cpmtypes.CustomRule) (exitStatus cpmtypes.Status, outp
if ctx.Err() == context.Canceled {
return
}
glog.Errorf("Error in running plugin timeout %q", rule.Path)
klog.Errorf("Error in running plugin timeout %q", rule.Path)
if cmd.Process == nil || cmd.Process.Pid == 0 {
glog.Errorf("Error in cmd.Process check %q", rule.Path)
klog.Errorf("Error in cmd.Process check %q", rule.Path)
break
}

@@ -194,7 +194,7 @@ func (p *Plugin) run(rule cpmtypes.CustomRule) (exitStatus cpmtypes.Status, outp
err := util.Kill(cmd)
if err != nil {
glog.Errorf("Error in kill process %d, %v", cmd.Process.Pid, err)
klog.Errorf("Error in kill process %d, %v", cmd.Process.Pid, err)
}
case <-waitChan:
return

@@ -223,18 +223,18 @@ func (p *Plugin) run(rule cpmtypes.CustomRule) (exitStatus cpmtypes.Status, outp
wg.Wait()

if stdoutErr != nil {
glog.Errorf("Error reading stdout for plugin %q: error - %v", rule.Path, err)
klog.Errorf("Error reading stdout for plugin %q: error - %v", rule.Path, err)
return cpmtypes.Unknown, "Error reading stdout for plugin. Please check the error log"
}

if stderrErr != nil {
glog.Errorf("Error reading stderr for plugin %q: error - %v", rule.Path, err)
klog.Errorf("Error reading stderr for plugin %q: error - %v", rule.Path, err)
return cpmtypes.Unknown, "Error reading stderr for plugin. Please check the error log"
}

if err := cmd.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); !ok {
glog.Errorf("Error in waiting for plugin %q: error - %v. output - %q", rule.Path, err, string(stdout))
klog.Errorf("Error in waiting for plugin %q: error - %v. output - %q", rule.Path, err, string(stdout))
return cpmtypes.Unknown, "Error in waiting for plugin. Please check the error log"
}
}
@@ -273,12 +273,12 @@ func (p *Plugin) run(rule cpmtypes.CustomRule) (exitStatus cpmtypes.Status, outp
// Stop the plugin.
func (p *Plugin) Stop() {
p.tomb.Stop()
glog.Info("Stop plugin execution")
klog.Info("Stop plugin execution")
}

func logPluginStderr(rule cpmtypes.CustomRule, logs string, logLevel glog.Level) {
func logPluginStderr(rule cpmtypes.CustomRule, logs string, logLevel klog.Level) {
if len(logs) != 0 {
glog.V(logLevel).Infof("Start logs from plugin %+v \n %s", rule, logs)
glog.V(logLevel).Infof("End logs from plugin %+v", rule)
klog.V(logLevel).Infof("Start logs from plugin %+v \n %s", rule, logs)
klog.V(logLevel).Infof("End logs from plugin %+v", rule)
}
}

@@ -17,8 +17,9 @@ limitations under the License.
package types

import (
"k8s.io/node-problem-detector/pkg/types"
"time"

"k8s.io/node-problem-detector/pkg/types"
)

type Status int
@@ -29,7 +29,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/utils/clock"
"github.com/golang/glog"
"k8s.io/klog/v2"
)

const (

@@ -162,7 +162,7 @@ func (c *conditionManager) sync(ctx context.Context) {
}
if err := c.client.SetConditions(ctx, conditions); err != nil {
// The conditions will be updated again in future sync
glog.Errorf("failed to update node conditions: %v", err)
klog.Errorf("failed to update node conditions: %v", err)
c.resyncNeeded = true
return
}

@@ -23,7 +23,7 @@ import (
_ "net/http/pprof"
"strconv"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/clock"

@@ -52,9 +52,9 @@ func NewExporterOrDie(ctx context.Context, npdo *options.NodeProblemDetectorOpti
c := problemclient.NewClientOrDie(npdo)

glog.Infof("Waiting for kube-apiserver to be ready (timeout %v)...", npdo.APIServerWaitTimeout)
klog.Infof("Waiting for kube-apiserver to be ready (timeout %v)...", npdo.APIServerWaitTimeout)
if err := waitForAPIServerReadyWithTimeout(ctx, c, npdo); err != nil {
glog.Warningf("kube-apiserver did not become ready: timed out on waiting for kube-apiserver to return the node object: %v", err)
klog.Warningf("kube-apiserver did not become ready: timed out on waiting for kube-apiserver to return the node object: %v", err)
}

ke := k8sExporter{

@@ -99,7 +99,7 @@ func (ke *k8sExporter) startHTTPReporting(npdo *options.NodeProblemDetectorOptio
go func() {
err := http.ListenAndServe(addr, mux)
if err != nil {
glog.Fatalf("Failed to start server: %v", err)
klog.Fatalf("Failed to start server: %v", err)
}
}()
}

@@ -109,7 +109,7 @@ func waitForAPIServerReadyWithTimeout(ctx context.Context, c problemclient.Clien
// If NPD can get the node object from kube-apiserver, the server is
// ready and the RBAC permission is set correctly.
if _, err := c.GetNode(ctx); err != nil {
glog.Errorf("Can't get node object: %v", err)
klog.Errorf("Can't get node object: %v", err)
return false, nil
}
return true, nil
@@ -24,7 +24,6 @@ import (
"os"
"path/filepath"
"github.com/golang/glog"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

@@ -32,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/utils/clock"

"k8s.io/node-problem-detector/cmd/options"

@@ -136,7 +136,7 @@ func generatePatch(conditions []v1.NodeCondition) ([]byte, error) {
// getEventRecorder generates a recorder for specific node name and source.
func getEventRecorder(c typedcorev1.CoreV1Interface, namespace, nodeName, source string) record.EventRecorder {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.V(4).Infof)
eventBroadcaster.StartLogging(klog.V(4).Infof)
recorder := eventBroadcaster.NewRecorder(runtime.NewScheme(), v1.EventSource{Component: source, Host: nodeName})
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: c.Events(namespace)})
return recorder
@@ -22,8 +22,8 @@ import (
"strconv"

"contrib.go.opencensus.io/exporter/prometheus"
"github.com/golang/glog"
"go.opencensus.io/stats/view"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/cmd/options"
"k8s.io/node-problem-detector/pkg/types"

@@ -40,13 +40,13 @@ func NewExporterOrDie(npdo *options.NodeProblemDetectorOptions) types.Exporter {
addr := net.JoinHostPort(npdo.PrometheusServerAddress, strconv.Itoa(npdo.PrometheusServerPort))
pe, err := prometheus.NewExporter(prometheus.Options{})
if err != nil {
glog.Fatalf("Failed to create Prometheus exporter: %v", err)
klog.Fatalf("Failed to create Prometheus exporter: %v", err)
}
go func() {
mux := http.NewServeMux()
mux.Handle("/metrics", pe)
if err := http.ListenAndServe(addr, mux); err != nil {
glog.Fatalf("Failed to start Prometheus scrape endpoint: %v", err)
klog.Fatalf("Failed to start Prometheus scrape endpoint: %v", err)
}
}()
view.RegisterExporter(pe)

@@ -18,7 +18,7 @@ package gce

import (
"cloud.google.com/go/compute/metadata"
"github.com/golang/glog"
"k8s.io/klog/v2"
)

type Metadata struct {

@@ -37,7 +37,7 @@ func (md *Metadata) HasMissingField() bool {
func (md *Metadata) PopulateFromGCE() error {
var err error
glog.Info("Fetching GCE metadata from metadata server")
klog.Info("Fetching GCE metadata from metadata server")
if md.ProjectID == "" {
md.ProjectID, err = metadata.ProjectID()
if err != nil {
@@ -25,10 +25,10 @@ import (
"contrib.go.opencensus.io/exporter/stackdriver"
monitoredres "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
"github.com/golang/glog"
"github.com/spf13/pflag"
"go.opencensus.io/stats/view"
"google.golang.org/api/option"
"k8s.io/klog/v2"

"github.com/avast/retry-go"
"k8s.io/node-problem-detector/pkg/exporters"

@@ -137,12 +137,12 @@ func (se *stackdriverExporter) setupOpenCensusViewExporterOrDie() {
DefaultMonitoringLabels: &globalLabels,
})
if err != nil {
glog.Fatalf("Failed to create Stackdriver OpenCensus view exporter: %v", err)
klog.Fatalf("Failed to create Stackdriver OpenCensus view exporter: %v", err)
}

exportPeriod, err := time.ParseDuration(se.config.ExportPeriod)
if err != nil {
glog.Fatalf("Failed to parse ExportPeriod %q: %v", se.config.ExportPeriod, err)
klog.Fatalf("Failed to parse ExportPeriod %q: %v", se.config.ExportPeriod, err)
}

view.SetReportingPeriod(exportPeriod)

@@ -151,33 +151,33 @@ func (se *stackdriverExporter) setupOpenCensusViewExporterOrDie() {
func (se *stackdriverExporter) populateMetadataOrDie() {
if !se.config.GCEMetadata.HasMissingField() {
glog.Infof("Using GCE metadata specified in the config file: %+v", se.config.GCEMetadata)
klog.Infof("Using GCE metadata specified in the config file: %+v", se.config.GCEMetadata)
return
}

metadataFetchTimeout, err := time.ParseDuration(se.config.MetadataFetchTimeout)
if err != nil {
glog.Fatalf("Failed to parse MetadataFetchTimeout %q: %v", se.config.MetadataFetchTimeout, err)
klog.Fatalf("Failed to parse MetadataFetchTimeout %q: %v", se.config.MetadataFetchTimeout, err)
}

metadataFetchInterval, err := time.ParseDuration(se.config.MetadataFetchInterval)
if err != nil {
glog.Fatalf("Failed to parse MetadataFetchInterval %q: %v", se.config.MetadataFetchInterval, err)
klog.Fatalf("Failed to parse MetadataFetchInterval %q: %v", se.config.MetadataFetchInterval, err)
}

glog.Infof("Populating GCE metadata by querying GCE metadata server.")
klog.Infof("Populating GCE metadata by querying GCE metadata server.")
err = retry.Do(se.config.GCEMetadata.PopulateFromGCE,
retry.Delay(metadataFetchInterval),
retry.Attempts(uint(metadataFetchTimeout/metadataFetchInterval)),
retry.DelayType(retry.FixedDelay))
if err == nil {
glog.Infof("Using GCE metadata: %+v", se.config.GCEMetadata)
klog.Infof("Using GCE metadata: %+v", se.config.GCEMetadata)
return
}
if se.config.PanicOnMetadataFetchFailure {
glog.Fatalf("Failed to populate GCE metadata: %v", err)
klog.Fatalf("Failed to populate GCE metadata: %v", err)
} else {
glog.Errorf("Failed to populate GCE metadata: %v", err)
klog.Errorf("Failed to populate GCE metadata: %v", err)
}
}
@@ -200,7 +200,7 @@ func (clo *commandLineOptions) SetFlags(fs *pflag.FlagSet) {
func NewExporterOrDie(clo types.CommandLineOptions) types.Exporter {
options, ok := clo.(*commandLineOptions)
if !ok {
glog.Fatalf("Wrong type for the command line options of Stackdriver Exporter: %s.", reflect.TypeOf(clo))
klog.Fatalf("Wrong type for the command line options of Stackdriver Exporter: %s.", reflect.TypeOf(clo))
}
if options.configPath == "" {
return nil

@@ -211,15 +211,15 @@ func NewExporterOrDie(clo types.CommandLineOptions) types.Exporter {
// Apply configurations.
f, err := os.ReadFile(options.configPath)
if err != nil {
glog.Fatalf("Failed to read configuration file %q: %v", options.configPath, err)
klog.Fatalf("Failed to read configuration file %q: %v", options.configPath, err)
}
err = json.Unmarshal(f, &se.config)
if err != nil {
glog.Fatalf("Failed to unmarshal configuration file %q: %v", options.configPath, err)
klog.Fatalf("Failed to unmarshal configuration file %q: %v", options.configPath, err)
}
se.config.ApplyConfiguration()

glog.Infof("Starting Stackdriver exporter %s", options.configPath)
klog.Infof("Starting Stackdriver exporter %s", options.configPath)

se.populateMetadataOrDie()
se.setupOpenCensusViewExporterOrDie()
@@ -23,7 +23,7 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog/v2"
"k8s.io/node-problem-detector/cmd/healthchecker/options"
"k8s.io/node-problem-detector/pkg/healthchecker/types"
)

@@ -83,12 +83,12 @@ func (hc *healthChecker) CheckHealth() (bool, error) {
// repair if the service has been up for the cool down period.
uptime, err := hc.uptimeFunc()
if err != nil {
glog.Infof("error in getting uptime for %v: %v\n", hc.component, err)
klog.Infof("error in getting uptime for %v: %v\n", hc.component, err)
return false, nil
}
glog.Infof("%v is unhealthy, component uptime: %v\n", hc.component, uptime)
klog.Infof("%v is unhealthy, component uptime: %v\n", hc.component, uptime)
if uptime > hc.coolDownTime {
glog.Infof("%v cooldown period of %v exceeded, repairing", hc.component, hc.coolDownTime)
klog.Infof("%v cooldown period of %v exceeded, repairing", hc.component, hc.coolDownTime)
hc.repairFunc()
}
}

@@ -102,10 +102,10 @@ func logPatternHealthCheck(service string, loopBackTime time.Duration, logPatter
return true, nil
}
uptimeFunc := getUptimeFunc(service)
glog.Infof("Getting uptime for service: %v\n", service)
klog.Infof("Getting uptime for service: %v\n", service)
uptime, err := uptimeFunc()
if err != nil {
glog.Warningf("Failed to get the uptime: %+v", err)
klog.Warningf("Failed to get the uptime: %+v", err)
return true, err
}

@@ -164,7 +164,7 @@ func getHealthCheckFunc(hco *options.HealthCheckerOptions) func() (bool, error)
return true, nil
}
default:
glog.Warningf("Unsupported component: %v", hco.Component)
klog.Warningf("Unsupported component: %v", hco.Component)
}

return nil

@@ -177,7 +177,7 @@ func execCommand(timeout time.Duration, command string, args ...string) (string,
cmd := exec.CommandContext(ctx, command, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.Infof("command %v failed: %v, %v\n", cmd, err, out)
klog.Infof("command %v failed: %v, %v\n", cmd, err, out)
return "", err
}
@@ -22,7 +22,7 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/cmd/healthchecker/options"
"k8s.io/node-problem-detector/pkg/healthchecker/types"

@@ -90,7 +90,7 @@ func checkForPattern(service, logStartTime, logPattern string, logCountThreshold
return true, err
}
if occurrences >= logCountThreshold {
glog.Infof("%s failed log pattern check, %s occurrences: %v", service, logPattern, occurrences)
klog.Infof("%s failed log pattern check, %s occurrences: %v", service, logPattern, occurrences)
return false, nil
}
return true, nil

@@ -23,7 +23,7 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/cmd/healthchecker/options"
"k8s.io/node-problem-detector/pkg/healthchecker/types"

@@ -80,7 +80,7 @@ func powershell(args ...string) (string, error) {
func extractCommandOutput(cmd *exec.Cmd) (string, error) {
out, err := cmd.Output()
if err != nil {
glog.Infof("command %v failed: %v, %v\n", cmd, err, out)
klog.Infof("command %v failed: %v, %v\n", cmd, err, out)
return "", err
}
return strings.TrimSuffix(string(out), "\r\n"), nil

@@ -101,7 +101,7 @@ func checkForPattern(service, logStartTime, logPattern string, logCountThreshold
return true, err
}
if occurrences >= logCountThreshold {
glog.Infof("%s failed log pattern check, %s occurrences: %v", service, logPattern, occurrences)
klog.Infof("%s failed log pattern check, %s occurrences: %v", service, logPattern, occurrences)
return false, nil
}
return true, nil
@@ -19,7 +19,7 @@ package problemdaemon
import (
"fmt"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/types"
)

@@ -58,7 +58,7 @@ func NewProblemDaemons(monitorConfigPaths types.ProblemDaemonConfigPathMap) []ty
for _, config := range *configs {
if _, ok := problemDaemonMap[config]; ok {
// Skip the config if it's duplicated.
glog.Warningf("Duplicated problem daemon configuration %q", config)
klog.Warningf("Duplicated problem daemon configuration %q", config)
continue
}
problemDaemonMap[config] = handlers[problemDaemonType].CreateProblemDaemonOrDie(config)

@@ -20,7 +20,7 @@ import (
"context"
"fmt"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/types"
)

@@ -53,7 +53,7 @@ func (p *problemDetector) Run(ctx context.Context) error {
ch, err := m.Start()
if err != nil {
// Do not return error and keep on trying the following config files.
glog.Errorf("Failed to start problem daemon %v: %v", m, err)
klog.Errorf("Failed to start problem daemon %v: %v", m, err)
failureCount++
continue
}

@@ -74,7 +74,7 @@ func (p *problemDetector) Run(ctx context.Context) error {
}()

ch := groupChannel(chans)
glog.Info("Problem detector started")
klog.Info("Problem detector started")

for {
select {
@@ -21,7 +21,7 @@ import (
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/util/metrics"
)

@@ -56,7 +56,7 @@ func NewProblemMetricsManagerOrDie() *ProblemMetricsManager {
metrics.Sum,
[]string{"reason"})
if err != nil {
glog.Fatalf("Failed to create problem_counter metric: %v", err)
klog.Fatalf("Failed to create problem_counter metric: %v", err)
}

pmm.problemGauge, err = metrics.NewInt64Metric(

@@ -67,7 +67,7 @@ func NewProblemMetricsManagerOrDie() *ProblemMetricsManager {
metrics.LastValue,
[]string{"type", "reason"})
if err != nil {
glog.Fatalf("Failed to create problem_gauge metric: %v", err)
klog.Fatalf("Failed to create problem_gauge metric: %v", err)
}

pmm.problemTypeToReason = make(map[string]string)
@@ -21,7 +21,7 @@ import (
"os"
"time"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/problemdaemon"
"k8s.io/node-problem-detector/pkg/problemmetrics"

@@ -63,19 +63,19 @@ func NewLogMonitorOrDie(configPath string) types.Monitor {
f, err := os.ReadFile(configPath)
if err != nil {
glog.Fatalf("Failed to read configuration file %q: %v", configPath, err)
klog.Fatalf("Failed to read configuration file %q: %v", configPath, err)
}
err = json.Unmarshal(f, &l.config)
if err != nil {
glog.Fatalf("Failed to unmarshal configuration file %q: %v", configPath, err)
klog.Fatalf("Failed to unmarshal configuration file %q: %v", configPath, err)
}
// Apply default configurations
(&l.config).ApplyDefaultConfiguration()
err = l.config.ValidateRules()
if err != nil {
glog.Fatalf("Failed to validate %s matching rules %+v: %v", l.configPath, l.config.Rules, err)
klog.Fatalf("Failed to validate %s matching rules %+v: %v", l.configPath, l.config.Rules, err)
}
glog.Infof("Finish parsing log monitor config file %s: %+v", l.configPath, l.config)
klog.Infof("Finish parsing log monitor config file %s: %+v", l.configPath, l.config)

l.watcher = logwatchers.GetLogWatcherOrDie(l.config.WatcherConfig)
l.buffer = NewLogBuffer(l.config.BufferSize)

@@ -95,19 +95,19 @@ func initializeProblemMetricsOrDie(rules []systemlogtypes.Rule) {
if rule.Type == types.Perm {
err := problemmetrics.GlobalProblemMetricsManager.SetProblemGauge(rule.Condition, rule.Reason, false)
if err != nil {
glog.Fatalf("Failed to initialize problem gauge metrics for problem %q, reason %q: %v",
klog.Fatalf("Failed to initialize problem gauge metrics for problem %q, reason %q: %v",
rule.Condition, rule.Reason, err)
}
}
err := problemmetrics.GlobalProblemMetricsManager.IncrementProblemCounter(rule.Reason, 0)
if err != nil {
glog.Fatalf("Failed to initialize problem counter metrics for %q: %v", rule.Reason, err)
klog.Fatalf("Failed to initialize problem counter metrics for %q: %v", rule.Reason, err)
}
}
}

func (l *logMonitor) Start() (<-chan *types.Status, error) {
glog.Infof("Start log monitor %s", l.configPath)
klog.Infof("Start log monitor %s", l.configPath)
var err error
l.logCh, err = l.watcher.Watch()
if err != nil {

@@ -118,7 +118,7 @@ func (l *logMonitor) Start() (<-chan *types.Status, error) {
}

func (l *logMonitor) Stop() {
glog.Infof("Stop log monitor %s", l.configPath)
klog.Infof("Stop log monitor %s", l.configPath)
l.tomb.Stop()
}
@@ -133,13 +133,13 @@ func (l *logMonitor) monitorLoop() {
select {
case log, ok := <-l.logCh:
if !ok {
glog.Errorf("Log channel closed: %s", l.configPath)
klog.Errorf("Log channel closed: %s", l.configPath)
return
}
l.parseLog(log)
case <-l.tomb.Stopping():
l.watcher.Stop()
glog.Infof("Log monitor stopped: %s", l.configPath)
klog.Infof("Log monitor stopped: %s", l.configPath)
return
}
}

@@ -156,7 +156,7 @@ func (l *logMonitor) parseLog(log *systemlogtypes.Log) {
continue
}
status := l.generateStatus(matched, rule)
glog.Infof("New status generated: %+v", status)
klog.Infof("New status generated: %+v", status)
l.output <- status
}
}

@@ -207,14 +207,14 @@ func (l *logMonitor) generateStatus(logs []*systemlogtypes.Log, rule systemlogty
for _, event := range events {
err := problemmetrics.GlobalProblemMetricsManager.IncrementProblemCounter(event.Reason, 1)
if err != nil {
glog.Errorf("Failed to update problem counter metrics for %q: %v", event.Reason, err)
klog.Errorf("Failed to update problem counter metrics for %q: %v", event.Reason, err)
}
}
for _, condition := range changedConditions {
err := problemmetrics.GlobalProblemMetricsManager.SetProblemGauge(
condition.Type, condition.Reason, condition.Status == types.True)
if err != nil {
glog.Errorf("Failed to update problem gauge metrics for problem %q, reason %q: %v",
klog.Errorf("Failed to update problem gauge metrics for problem %q, reason %q: %v",
condition.Type, condition.Reason, err)
}
}

@@ -232,7 +232,7 @@ func (l *logMonitor) generateStatus(logs []*systemlogtypes.Log, rule systemlogty
func (l *logMonitor) initializeStatus() {
// Initialize the default node conditions
l.conditions = initialConditions(l.config.DefaultConditions)
glog.Infof("Initialize condition generated: %+v", l.conditions)
klog.Infof("Initialize condition generated: %+v", l.conditions)
// Update the initial status
l.output <- &types.Status{
Source: l.config.Source,
@@ -24,7 +24,7 @@ import (
"time"

utilclock "code.cloudfoundry.org/clock"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/systemlogmonitor/logwatchers/types"
logtypes "k8s.io/node-problem-detector/pkg/systemlogmonitor/types"

@@ -48,11 +48,11 @@ type filelogWatcher struct {
func NewSyslogWatcherOrDie(cfg types.WatcherConfig) types.LogWatcher {
uptime, err := util.GetUptimeDuration()
if err != nil {
glog.Fatalf("failed to get uptime: %v", err)
klog.Fatalf("failed to get uptime: %v", err)
}
startTime, err := util.GetStartTime(time.Now(), uptime, cfg.Lookback, cfg.Delay)
if err != nil {
glog.Fatalf("failed to get start time: %v", err)
klog.Fatalf("failed to get start time: %v", err)
}

return &filelogWatcher{

@@ -77,7 +77,7 @@ func (s *filelogWatcher) Watch() (<-chan *logtypes.Log, error) {
}
s.reader = bufio.NewReader(r)
s.closer = r
glog.Info("Start watching filelog")
klog.Info("Start watching filelog")
go s.watchLoop()
return s.logCh, nil
}

@@ -102,14 +102,14 @@ func (s *filelogWatcher) watchLoop() {
for {
select {
case <-s.tomb.Stopping():
glog.Infof("Stop watching filelog")
klog.Infof("Stop watching filelog")
return
default:
}

line, err := s.reader.ReadString('\n')
if err != nil && err != io.EOF {
glog.Errorf("Exiting filelog watch with error: %v", err)
klog.Errorf("Exiting filelog watch with error: %v", err)
return
}
buffer.WriteString(line)

@@ -121,12 +121,12 @@ func (s *filelogWatcher) watchLoop() {
buffer.Reset()
log, err := s.translator.translate(strings.TrimSuffix(line, "\n"))
if err != nil {
glog.Warningf("Unable to parse line: %q, %v", line, err)
klog.Warningf("Unable to parse line: %q, %v", line, err)
continue
}
// Discard messages before start time.
if log.Timestamp.Before(s.startTime) {
glog.V(5).Infof("Throwing away msg %q before start time: %v < %v", log.Message, log.Timestamp, s.startTime)
klog.V(5).Infof("Throwing away msg %q before start time: %v < %v", log.Message, log.Timestamp, s.startTime)
continue
}
s.logCh <- log
@@ -22,7 +22,7 @@ import (
logtypes "k8s.io/node-problem-detector/pkg/systemlogmonitor/types"

"github.com/golang/glog"
"k8s.io/klog/v2"
)

// translator translates log line into internal log type based on user defined

@@ -46,7 +46,7 @@ const (
func newTranslatorOrDie(pluginConfig map[string]string) *translator {
if err := validatePluginConfig(pluginConfig); err != nil {
glog.Errorf("Failed to validate plugin configuration %+v: %v", pluginConfig, err)
klog.Errorf("Failed to validate plugin configuration %+v: %v", pluginConfig, err)
}
return &translator{
timestampRegexp: regexp.MustCompile(pluginConfig[timestampKey]),
@@ -26,7 +26,7 @@ import (
"time"

"github.com/coreos/go-systemd/sdjournal"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/systemlogmonitor/logwatchers/types"
logtypes "k8s.io/node-problem-detector/pkg/systemlogmonitor/types"

@@ -52,11 +52,11 @@ type journaldWatcher struct {
func NewJournaldWatcher(cfg types.WatcherConfig) types.LogWatcher {
uptime, err := util.GetUptimeDuration()
if err != nil {
glog.Fatalf("failed to get uptime: %v", err)
klog.Fatalf("failed to get uptime: %v", err)
}
startTime, err := util.GetStartTime(time.Now(), uptime, cfg.Lookback, cfg.Delay)
if err != nil {
glog.Fatalf("failed to get start time: %v", err)
klog.Fatalf("failed to get start time: %v", err)
}

return &journaldWatcher{

@@ -95,21 +95,21 @@ func (j *journaldWatcher) watchLoop() {
startTimestamp := timeToJournalTimestamp(j.startTime)
defer func() {
if err := j.journal.Close(); err != nil {
glog.Errorf("Failed to close journal client: %v", err)
klog.Errorf("Failed to close journal client: %v", err)
}
j.tomb.Done()
}()
for {
select {
case <-j.tomb.Stopping():
glog.Infof("Stop watching journald")
klog.Infof("Stop watching journald")
return
default:
}
// Get next log entry.
n, err := j.journal.Next()
if err != nil {
glog.Errorf("Failed to get next journal entry: %v", err)
klog.Errorf("Failed to get next journal entry: %v", err)
continue
}
// If next reaches the end, wait for waitLogTimeout.

@@ -120,12 +120,12 @@ func (j *journaldWatcher) watchLoop() {
entry, err := j.journal.GetEntry()
if err != nil {
glog.Errorf("failed to get journal entry: %v", err)
klog.Errorf("failed to get journal entry: %v", err)
continue
}

if entry.RealtimeTimestamp < startTimestamp {
glog.V(5).Infof("Throwing away journal entry %q before start time: %v < %v",
klog.V(5).Infof("Throwing away journal entry %q before start time: %v < %v",
entry.Fields[sdjournal.SD_JOURNAL_FIELD_MESSAGE], entry.RealtimeTimestamp, startTimestamp)
continue
}

@@ -148,7 +148,7 @@ func getJournal(cfg types.WatcherConfig, startTime time.Time) (*sdjournal.Journa
if err != nil {
return nil, fmt.Errorf("failed to create journal client from default log path: %v", err)
}
glog.Info("unspecified log path so using systemd default")
klog.Info("unspecified log path so using systemd default")
} else {
// If the path doesn't exist, NewJournalFromDir will
// create it instead of returning error. So check the
@@ -23,7 +23,7 @@ import (
utilclock "code.cloudfoundry.org/clock"
"github.com/euank/go-kmsg-parser/kmsgparser"
"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/systemlogmonitor/logwatchers/types"
logtypes "k8s.io/node-problem-detector/pkg/systemlogmonitor/types"

@@ -45,11 +45,11 @@ type kernelLogWatcher struct {
func NewKmsgWatcher(cfg types.WatcherConfig) types.LogWatcher {
uptime, err := util.GetUptimeDuration()
if err != nil {
glog.Fatalf("failed to get uptime: %v", err)
klog.Fatalf("failed to get uptime: %v", err)
}
startTime, err := util.GetStartTime(time.Now(), uptime, cfg.Lookback, cfg.Delay)
if err != nil {
glog.Fatalf("failed to get start time: %v", err)
klog.Fatalf("failed to get start time: %v", err)
}

return &kernelLogWatcher{

@@ -89,7 +89,7 @@ func (k *kernelLogWatcher) watchLoop() {
kmsgs := k.kmsgParser.Parse()
defer func() {
if err := k.kmsgParser.Close(); err != nil {
glog.Errorf("Failed to close kmsg parser: %v", err)
klog.Errorf("Failed to close kmsg parser: %v", err)
}
close(k.logCh)
k.tomb.Done()

@@ -98,21 +98,21 @@ func (k *kernelLogWatcher) watchLoop() {
for {
select {
case <-k.tomb.Stopping():
glog.Infof("Stop watching kernel log")
klog.Infof("Stop watching kernel log")
return
case msg, ok := <-kmsgs:
if !ok {
glog.Error("Kmsg channel closed")
klog.Error("Kmsg channel closed")
return
}
glog.V(5).Infof("got kernel message: %+v", msg)
klog.V(5).Infof("got kernel message: %+v", msg)
if msg.Message == "" {
continue
}

// Discard messages before start time.
if msg.Timestamp.Before(k.startTime) {
glog.V(5).Infof("Throwing away msg %q before start time: %v < %v", msg.Message, msg.Timestamp, k.startTime)
klog.V(5).Infof("Throwing away msg %q before start time: %v < %v", msg.Message, msg.Timestamp, k.startTime)
continue
}
@@ -19,13 +19,13 @@ package kmsg
import (
"runtime"

"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/systemlogmonitor/logwatchers/types"
)

// NewKmsgWatcher creates a watcher which will read messages from /dev/kmsg
func NewKmsgWatcher(cfg types.WatcherConfig) types.LogWatcher {
glog.Fatalf("kmsg parser is not supported in %s", runtime.GOOS)
klog.Fatalf("kmsg parser is not supported in %s", runtime.GOOS)
return nil
}

@@ -19,7 +19,7 @@ package logwatchers
import (
"k8s.io/node-problem-detector/pkg/systemlogmonitor/logwatchers/types"

"github.com/golang/glog"
"k8s.io/klog/v2"
)

// createFuncs is a table of createFuncs for all supported log watchers.

@@ -35,8 +35,8 @@ func registerLogWatcher(name string, create types.WatcherCreateFunc) {
func GetLogWatcherOrDie(config types.WatcherConfig) types.LogWatcher {
create, ok := createFuncs[config.Plugin]
if !ok {
glog.Fatalf("No create function found for plugin %q", config.Plugin)
klog.Fatalf("No create function found for plugin %q", config.Plugin)
}
glog.Infof("Use log watcher of plugin %q", config.Plugin)
klog.Infof("Use log watcher of plugin %q", config.Plugin)
return create(config)
}
@@ -17,8 +17,8 @@ limitations under the License.
package systemstatsmonitor

import (
"github.com/golang/glog"
"github.com/shirou/gopsutil/v3/cpu"
"k8s.io/klog/v2"

ssmtypes "k8s.io/node-problem-detector/pkg/systemstatsmonitor/types"
"k8s.io/node-problem-detector/pkg/util/metrics"

@@ -67,7 +67,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.LastValue,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.CPURunnableTaskCountID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.CPURunnableTaskCountID, err)
}

cc.mUsageTime, err = metrics.NewFloat64Metric(

@@ -78,7 +78,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.Sum,
[]string{stateLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.CPUUsageTimeID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.CPUUsageTimeID, err)
}

cc.mCpuLoad1m, err = metrics.NewFloat64Metric(

@@ -89,7 +89,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.LastValue,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.CPULoad1m, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.CPULoad1m, err)
}

cc.mCpuLoad5m, err = metrics.NewFloat64Metric(

@@ -100,7 +100,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.LastValue,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.CPULoad5m, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.CPULoad5m, err)
}

cc.mCpuLoad15m, err = metrics.NewFloat64Metric(

@@ -111,7 +111,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.LastValue,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.CPULoad15m, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.CPULoad15m, err)
}

cc.mSystemProcessesTotal, err = metrics.NewInt64Metric(

@@ -122,7 +122,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.Sum,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.SystemProcessesTotal, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.SystemProcessesTotal, err)
}

cc.mSystemProcsRunning, err = metrics.NewInt64Metric(

@@ -133,7 +133,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.LastValue,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.SystemProcsRunning, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.SystemProcsRunning, err)
}

cc.mSystemProcsBlocked, err = metrics.NewInt64Metric(

@@ -144,7 +144,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.LastValue,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.SystemProcsBlocked, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.SystemProcsBlocked, err)
}

cc.mSystemInterruptsTotal, err = metrics.NewInt64Metric(

@@ -155,7 +155,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.Sum,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.SystemInterruptsTotal, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.SystemInterruptsTotal, err)
}

cc.mSystemCPUStat, err = metrics.NewFloat64Metric(

@@ -166,7 +166,7 @@ func NewCPUCollectorOrDie(cpuConfig *ssmtypes.CPUStatsConfig, procPath string) *
metrics.Sum,
[]string{cpuLabel, stageLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.SystemCPUStat, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.SystemCPUStat, err)
}

cc.lastUsageTime = make(map[string]float64)

@@ -182,7 +182,7 @@ func (cc *cpuCollector) recordUsage() {
// Set percpu=false to get aggregated usage from all CPUs.
timersStats, err := cpu.Times(false)
if err != nil {
glog.Errorf("Failed to retrieve CPU timers stat: %v", err)
klog.Errorf("Failed to retrieve CPU timers stat: %v", err)
return
}
timersStat := timersStats[0]
@@ -21,9 +21,9 @@ package systemstatsmonitor
import (
"fmt"

"github.com/golang/glog"
"github.com/prometheus/procfs"
"github.com/shirou/gopsutil/v3/load"
"k8s.io/klog/v2"
)

func (cc *cpuCollector) recordLoad() {

@@ -35,7 +35,7 @@ func (cc *cpuCollector) recordLoad() {
loadAvg, err := load.Avg()
if err != nil {
glog.Errorf("Failed to retrieve average CPU load: %v", err)
klog.Errorf("Failed to retrieve average CPU load: %v", err)
return
}

@@ -64,7 +64,7 @@ func (cc *cpuCollector) recordSystemStats() {
fs, err := procfs.NewFS(cc.procPath)
stats, err := fs.Stat()
if err != nil {
glog.Errorf("Failed to retrieve cpu/process stats: %v", err)
klog.Errorf("Failed to retrieve cpu/process stats: %v", err)
return
}
@@ -22,8 +22,8 @@ import (
"strings"
"time"

"github.com/golang/glog"
"github.com/shirou/gopsutil/v3/disk"
"k8s.io/klog/v2"

ssmtypes "k8s.io/node-problem-detector/pkg/systemstatsmonitor/types"
"k8s.io/node-problem-detector/pkg/util/metrics"
@@ -69,7 +69,7 @@ func NewDiskCollectorOrDie(diskConfig *ssmtypes.DiskStatsConfig) *diskCollector
metrics.Sum,
[]string{deviceNameLabel})
if err != nil {
glog.Fatalf("Error initializing metric for disk/io_time: %v", err)
klog.Fatalf("Error initializing metric for disk/io_time: %v", err)
}

// Use metrics.Sum aggregation method to ensure the metric is a counter/cumulative metric.
@@ -81,7 +81,7 @@ func NewDiskCollectorOrDie(diskConfig *ssmtypes.DiskStatsConfig) *diskCollector
metrics.Sum,
[]string{deviceNameLabel})
if err != nil {
glog.Fatalf("Error initializing metric for disk/weighted_io: %v", err)
klog.Fatalf("Error initializing metric for disk/weighted_io: %v", err)
}

dc.mAvgQueueLen, err = metrics.NewFloat64Metric(
@@ -92,7 +92,7 @@ func NewDiskCollectorOrDie(diskConfig *ssmtypes.DiskStatsConfig) *diskCollector
metrics.LastValue,
[]string{deviceNameLabel})
if err != nil {
glog.Fatalf("Error initializing metric for disk/avg_queue_len: %v", err)
klog.Fatalf("Error initializing metric for disk/avg_queue_len: %v", err)
}

dc.mOpsCount, err = metrics.NewInt64Metric(
@@ -103,7 +103,7 @@ func NewDiskCollectorOrDie(diskConfig *ssmtypes.DiskStatsConfig) *diskCollector
metrics.Sum,
[]string{deviceNameLabel, directionLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.DiskOpsCountID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.DiskOpsCountID, err)
}

dc.mMergedOpsCount, err = metrics.NewInt64Metric(
@@ -114,7 +114,7 @@ func NewDiskCollectorOrDie(diskConfig *ssmtypes.DiskStatsConfig) *diskCollector
metrics.Sum,
[]string{deviceNameLabel, directionLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.DiskMergedOpsCountID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.DiskMergedOpsCountID, err)
}

dc.mOpsBytes, err = metrics.NewInt64Metric(
@@ -125,7 +125,7 @@ func NewDiskCollectorOrDie(diskConfig *ssmtypes.DiskStatsConfig) *diskCollector
metrics.Sum,
[]string{deviceNameLabel, directionLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.DiskOpsBytesID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.DiskOpsBytesID, err)
}

dc.mOpsTime, err = metrics.NewInt64Metric(
@@ -136,7 +136,7 @@ func NewDiskCollectorOrDie(diskConfig *ssmtypes.DiskStatsConfig) *diskCollector
metrics.Sum,
[]string{deviceNameLabel, directionLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.DiskOpsTimeID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.DiskOpsTimeID, err)
}

dc.mBytesUsed, err = metrics.NewInt64Metric(
@@ -147,7 +147,7 @@ func NewDiskCollectorOrDie(diskConfig *ssmtypes.DiskStatsConfig) *diskCollector
metrics.LastValue,
[]string{deviceNameLabel, fsTypeLabel, mountOptionLabel, stateLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.DiskBytesUsedID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.DiskBytesUsedID, err)
}

dc.lastIOTime = make(map[string]uint64)
@@ -247,7 +247,7 @@ func (dc *diskCollector) collect() {

partitions, err := disk.Partitions(false)
if err != nil {
glog.Errorf("Failed to list disk partitions: %v", err)
klog.Errorf("Failed to list disk partitions: %v", err)
return
}

@@ -258,7 +258,7 @@
// Fetch metrics from /proc, /sys.
ioCountersStats, err := disk.IOCounters(devices...)
if err != nil {
glog.Errorf("Failed to retrieve disk IO counters: %v", err)
klog.Errorf("Failed to retrieve disk IO counters: %v", err)
return
}
sampleTime := time.Now()
@@ -283,7 +283,7 @@
seen[partition.Device] = true
usageStat, err := disk.Usage(partition.Mountpoint)
if err != nil {
glog.Errorf("Failed to retrieve disk usage for %q: %v", partition.Mountpoint, err)
klog.Errorf("Failed to retrieve disk usage for %q: %v", partition.Mountpoint, err)
continue
}
deviceName := strings.TrimPrefix(partition.Device, "/dev/")
@@ -306,7 +306,7 @@ func listRootBlockDevices(timeout time.Duration) []string {
cmd := exec.CommandContext(ctx, "lsblk", "-d", "-n", "-o", "NAME")
stdout, err := cmd.Output()
if err != nil {
glog.Errorf("Error calling lsblk")
klog.Errorf("Error calling lsblk")
}
return strings.Split(strings.TrimSpace(string(stdout)), "\n")
}

@@ -17,8 +17,8 @@ limitations under the License.
package systemstatsmonitor

import (
"github.com/golang/glog"
"github.com/shirou/gopsutil/v3/host"
"k8s.io/klog/v2"

ssmtypes "k8s.io/node-problem-detector/pkg/systemstatsmonitor/types"
"k8s.io/node-problem-detector/pkg/util"
@@ -35,13 +35,13 @@ func NewHostCollectorOrDie(hostConfig *ssmtypes.HostStatsConfig) *hostCollector

kernelVersion, err := host.KernelVersion()
if err != nil {
glog.Fatalf("Failed to retrieve kernel version: %v", err)
klog.Fatalf("Failed to retrieve kernel version: %v", err)
}
hc.tags["kernel_version"] = kernelVersion

osVersion, err := util.GetOSVersion()
if err != nil {
glog.Fatalf("Failed to retrieve OS version: %v", err)
klog.Fatalf("Failed to retrieve OS version: %v", err)
}
hc.tags["os_version"] = osVersion

@@ -55,7 +55,7 @@ func NewHostCollectorOrDie(hostConfig *ssmtypes.HostStatsConfig) *hostCollector
metrics.LastValue,
[]string{"kernel_version", "os_version"})
if err != nil {
glog.Fatalf("Error initializing metric for host/uptime: %v", err)
klog.Fatalf("Error initializing metric for host/uptime: %v", err)
}
}

@@ -69,7 +69,7 @@ func (hc *hostCollector) collect() {

uptime, err := host.Uptime()
if err != nil {
glog.Errorf("Failed to retrieve uptime of the host: %v", err)
klog.Errorf("Failed to retrieve uptime of the host: %v", err)
return
}

@@ -17,7 +17,7 @@ limitations under the License.
package systemstatsmonitor

import (
"github.com/golang/glog"
"k8s.io/klog/v2"

ssmtypes "k8s.io/node-problem-detector/pkg/systemstatsmonitor/types"
"k8s.io/node-problem-detector/pkg/util/metrics"
@@ -46,7 +46,7 @@ func NewMemoryCollectorOrDie(memoryConfig *ssmtypes.MemoryStatsConfig) *memoryCo
metrics.LastValue,
[]string{stateLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryBytesUsedID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryBytesUsedID, err)
}

mc.mAnonymousUsed, err = metrics.NewInt64Metric(
@@ -57,7 +57,7 @@ func NewMemoryCollectorOrDie(memoryConfig *ssmtypes.MemoryStatsConfig) *memoryCo
metrics.LastValue,
[]string{stateLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryAnonymousUsedID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryAnonymousUsedID, err)
}

mc.mPageCacheUsed, err = metrics.NewInt64Metric(
@@ -68,7 +68,7 @@ func NewMemoryCollectorOrDie(memoryConfig *ssmtypes.MemoryStatsConfig) *memoryCo
metrics.LastValue,
[]string{stateLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryPageCacheUsedID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryPageCacheUsedID, err)
}

mc.mUnevictableUsed, err = metrics.NewInt64Metric(
@@ -79,7 +79,7 @@ func NewMemoryCollectorOrDie(memoryConfig *ssmtypes.MemoryStatsConfig) *memoryCo
metrics.LastValue,
[]string{})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryUnevictableUsedID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryUnevictableUsedID, err)
}

mc.mDirtyUsed, err = metrics.NewInt64Metric(
@@ -90,7 +90,7 @@ func NewMemoryCollectorOrDie(memoryConfig *ssmtypes.MemoryStatsConfig) *memoryCo
metrics.LastValue,
[]string{stateLabel})
if err != nil {
glog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryDirtyUsedID, err)
klog.Fatalf("Error initializing metric for %q: %v", metrics.MemoryDirtyUsedID, err)
}

return &mc

@@ -19,8 +19,8 @@ limitations under the License.
package systemstatsmonitor

import (
"github.com/golang/glog"
"github.com/prometheus/procfs"
"k8s.io/klog/v2"
)

func (mc *memoryCollector) collect() {
@@ -30,12 +30,12 @@ func (mc *memoryCollector) collect() {

proc, err := procfs.NewDefaultFS()
if err != nil {
glog.Errorf("Failed to find /proc mount point: %v", err)
klog.Errorf("Failed to find /proc mount point: %v", err)
return
}
meminfo, err := proc.Meminfo()
if err != nil {
glog.Errorf("Failed to retrieve memory stats: %v", err)
klog.Errorf("Failed to retrieve memory stats: %v", err)
return
}

@@ -17,7 +17,7 @@ limitations under the License.
package systemstatsmonitor

import (
"github.com/golang/glog"
"k8s.io/klog/v2"

"github.com/shirou/gopsutil/v3/mem"
)
@@ -29,7 +29,7 @@ func (mc *memoryCollector) collect() {

meminfo, err := mem.VirtualMemory()
if err != nil {
glog.Errorf("cannot get windows memory metrics from GlobalMemoryStatusEx: %v", err)
klog.Errorf("cannot get windows memory metrics from GlobalMemoryStatusEx: %v", err)
return
}

@@ -22,8 +22,8 @@ import (
ssmtypes "k8s.io/node-problem-detector/pkg/systemstatsmonitor/types"
"k8s.io/node-problem-detector/pkg/util/metrics"

"github.com/golang/glog"
"github.com/prometheus/procfs"
"k8s.io/klog/v2"
)

type newInt64MetricFn func(metricID metrics.MetricID, viewName string, description string, unit string, aggregation metrics.Aggregation, tagNames []string) (metrics.Int64MetricInterface, error)
@@ -208,12 +208,12 @@ func (nc *netCollector) mustRegisterMetric(metricID metrics.MetricID, descriptio
aggregation metrics.Aggregation, exporter func(stat procfs.NetDevLine) int64) {
metricConfig, ok := nc.config.MetricsConfigs[string(metricID)]
if !ok {
glog.Fatalf("Metric config `%q` not found", metricID)
klog.Fatalf("Metric config `%q` not found", metricID)
}
err := nc.recorder.Register(metricID, metricConfig.DisplayName, description, unit,
aggregation, []string{interfaceNameLabel}, exporter)
if err != nil {
glog.Fatalf("Failed to initialize metric %q: %v", metricID, err)
klog.Fatalf("Failed to initialize metric %q: %v", metricID, err)
}
}

@@ -221,14 +221,14 @@ func (nc *netCollector) recordNetDev() {
fs, err := procfs.NewFS(nc.procPath)
stats, err := fs.NetDev()
if err != nil {
glog.Errorf("Failed to retrieve net dev stat: %v", err)
klog.Errorf("Failed to retrieve net dev stat: %v", err)
return
}

excludeInterfaceRegexp := nc.config.ExcludeInterfaceRegexp.R
for iface, ifaceStats := range stats {
if excludeInterfaceRegexp != nil && excludeInterfaceRegexp.MatchString(iface) {
glog.V(6).Infof("Network interface %s matched exclude regexp %q, skipping recording", iface, excludeInterfaceRegexp)
klog.V(6).Infof("Network interface %s matched exclude regexp %q, skipping recording", iface, excludeInterfaceRegexp)
continue
}
tags := map[string]string{}
@@ -282,7 +282,7 @@ func (r ifaceStatRecorder) RecordWithSameTags(stat procfs.NetDevLine, tags map[s
for metricID, collector := range r.collectors {
measurement := collector.exporter(stat)
collector.metric.Record(tags, measurement)
glog.V(6).Infof("Metric %q record measurement %d with tags %v", metricID, measurement, tags)
klog.V(6).Infof("Metric %q record measurement %d with tags %v", metricID, measurement, tags)
}
}

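The netCollector hunks keep their V(6) guards, so per-interface measurements stay out of the log unless verbose logging is requested. Roughly, and only as an illustration (the metric name and tag values below are made up for the example), a level-6 line is emitted only when the verbosity threshold is at least 6:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	// Normally set on the command line, e.g. --v=6; at the default -v=0
	// the V(6) call below is a cheap no-op.
	flag.Set("v", "6")
	flag.Parse()
	defer klog.Flush()

	klog.V(6).Infof("Metric %q record measurement %d with tags %v",
		"example/metric", int64(42), map[string]string{"interface_name": "eth0"})
}
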
@@ -20,7 +20,7 @@ import (
"strconv"
"strings"

"github.com/golang/glog"
"k8s.io/klog/v2"
ssmtypes "k8s.io/node-problem-detector/pkg/systemstatsmonitor/types"
"k8s.io/node-problem-detector/pkg/util/metrics"
"k8s.io/node-problem-detector/pkg/util/metrics/system"
@@ -48,7 +48,7 @@ func NewOsFeatureCollectorOrDie(osFeatureConfig *ssmtypes.OSFeatureStatsConfig,
metrics.LastValue,
[]string{featureLabel, valueLabel})
if err != nil {
glog.Fatalf("Error initializing metric for system/os_feature: %v", err)
klog.Fatalf("Error initializing metric for system/os_feature: %v", err)
}
}
return &oc
@@ -104,7 +104,7 @@ func (ofc *osFeatureCollector) recordFeaturesFromModules(modules []system.Module
var knownModules []system.Module
f, err := os.ReadFile(ofc.config.KnownModulesConfigPath)
if err != nil {
glog.Warningf("Failed to read configuration file %s: %v",
klog.Warningf("Failed to read configuration file %s: %v",
ofc.config.KnownModulesConfigPath, err)
}
// When the knownModulesConfigPath is not set
@@ -112,7 +112,7 @@ func (ofc *osFeatureCollector) recordFeaturesFromModules(modules []system.Module
if f != nil {
err = json.Unmarshal(f, &knownModules)
if err != nil {
glog.Warningf("Failed to retrieve known modules %v", err)
klog.Warningf("Failed to retrieve known modules %v", err)
}
} else {
knownModules = []system.Module{}
@@ -152,12 +152,12 @@ func (ofc *osFeatureCollector) collect() {
}
cmdlineArgs, err := system.CmdlineArgs(filepath.Join(ofc.procPath, "/cmdline"))
if err != nil {
glog.Fatalf("Error retrieving cmdline args: %v", err)
klog.Fatalf("Error retrieving cmdline args: %v", err)
}
ofc.recordFeaturesFromCmdline(cmdlineArgs)
modules, err := system.Modules(filepath.Join(ofc.procPath, "/modules"))
if err != nil {
glog.Fatalf("Error retrieving kernel modules: %v", err)
klog.Fatalf("Error retrieving kernel modules: %v", err)
}
ofc.recordFeaturesFromModules(modules)
}

@@ -22,7 +22,7 @@ import (
"path/filepath"
"time"

"github.com/golang/glog"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/pkg/problemdaemon"
ssmtypes "k8s.io/node-problem-detector/pkg/systemstatsmonitor/types"
@@ -60,21 +60,21 @@ func NewSystemStatsMonitorOrDie(configPath string) types.Monitor {
// Apply configurations.
f, err := os.ReadFile(configPath)
if err != nil {
glog.Fatalf("Failed to read configuration file %q: %v", configPath, err)
klog.Fatalf("Failed to read configuration file %q: %v", configPath, err)
}
err = json.Unmarshal(f, &ssm.config)
if err != nil {
glog.Fatalf("Failed to unmarshal configuration file %q: %v", configPath, err)
klog.Fatalf("Failed to unmarshal configuration file %q: %v", configPath, err)
}

err = ssm.config.ApplyConfiguration()
if err != nil {
glog.Fatalf("Failed to apply configuration for %q: %v", configPath, err)
klog.Fatalf("Failed to apply configuration for %q: %v", configPath, err)
}

err = ssm.config.Validate()
if err != nil {
glog.Fatalf("Failed to validate %s configuration %+v: %v", ssm.configPath, ssm.config, err)
klog.Fatalf("Failed to validate %s configuration %+v: %v", ssm.configPath, ssm.config, err)
}

if len(ssm.config.CPUConfig.MetricsConfigs) > 0 {
@@ -105,7 +105,7 @@ func NewSystemStatsMonitorOrDie(configPath string) types.Monitor {
}

func (ssm *systemStatsMonitor) Start() (<-chan *types.Status, error) {
glog.Infof("Start system stats monitor %s", ssm.configPath)
klog.Infof("Start system stats monitor %s", ssm.configPath)
go ssm.monitorLoop()
return nil, nil
}
@@ -118,7 +118,7 @@ func (ssm *systemStatsMonitor) monitorLoop() {

select {
case <-ssm.tomb.Stopping():
glog.Infof("System stats monitor stopped: %s", ssm.configPath)
klog.Infof("System stats monitor stopped: %s", ssm.configPath)
return
default:
ssm.cpuCollector.collect()
@@ -139,13 +139,13 @@ func (ssm *systemStatsMonitor) monitorLoop() {
ssm.osFeatureCollector.collect()
ssm.netCollector.collect()
case <-ssm.tomb.Stopping():
glog.Infof("System stats monitor stopped: %s", ssm.configPath)
klog.Infof("System stats monitor stopped: %s", ssm.configPath)
return
}
}
}

func (ssm *systemStatsMonitor) Stop() {
glog.Infof("Stop system stats monitor %s", ssm.configPath)
klog.Infof("Stop system stats monitor %s", ssm.configPath)
ssm.tomb.Stop()
}

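The monitor above treats every configuration problem as fatal before it starts its loop. A hedged sketch of that "...OrDie" shape with klog (the helper name and the config path are hypothetical, not taken from this repository): klog.Fatalf logs the message and then terminates the process, so there is no error value to propagate.

package main

import (
	"encoding/json"
	"os"

	"k8s.io/klog/v2"
)

// loadConfigOrDie mirrors the pattern used by NewSystemStatsMonitorOrDie above:
// any configuration error is fatal because the monitor cannot run without it.
func loadConfigOrDie(path string, into interface{}) {
	raw, err := os.ReadFile(path)
	if err != nil {
		klog.Fatalf("Failed to read configuration file %q: %v", path, err)
	}
	if err := json.Unmarshal(raw, into); err != nil {
		klog.Fatalf("Failed to unmarshal configuration file %q: %v", path, err)
	}
}

func main() {
	var cfg map[string]interface{}
	loadConfigOrDie("system-stats-monitor.json", &cfg) // illustrative path
	klog.Infof("loaded %d top-level config keys", len(cfg))
}
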
@@ -19,7 +19,7 @@ package util
import (
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"k8s.io/node-problem-detector/pkg/types"

@@ -20,7 +20,7 @@ import (
"testing"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/node-problem-detector/pkg/types"
)

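The two hunks above only change the import spelling in pkg/util: the bare "k8s.io/api/core/v1" import becomes the explicitly aliased v1 "k8s.io/api/core/v1". The package already resolves to v1, so references compile unchanged either way; the explicit alias just makes the binding visible to readers and tooling. For example (illustrative snippet, not taken from the diff):

package util

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Both import spellings support exactly these references.
func exampleCondition() v1.NodeCondition {
	return v1.NodeCondition{
		Type:               v1.NodeConditionType("KernelDeadlock"),
		Status:             v1.ConditionFalse,
		LastTransitionTime: metav1.Now(),
	}
}
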
@@ -19,7 +19,7 @@ package makers
import (
"os"

"github.com/golang/glog"
"k8s.io/klog/v2"
)

func init() {
@@ -32,6 +32,6 @@ func makeFilesystemError() {
msg := []byte("fake filesystem error from problem-maker")
err := os.WriteFile(ext4ErrorTrigger, msg, 0200)
if err != nil {
glog.Fatalf("Failed writing log to %q: %v", ext4ErrorTrigger, err)
klog.Fatalf("Failed writing log to %q: %v", ext4ErrorTrigger, err)
}
}

@@ -20,7 +20,7 @@ import (
"os"
"strings"

"github.com/golang/glog"
"k8s.io/klog/v2"
)

func init() {
@@ -40,7 +40,7 @@ func writeKernelMessageOrDie(msg string) {
for _, line := range strings.Split(msg, "\n") {
err := os.WriteFile(kmsgPath, []byte(line), 0644)
if err != nil {
glog.Fatalf("Failed writing to %q: %v", kmsgPath, err)
klog.Fatalf("Failed writing to %q: %v", kmsgPath, err)
}
}
}

@@ -23,8 +23,8 @@ import (
"strings"
"time"

"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/klog/v2"

"k8s.io/node-problem-detector/test/e2e/problemmaker/makers"
)
@@ -54,7 +54,8 @@ func (o *options) AddFlags(fs *pflag.FlagSet) {
}

func main() {
// Set glog flag so that it does not log to files.
// Set klog flag so that it does not log to files.
klog.InitFlags(nil)
if err := flag.Set("logtostderr", "true"); err != nil {
fmt.Printf("Failed to set logtostderr=true: %v\n", err)
os.Exit(1)
@@ -65,12 +66,12 @@ func main() {
pflag.Parse()

if o.Problem == "" {
glog.Fatalf("Please specify the type of problem to make using the --problem argument.")
klog.Fatalf("Please specify the type of problem to make using the --problem argument.")
}

problemGenerator, ok := makers.ProblemGenerators[o.Problem]
if !ok {
glog.Fatalf("Expected to see a problem type of one of %q, but got %q.",
klog.Fatalf("Expected to see a problem type of one of %q, but got %q.",
makers.GetProblemTypes(), o.Problem)
}

@@ -89,7 +90,7 @@ func main() {
case <-done:
return
case <-ticker.C:
glog.Infof("Generating problem: %q", o.Problem)
klog.Infof("Generating problem: %q", o.Problem)
problemGenerator()
}
}

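main() above registers the klog flags on the standard library flag set and then parses its own options through pflag. When both sets should be accepted on one command line (so --v and --logtostderr work next to the pflag-defined options), one common arrangement, which is not what this commit does, is to graft the Go flag set into pflag before parsing:

package main

import (
	goflag "flag"

	"github.com/spf13/pflag"
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)                                // -v, -logtostderr, ... land on flag.CommandLine
	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) // expose them as pflag options as well
	problem := pflag.String("problem", "", "type of problem to generate (illustrative flag)")
	pflag.Parse()
	defer klog.Flush()

	if *problem == "" {
		klog.Fatalf("Please specify the type of problem to make using the --problem argument.")
	}
	klog.Infof("Generating problem: %q", *problem)
}
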
vendor/github.com/golang/glog/LICENSE (191 lines, generated, vendored)
@@ -1,191 +0,0 @@
vendor/github.com/golang/glog/README.md (36 lines, generated, vendored)
@@ -1,36 +0,0 @@
vendor/github.com/golang/glog/glog.go (592 lines, generated, vendored)
@@ -1,592 +0,0 @@
// Go support for leveled logs, analogous to https://github.com/google/glog.
|
||||
//
|
||||
// Copyright 2023 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
|
||||
// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
|
||||
// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
|
||||
//
|
||||
// Basic examples:
|
||||
//
|
||||
// glog.Info("Prepare to repel boarders")
|
||||
//
|
||||
// glog.Fatalf("Initialization failed: %s", err)
|
||||
//
|
||||
// See the documentation for the V function for an explanation of these examples:
|
||||
//
|
||||
// if glog.V(2) {
|
||||
// glog.Info("Starting transaction...")
|
||||
// }
|
||||
//
|
||||
// glog.V(2).Infoln("Processed", nItems, "elements")
|
||||
//
|
||||
// Log output is buffered and written periodically using Flush. Programs
|
||||
// should call Flush before exiting to guarantee all log output is written.
|
||||
//
|
||||
// By default, all log statements write to files in a temporary directory.
|
||||
// This package provides several flags that modify this behavior.
|
||||
// As a result, flag.Parse must be called before any logging is done.
|
||||
//
|
||||
// -logtostderr=false
|
||||
// Logs are written to standard error instead of to files.
|
||||
// -alsologtostderr=false
|
||||
// Logs are written to standard error as well as to files.
|
||||
// -stderrthreshold=ERROR
|
||||
// Log events at or above this severity are logged to standard
|
||||
// error as well as to files.
|
||||
// -log_dir=""
|
||||
// Log files will be written to this directory instead of the
|
||||
// default temporary directory.
|
||||
//
|
||||
// Other flags provide aids to debugging.
|
||||
//
|
||||
// -log_backtrace_at=""
|
||||
// A comma-separated list of file and line numbers holding a logging
|
||||
// statement, such as
|
||||
// -log_backtrace_at=gopherflakes.go:234
|
||||
// A stack trace will be written to the Info log whenever execution
|
||||
// hits one of these statements. (Unlike with -vmodule, the ".go"
|
||||
// must bepresent.)
|
||||
// -v=0
|
||||
// Enable V-leveled logging at the specified level.
|
||||
// -vmodule=""
|
||||
// The syntax of the argument is a comma-separated list of pattern=N,
|
||||
// where pattern is a literal file name (minus the ".go" suffix) or
|
||||
// "glob" pattern and N is a V level. For instance,
|
||||
// -vmodule=gopher*=3
|
||||
// sets the V level to 3 in all Go files whose names begin with "gopher",
|
||||
// and
|
||||
// -vmodule=/path/to/glog/glog_test=1
|
||||
// sets the V level to 1 in the Go file /path/to/glog/glog_test.go.
|
||||
// If a glob pattern contains a slash, it is matched against the full path,
|
||||
// and the file name. Otherwise, the pattern is
|
||||
// matched only against the file's basename. When both -vmodule and -v
|
||||
// are specified, the -vmodule values take precedence for the specified
|
||||
// modules.
|
||||
package glog
|
||||
|
||||
// This file contains the parts of the log package that are shared among all
|
||||
// implementations (file, envelope, and appengine).
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
stdLog "log"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog/internal/logsink"
|
||||
"github.com/golang/glog/internal/stackdump"
|
||||
)
|
||||
|
||||
var timeNow = time.Now // Stubbed out for testing.
|
||||
|
||||
// MaxSize is the maximum size of a log file in bytes.
|
||||
var MaxSize uint64 = 1024 * 1024 * 1800
|
||||
|
||||
// ErrNoLog is the error we return if no log file has yet been created
|
||||
// for the specified log type.
|
||||
var ErrNoLog = errors.New("log file not yet created")
|
||||
|
||||
// OutputStats tracks the number of output lines and bytes written.
|
||||
type OutputStats struct {
|
||||
lines int64
|
||||
bytes int64
|
||||
}
|
||||
|
||||
// Lines returns the number of lines written.
|
||||
func (s *OutputStats) Lines() int64 {
|
||||
return atomic.LoadInt64(&s.lines)
|
||||
}
|
||||
|
||||
// Bytes returns the number of bytes written.
|
||||
func (s *OutputStats) Bytes() int64 {
|
||||
return atomic.LoadInt64(&s.bytes)
|
||||
}
|
||||
|
||||
// Stats tracks the number of lines of output and number of bytes
|
||||
// per severity level. Values must be read with atomic.LoadInt64.
|
||||
var Stats struct {
|
||||
Info, Warning, Error OutputStats
|
||||
}
|
||||
|
||||
var severityStats = [...]*OutputStats{
|
||||
logsink.Info: &Stats.Info,
|
||||
logsink.Warning: &Stats.Warning,
|
||||
logsink.Error: &Stats.Error,
|
||||
logsink.Fatal: nil,
|
||||
}
|
||||
|
||||
// Level specifies a level of verbosity for V logs. The -v flag is of type
|
||||
// Level and should be modified only through the flag.Value interface.
|
||||
type Level int32
|
||||
|
||||
var metaPool sync.Pool // Pool of *logsink.Meta.
|
||||
|
||||
// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a
|
||||
// pointer, allocating a new one if necessary. (Returning the interface value
|
||||
// directly avoids an allocation if there was an existing pointer in the pool.)
|
||||
func metaPoolGet() (any, *logsink.Meta) {
|
||||
if metai := metaPool.Get(); metai != nil {
|
||||
return metai, metai.(*logsink.Meta)
|
||||
}
|
||||
meta := new(logsink.Meta)
|
||||
return meta, meta
|
||||
}
|
||||
|
||||
type stack bool
|
||||
|
||||
const (
|
||||
noStack = stack(false)
|
||||
withStack = stack(true)
|
||||
)
|
||||
|
||||
func appendBacktrace(depth int, format string, args []any) (string, []any) {
|
||||
// Capture a backtrace as a stackdump.Stack (both text and PC slice).
|
||||
// Structured log sinks can extract the backtrace in whichever format they
|
||||
// prefer (PCs or text), and Text sinks will include it as just another part
|
||||
// of the log message.
|
||||
//
|
||||
// Use depth instead of depth+1 so that the backtrace always includes the
|
||||
// log function itself - otherwise the reason for the trace appearing in the
|
||||
// log may not be obvious to the reader.
|
||||
dump := stackdump.Caller(depth)
|
||||
|
||||
// Add an arg and an entry in the format string for the stack dump.
|
||||
//
|
||||
// Copy the "args" slice to avoid a rare but serious aliasing bug
|
||||
// (corrupting the caller's slice if they passed it to a non-Fatal call
|
||||
// using "...").
|
||||
format = format + "\n\n%v\n"
|
||||
args = append(append([]any(nil), args...), dump)
|
||||
|
||||
return format, args
|
||||
}
|
||||
|
||||
// logf writes a log message for a log function call (or log function wrapper)
|
||||
// at the given depth in the current goroutine's stack.
|
||||
func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) {
|
||||
now := timeNow()
|
||||
_, file, line, ok := runtime.Caller(depth + 1)
|
||||
if !ok {
|
||||
file = "???"
|
||||
line = 1
|
||||
}
|
||||
|
||||
if stack == withStack || backtraceAt(file, line) {
|
||||
format, args = appendBacktrace(depth+1, format, args)
|
||||
}
|
||||
|
||||
metai, meta := metaPoolGet()
|
||||
*meta = logsink.Meta{
|
||||
Time: now,
|
||||
File: file,
|
||||
Line: line,
|
||||
Depth: depth + 1,
|
||||
Severity: severity,
|
||||
Verbose: verbose,
|
||||
Thread: int64(pid),
|
||||
}
|
||||
sinkf(meta, format, args...)
|
||||
metaPool.Put(metai)
|
||||
}
|
||||
|
||||
func sinkf(meta *logsink.Meta, format string, args ...any) {
|
||||
meta.Depth++
|
||||
n, err := logsink.Printf(meta, format, args...)
|
||||
if stats := severityStats[meta.Severity]; stats != nil {
|
||||
atomic.AddInt64(&stats.lines, 1)
|
||||
atomic.AddInt64(&stats.bytes, int64(n))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logsink.Printf(meta, "glog: exiting because of error: %s", err)
|
||||
sinks.file.Flush()
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
||||
|
||||
// CopyStandardLogTo arranges for messages written to the Go "log" package's
|
||||
// default logs to also appear in the Google logs for the named and lower
|
||||
// severities. Subsequent changes to the standard log's default output location
|
||||
// or format may break this behavior.
|
||||
//
|
||||
// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
|
||||
// recognized, CopyStandardLogTo panics.
|
||||
func CopyStandardLogTo(name string) {
|
||||
sev, err := logsink.ParseSeverity(name)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err))
|
||||
}
|
||||
// Set a log format that captures the user's file and line:
|
||||
// d.go:23: message
|
||||
stdLog.SetFlags(stdLog.Lshortfile)
|
||||
stdLog.SetOutput(logBridge(sev))
|
||||
}
|
||||
|
||||
// NewStandardLogger returns a Logger that writes to the Google logs for the
|
||||
// named and lower severities.
|
||||
//
|
||||
// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
|
||||
// recognized, NewStandardLogger panics.
|
||||
func NewStandardLogger(name string) *stdLog.Logger {
|
||||
sev, err := logsink.ParseSeverity(name)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err))
|
||||
}
|
||||
return stdLog.New(logBridge(sev), "", stdLog.Lshortfile)
|
||||
}
|
||||
|
||||
// logBridge provides the Write method that enables CopyStandardLogTo to connect
|
||||
// Go's standard logs to the logs provided by this package.
|
||||
type logBridge logsink.Severity
|
||||
|
||||
// Write parses the standard logging line and passes its components to the
|
||||
// logger for severity(lb).
|
||||
func (lb logBridge) Write(b []byte) (n int, err error) {
|
||||
var (
|
||||
file = "???"
|
||||
line = 1
|
||||
text string
|
||||
)
|
||||
// Split "d.go:23: message" into "d.go", "23", and "message".
|
||||
if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
|
||||
text = fmt.Sprintf("bad log format: %s", b)
|
||||
} else {
|
||||
file = string(parts[0])
|
||||
text = string(parts[2][1:]) // skip leading space
|
||||
line, err = strconv.Atoi(string(parts[1]))
|
||||
if err != nil {
|
||||
text = fmt.Sprintf("bad line number: %s", b)
|
||||
line = 1
|
||||
}
|
||||
}
|
||||
|
||||
// The depth below hard-codes details of how stdlog gets here. The alternative would be to walk
|
||||
// up the stack looking for src/log/log.go but that seems like it would be
|
||||
// unfortunately slow.
|
||||
const stdLogDepth = 4
|
||||
|
||||
metai, meta := metaPoolGet()
|
||||
*meta = logsink.Meta{
|
||||
Time: timeNow(),
|
||||
File: file,
|
||||
Line: line,
|
||||
Depth: stdLogDepth,
|
||||
Severity: logsink.Severity(lb),
|
||||
Thread: int64(pid),
|
||||
}
|
||||
|
||||
format := "%s"
|
||||
args := []any{text}
|
||||
if backtraceAt(file, line) {
|
||||
format, args = appendBacktrace(meta.Depth, format, args)
|
||||
}
|
||||
|
||||
sinkf(meta, format, args...)
|
||||
metaPool.Put(metai)
|
||||
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// defaultFormat returns a fmt.Printf format specifier that formats its
|
||||
// arguments as if they were passed to fmt.Print.
|
||||
func defaultFormat(args []any) string {
|
||||
n := len(args)
|
||||
switch n {
|
||||
case 0:
|
||||
return ""
|
||||
case 1:
|
||||
return "%v"
|
||||
}
|
||||
|
||||
b := make([]byte, 0, n*3-1)
|
||||
wasString := true // Suppress leading space.
|
||||
for _, arg := range args {
|
||||
isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String
|
||||
if wasString || isString {
|
||||
b = append(b, "%v"...)
|
||||
} else {
|
||||
b = append(b, " %v"...)
|
||||
}
|
||||
wasString = isString
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// lnFormat returns a fmt.Printf format specifier that formats its arguments
|
||||
// as if they were passed to fmt.Println.
|
||||
func lnFormat(args []any) string {
|
||||
if len(args) == 0 {
|
||||
return "\n"
|
||||
}
|
||||
|
||||
b := make([]byte, 0, len(args)*3)
|
||||
for range args {
|
||||
b = append(b, "%v "...)
|
||||
}
|
||||
b[len(b)-1] = '\n' // Replace the last space with a newline.
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// Verbose is a boolean type that implements Infof (like Printf) etc.
|
||||
// See the documentation of V for more information.
|
||||
type Verbose bool
|
||||
|
||||
// V reports whether verbosity at the call site is at least the requested level.
|
||||
// The returned value is a boolean of type Verbose, which implements Info, Infoln
|
||||
// and Infof. These methods will write to the Info log if called.
|
||||
// Thus, one may write either
|
||||
//
|
||||
// if glog.V(2) { glog.Info("log this") }
|
||||
//
|
||||
// or
|
||||
//
|
||||
// glog.V(2).Info("log this")
|
||||
//
|
||||
// The second form is shorter but the first is cheaper if logging is off because it does
|
||||
// not evaluate its arguments.
|
||||
//
|
||||
// Whether an individual call to V generates a log record depends on the setting of
|
||||
// the -v and --vmodule flags; both are off by default. If the level in the call to
|
||||
// V is at most the value of -v, or of -vmodule for the source file containing the
|
||||
// call, the V call will log.
|
||||
func V(level Level) Verbose {
|
||||
return VDepth(1, level)
|
||||
}
|
||||
|
||||
// VDepth acts as V but uses depth to determine which call frame to check vmodule for.
|
||||
// VDepth(0, level) is the same as V(level).
|
||||
func VDepth(depth int, level Level) Verbose {
|
||||
return Verbose(verboseEnabled(depth+1, level))
|
||||
}
|
||||
|
||||
// Info is equivalent to the global Info function, guarded by the value of v.
|
||||
// See the documentation of V for usage.
|
||||
func (v Verbose) Info(args ...any) {
|
||||
v.InfoDepth(1, args...)
|
||||
}
|
||||
|
||||
// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v.
|
||||
// See the documentation of V for usage.
|
||||
func (v Verbose) InfoDepth(depth int, args ...any) {
|
||||
if v {
|
||||
logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...)
|
||||
}
|
||||
}
|
||||
|
||||
// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v.
|
||||
// See the documentation of V for usage.
|
||||
func (v Verbose) InfoDepthf(depth int, format string, args ...any) {
|
||||
if v {
|
||||
logf(depth+1, logsink.Info, true, noStack, format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Infoln is equivalent to the global Infoln function, guarded by the value of v.
|
||||
// See the documentation of V for usage.
|
||||
func (v Verbose) Infoln(args ...any) {
|
||||
if v {
|
||||
logf(1, logsink.Info, true, noStack, lnFormat(args), args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Infof is equivalent to the global Infof function, guarded by the value of v.
|
||||
// See the documentation of V for usage.
|
||||
func (v Verbose) Infof(format string, args ...any) {
|
||||
if v {
|
||||
logf(1, logsink.Info, true, noStack, format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Info logs to the INFO log.
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Info(args ...any) {
|
||||
InfoDepth(1, args...)
|
||||
}
|
||||
|
||||
// InfoDepth calls Info from a different depth in the call stack.
|
||||
// This enables a callee to emit logs that use the callsite information of its caller
|
||||
// or any other callers in the stack. When depth == 0, the original callee's line
|
||||
// information is emitted. When depth > 0, depth frames are skipped in the call stack
|
||||
// and the final frame is treated like the original callee to Info.
|
||||
func InfoDepth(depth int, args ...any) {
|
||||
logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...)
|
||||
}
|
||||
|
||||
// InfoDepthf acts as InfoDepth but with format string.
|
||||
func InfoDepthf(depth int, format string, args ...any) {
|
||||
logf(depth+1, logsink.Info, false, noStack, format, args...)
|
||||
}
|
||||
|
||||
// Infoln logs to the INFO log.
|
||||
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
|
||||
func Infoln(args ...any) {
|
||||
logf(1, logsink.Info, false, noStack, lnFormat(args), args...)
|
||||
}
|
||||
|
||||
// Infof logs to the INFO log.
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Infof(format string, args ...any) {
|
||||
logf(1, logsink.Info, false, noStack, format, args...)
|
||||
}
|
||||
|
||||
// Warning logs to the WARNING and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Warning(args ...any) {
|
||||
WarningDepth(1, args...)
|
||||
}
|
||||
|
||||
// WarningDepth acts as Warning but uses depth to determine which call frame to log.
|
||||
// WarningDepth(0, "msg") is the same as Warning("msg").
|
||||
func WarningDepth(depth int, args ...any) {
|
||||
logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...)
|
||||
}
|
||||
|
||||
// WarningDepthf acts as Warningf but uses depth to determine which call frame to log.
|
||||
// WarningDepthf(0, "msg") is the same as Warningf("msg").
|
||||
func WarningDepthf(depth int, format string, args ...any) {
|
||||
logf(depth+1, logsink.Warning, false, noStack, format, args...)
|
||||
}
|
||||
|
||||
// Warningln logs to the WARNING and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
|
||||
func Warningln(args ...any) {
|
||||
logf(1, logsink.Warning, false, noStack, lnFormat(args), args...)
|
||||
}
|
||||
|
||||
// Warningf logs to the WARNING and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Warningf(format string, args ...any) {
|
||||
logf(1, logsink.Warning, false, noStack, format, args...)
|
||||
}
|
||||
|
||||
// Error logs to the ERROR, WARNING, and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Error(args ...any) {
|
||||
ErrorDepth(1, args...)
|
||||
}
|
||||
|
||||
// ErrorDepth acts as Error but uses depth to determine which call frame to log.
|
||||
// ErrorDepth(0, "msg") is the same as Error("msg").
|
||||
func ErrorDepth(depth int, args ...any) {
|
||||
logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...)
|
||||
}
|
||||
|
||||
// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log.
|
||||
// ErrorDepthf(0, "msg") is the same as Errorf("msg").
|
||||
func ErrorDepthf(depth int, format string, args ...any) {
|
||||
logf(depth+1, logsink.Error, false, noStack, format, args...)
|
||||
}
|
||||
|
||||
// Errorln logs to the ERROR, WARNING, and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
|
||||
func Errorln(args ...any) {
|
||||
logf(1, logsink.Error, false, noStack, lnFormat(args), args...)
|
||||
}
|
||||
|
||||
// Errorf logs to the ERROR, WARNING, and INFO logs.
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Errorf(format string, args ...any) {
|
||||
logf(1, logsink.Error, false, noStack, format, args...)
|
||||
}
|
||||
|
||||
func fatalf(depth int, format string, args ...any) {
|
||||
logf(depth+1, logsink.Fatal, false, withStack, format, args...)
|
||||
sinks.file.Flush()
|
||||
|
||||
err := abortProcess() // Should not return.
|
||||
|
||||
// Failed to abort the process using signals. Dump a stack trace and exit.
|
||||
Errorf("abortProcess returned unexpectedly: %v", err)
|
||||
sinks.file.Flush()
|
||||
pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
|
||||
os.Exit(2) // Exit with the same code as the default SIGABRT handler.
|
||||
}
|
||||
|
||||
// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||
// including a stack trace of all running goroutines, then calls os.Exit(2).
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Fatal(args ...any) {
|
||||
FatalDepth(1, args...)
|
||||
}
|
||||
|
||||
// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
|
||||
// FatalDepth(0, "msg") is the same as Fatal("msg").
|
||||
func FatalDepth(depth int, args ...any) {
|
||||
fatalf(depth+1, defaultFormat(args), args...)
|
||||
}
|
||||
|
||||
// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log.
|
||||
// FatalDepthf(0, "msg") is the same as Fatalf("msg").
|
||||
func FatalDepthf(depth int, format string, args ...any) {
|
||||
fatalf(depth+1, format, args...)
|
||||
}
|
||||
|
||||
// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||
// including a stack trace of all running goroutines, then calls os.Exit(2).
|
||||
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
|
||||
func Fatalln(args ...any) {
|
||||
fatalf(1, lnFormat(args), args...)
|
||||
}
|
||||
|
||||
// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||
// including a stack trace of all running goroutines, then calls os.Exit(2).
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Fatalf(format string, args ...any) {
|
||||
fatalf(1, format, args...)
|
||||
}
|
||||
|
||||
func exitf(depth int, format string, args ...any) {
|
||||
logf(depth+1, logsink.Fatal, false, noStack, format, args...)
|
||||
sinks.file.Flush()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
|
||||
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
|
||||
func Exit(args ...any) {
|
||||
ExitDepth(1, args...)
|
||||
}
|
||||
|
||||
// ExitDepth acts as Exit but uses depth to determine which call frame to log.
|
||||
// ExitDepth(0, "msg") is the same as Exit("msg").
|
||||
func ExitDepth(depth int, args ...any) {
|
||||
exitf(depth+1, defaultFormat(args), args...)
|
||||
}
|
||||
|
||||
// ExitDepthf acts as Exitf but uses depth to determine which call frame to log.
|
||||
// ExitDepthf(0, "msg") is the same as Exitf("msg").
|
||||
func ExitDepthf(depth int, format string, args ...any) {
|
||||
exitf(depth+1, format, args...)
|
||||
}
|
||||
|
||||
// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
|
||||
func Exitln(args ...any) {
|
||||
exitf(1, lnFormat(args), args...)
|
||||
}
|
||||
|
||||
// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
|
||||
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
|
||||
func Exitf(format string, args ...any) {
|
||||
exitf(1, format, args...)
|
||||
}
|
||||
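For context on the Fatal/Exit family deleted above (not part of the diff): a minimal sketch of how callers typically used the public github.com/golang/glog API. The flag value and messages are hypothetical; exit behaviour follows the doc comments above (Fatal dumps all goroutine stacks and exits 2, Exit exits 1 without a stack dump).

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// Route output to stderr so nothing is written to log files.
	flag.Set("logtostderr", "true")
	flag.Parse()

	glog.Info("informational message")  // INFO only
	glog.Warning("something looks odd") // WARNING and INFO
	glog.Error("operation failed")      // ERROR, WARNING and INFO

	// Exit flushes the file sink, logs at FATAL severity, and calls os.Exit(1)
	// without a stack dump; Fatal would additionally dump all goroutine stacks
	// and exit with code 2.
	glog.Exit("shutting down")
}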
413 vendor/github.com/golang/glog/glog_file.go generated vendored
@@ -1,413 +0,0 @@
|
||||
// Go support for leveled logs, analogous to https://github.com/google/glog.
|
||||
//
|
||||
// Copyright 2023 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// File I/O for logs.
|
||||
|
||||
package glog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog/internal/logsink"
|
||||
)
|
||||
|
||||
// logDirs lists the candidate directories for new log files.
|
||||
var logDirs []string
|
||||
|
||||
var (
|
||||
// If non-empty, overrides the choice of directory in which to write logs.
|
||||
// See createLogDirs for the full list of possible destinations.
|
||||
logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
|
||||
logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files")
|
||||
logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+
|
||||
" (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.")
|
||||
)
|
||||
|
||||
func createLogDirs() {
|
||||
if *logDir != "" {
|
||||
logDirs = append(logDirs, *logDir)
|
||||
}
|
||||
logDirs = append(logDirs, os.TempDir())
|
||||
}
|
||||
|
||||
var (
|
||||
pid = os.Getpid()
|
||||
program = filepath.Base(os.Args[0])
|
||||
host = "unknownhost"
|
||||
userName = "unknownuser"
|
||||
)
|
||||
|
||||
func init() {
|
||||
h, err := os.Hostname()
|
||||
if err == nil {
|
||||
host = shortHostname(h)
|
||||
}
|
||||
|
||||
current, err := user.Current()
|
||||
if err == nil {
|
||||
userName = current.Username
|
||||
}
|
||||
// Sanitize userName since it is used to construct file paths.
|
||||
userName = strings.Map(func(r rune) rune {
|
||||
switch {
|
||||
case r >= 'a' && r <= 'z':
|
||||
case r >= 'A' && r <= 'Z':
|
||||
case r >= '0' && r <= '9':
|
||||
default:
|
||||
return '_'
|
||||
}
|
||||
return r
|
||||
}, userName)
|
||||
}
|
||||
|
||||
// shortHostname returns its argument, truncating at the first period.
|
||||
// For instance, given "www.google.com" it returns "www".
|
||||
func shortHostname(hostname string) string {
|
||||
if i := strings.Index(hostname, "."); i >= 0 {
|
||||
return hostname[:i]
|
||||
}
|
||||
return hostname
|
||||
}
|
||||
|
||||
// logName returns a new log file name containing tag, with start time t, and
|
||||
// the name for the symlink for tag.
|
||||
func logName(tag string, t time.Time) (name, link string) {
|
||||
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
|
||||
program,
|
||||
host,
|
||||
userName,
|
||||
tag,
|
||||
t.Year(),
|
||||
t.Month(),
|
||||
t.Day(),
|
||||
t.Hour(),
|
||||
t.Minute(),
|
||||
t.Second(),
|
||||
pid)
|
||||
return name, program + "." + tag
|
||||
}
|
||||
|
||||
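Not part of the diff: a small illustrative sketch of the file name that logName above produces, using made-up program, host, user, and PID values.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors the format string in logName with hypothetical values; prints
	//   node-problem-detector.host1.root.log.INFO.20230415-102030.4242
	t := time.Date(2023, 4, 15, 10, 20, 30, 0, time.UTC)
	name := fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		"node-problem-detector", "host1", "root", "INFO",
		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), 4242)
	fmt.Println(name)
}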
var onceLogDirs sync.Once
|
||||
|
||||
// create creates a new log file and returns the file and its filename, which
|
||||
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
|
||||
// successfully, create also attempts to update the symlink for that tag, ignoring
|
||||
// errors.
|
||||
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
|
||||
onceLogDirs.Do(createLogDirs)
|
||||
if len(logDirs) == 0 {
|
||||
return nil, "", errors.New("log: no log dirs")
|
||||
}
|
||||
name, link := logName(tag, t)
|
||||
var lastErr error
|
||||
for _, dir := range logDirs {
|
||||
fname := filepath.Join(dir, name)
|
||||
f, err := os.Create(fname)
|
||||
if err == nil {
|
||||
symlink := filepath.Join(dir, link)
|
||||
os.Remove(symlink) // ignore err
|
||||
os.Symlink(name, symlink) // ignore err
|
||||
if *logLink != "" {
|
||||
lsymlink := filepath.Join(*logLink, link)
|
||||
os.Remove(lsymlink) // ignore err
|
||||
os.Symlink(fname, lsymlink) // ignore err
|
||||
}
|
||||
return f, fname, nil
|
||||
}
|
||||
lastErr = err
|
||||
}
|
||||
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
|
||||
}
|
||||
|
||||
// flushSyncWriter is the interface satisfied by logging destinations.
|
||||
type flushSyncWriter interface {
|
||||
Flush() error
|
||||
Sync() error
|
||||
io.Writer
|
||||
filenames() []string
|
||||
}
|
||||
|
||||
var sinks struct {
|
||||
stderr stderrSink
|
||||
file fileSink
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Register stderr first: that way if we crash during file-writing at least
|
||||
// the log will have gone somewhere.
|
||||
logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file)
|
||||
|
||||
sinks.file.flushChan = make(chan logsink.Severity, 1)
|
||||
go sinks.file.flushDaemon()
|
||||
}
|
||||
|
||||
// stderrSink is a logsink.Text that writes log entries to stderr
|
||||
// if they meet certain conditions.
|
||||
type stderrSink struct {
|
||||
mu sync.Mutex
|
||||
w io.Writer // if nil Emit uses os.Stderr directly
|
||||
}
|
||||
|
||||
// Enabled implements logsink.Text.Enabled. It returns true if any of the
|
||||
// various stderr flags are enabled for logs of the given severity, if the log
|
||||
// message is from the standard "log" package, or if google.Init has not yet run
|
||||
// (and hence file logging is not yet initialized).
|
||||
func (s *stderrSink) Enabled(m *logsink.Meta) bool {
|
||||
return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get()
|
||||
}
|
||||
|
||||
// Emit implements logsink.Text.Emit.
|
||||
func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
w := s.w
|
||||
if w == nil {
|
||||
w = os.Stderr
|
||||
}
|
||||
dn, err := w.Write(data)
|
||||
n += dn
|
||||
return n, err
|
||||
}
|
||||
|
||||
// severityWriters is an array of flushSyncWriter with a value for each
|
||||
// logsink.Severity.
|
||||
type severityWriters [4]flushSyncWriter
|
||||
|
||||
// fileSink is a logsink.Text that prints to a set of Google log files.
|
||||
type fileSink struct {
|
||||
mu sync.Mutex
|
||||
// file holds writer for each of the log types.
|
||||
file severityWriters
|
||||
flushChan chan logsink.Severity
|
||||
}
|
||||
|
||||
// Enabled implements logsink.Text.Enabled. It returns true if google.Init
|
||||
// has run and both --disable_log_to_disk and --logtostderr are false.
|
||||
func (s *fileSink) Enabled(m *logsink.Meta) bool {
|
||||
return !toStderr
|
||||
}
|
||||
|
||||
// Emit implements logsink.Text.Emit
|
||||
func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if err = s.createMissingFiles(m.Severity); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for sev := m.Severity; sev >= logsink.Info; sev-- {
|
||||
if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil {
|
||||
err = fErr // Take the first error.
|
||||
}
|
||||
}
|
||||
n = len(data)
|
||||
if int(m.Severity) > *logBufLevel {
|
||||
select {
|
||||
case s.flushChan <- m.Severity:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
|
||||
// file's Sync method and providing a wrapper for the Write method that provides log
|
||||
// file rotation. There are conflicting methods, so the file cannot be embedded.
|
||||
// s.mu is held for all its methods.
|
||||
type syncBuffer struct {
|
||||
sink *fileSink
|
||||
*bufio.Writer
|
||||
file *os.File
|
||||
names []string
|
||||
sev logsink.Severity
|
||||
nbytes uint64 // The number of bytes written to this file
|
||||
}
|
||||
|
||||
func (sb *syncBuffer) Sync() error {
|
||||
return sb.file.Sync()
|
||||
}
|
||||
|
||||
func (sb *syncBuffer) Write(p []byte) (n int, err error) {
|
||||
if sb.nbytes+uint64(len(p)) >= MaxSize {
|
||||
if err := sb.rotateFile(time.Now()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
n, err = sb.Writer.Write(p)
|
||||
sb.nbytes += uint64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (sb *syncBuffer) filenames() []string {
|
||||
return sb.names
|
||||
}
|
||||
|
||||
const footer = "\nCONTINUED IN NEXT FILE\n"
|
||||
|
||||
// rotateFile closes the syncBuffer's file and starts a new one.
|
||||
func (sb *syncBuffer) rotateFile(now time.Time) error {
|
||||
var err error
|
||||
pn := "<none>"
|
||||
file, name, err := create(sb.sev.String(), now)
|
||||
|
||||
if sb.file != nil {
|
||||
// The current log file becomes the previous log at the end of
|
||||
// this block, so save its name for use in the header of the next
|
||||
// file.
|
||||
pn = sb.file.Name()
|
||||
sb.Flush()
|
||||
// If there's an existing file, write a footer with the name of
|
||||
// the next file in the chain, followed by the constant string
|
||||
// \nCONTINUED IN NEXT FILE\n to make continuation detection simple.
|
||||
sb.file.Write([]byte("Next log: "))
|
||||
sb.file.Write([]byte(name))
|
||||
sb.file.Write([]byte(footer))
|
||||
sb.file.Close()
|
||||
}
|
||||
|
||||
sb.file = file
|
||||
sb.names = append(sb.names, name)
|
||||
sb.nbytes = 0
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
|
||||
|
||||
// Write header.
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
|
||||
fmt.Fprintf(&buf, "Running on machine: %s\n", host)
|
||||
fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
|
||||
fmt.Fprintf(&buf, "Previous log: %s\n", pn)
|
||||
fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
|
||||
n, err := sb.file.Write(buf.Bytes())
|
||||
sb.nbytes += uint64(n)
|
||||
return err
|
||||
}
|
||||
|
||||
// bufferSize sizes the buffer associated with each log file. It's large
|
||||
// so that log records can accumulate without the logging thread blocking
|
||||
// on disk I/O. The flushDaemon will block instead.
|
||||
const bufferSize = 256 * 1024
|
||||
|
||||
// createMissingFiles creates all the log files for severity from infoLog up to
|
||||
// upTo that have not already been created.
|
||||
// s.mu is held.
|
||||
func (s *fileSink) createMissingFiles(upTo logsink.Severity) error {
|
||||
if s.file[upTo] != nil {
|
||||
return nil
|
||||
}
|
||||
now := time.Now()
|
||||
// Files are created in increasing severity order, so we can be assured that
|
||||
// if a high severity logfile exists, then so do all of lower severity.
|
||||
for sev := logsink.Info; sev <= upTo; sev++ {
|
||||
if s.file[sev] != nil {
|
||||
continue
|
||||
}
|
||||
sb := &syncBuffer{
|
||||
sink: s,
|
||||
sev: sev,
|
||||
}
|
||||
if err := sb.rotateFile(now); err != nil {
|
||||
return err
|
||||
}
|
||||
s.file[sev] = sb
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// flushDaemon periodically flushes the log file buffers.
|
||||
func (s *fileSink) flushDaemon() {
|
||||
tick := time.NewTicker(30 * time.Second)
|
||||
defer tick.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-tick.C:
|
||||
s.Flush()
|
||||
case sev := <-s.flushChan:
|
||||
s.flush(sev)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Flush flushes all pending log I/O.
|
||||
func Flush() {
|
||||
sinks.file.Flush()
|
||||
}
|
||||
|
||||
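Not part of the diff: because the file sink buffers up to 256 KiB per severity and the flush daemon only ticks every 30 seconds, programs that log to files usually flush explicitly before exiting. A minimal sketch against the public glog API:

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()
	// Ensure buffered log records reach disk even if the program exits
	// before the next flush daemon tick.
	defer glog.Flush()

	glog.Info("work started")
	// ... do work ...
	glog.Info("work finished")
}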
// Flush flushes all the logs and attempts to "sync" their data to disk.
|
||||
func (s *fileSink) Flush() error {
|
||||
return s.flush(logsink.Info)
|
||||
}
|
||||
|
||||
// flush flushes all logs of severity threshold or greater.
|
||||
func (s *fileSink) flush(threshold logsink.Severity) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
var firstErr error
|
||||
updateErr := func(err error) {
|
||||
if err != nil && firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
|
||||
// Flush from fatal down, in case there's trouble flushing.
|
||||
for sev := logsink.Fatal; sev >= threshold; sev-- {
|
||||
file := s.file[sev]
|
||||
if file != nil {
|
||||
updateErr(file.Flush())
|
||||
updateErr(file.Sync())
|
||||
}
|
||||
}
|
||||
|
||||
return firstErr
|
||||
}
|
||||
|
||||
// Names returns the names of the log files holding the FATAL, ERROR,
|
||||
// WARNING, or INFO logs. Returns ErrNoLog if the log for the given
|
||||
// level doesn't exist (e.g. because no messages of that level have been
|
||||
// written). This may return multiple names if the log type requested
|
||||
// has rolled over.
|
||||
func Names(s string) ([]string, error) {
|
||||
severity, err := logsink.ParseSeverity(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sinks.file.mu.Lock()
|
||||
defer sinks.file.mu.Unlock()
|
||||
f := sinks.file.file[severity]
|
||||
if f == nil {
|
||||
return nil, ErrNoLog
|
||||
}
|
||||
|
||||
return f.filenames(), nil
|
||||
}
|
||||
39 vendor/github.com/golang/glog/glog_file_linux.go generated vendored
@@ -1,39 +0,0 @@
|
||||
// Go support for leveled logs, analogous to https://github.com/google/glog.
|
||||
//
|
||||
// Copyright 2023 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build linux
|
||||
|
||||
package glog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// abortProcess attempts to kill the current process in a way that will dump the
|
||||
// currently-running goroutines someplace useful (like stderr).
|
||||
//
|
||||
// It does this by sending SIGABRT to the current thread.
|
||||
//
|
||||
// If successful, abortProcess does not return.
|
||||
func abortProcess() error {
|
||||
runtime.LockOSThread()
|
||||
if err := syscall.Tgkill(syscall.Getpid(), syscall.Gettid(), syscall.SIGABRT); err != nil {
|
||||
return err
|
||||
}
|
||||
return errors.New("log: killed current thread with SIGABRT, but still running")
|
||||
}
|
||||
30 vendor/github.com/golang/glog/glog_file_other.go generated vendored
@@ -1,30 +0,0 @@
|
||||
// Go support for leveled logs, analogous to https://github.com/google/glog.
|
||||
//
|
||||
// Copyright 2023 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !(unix || windows)
|
||||
|
||||
package glog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// abortProcess returns an error on platforms that presumably don't support signals.
|
||||
func abortProcess() error {
|
||||
return fmt.Errorf("not sending SIGABRT (%s/%s does not support signals), falling back", runtime.GOOS, runtime.GOARCH)
|
||||
|
||||
}
|
||||
53 vendor/github.com/golang/glog/glog_file_posix.go generated vendored
@@ -1,53 +0,0 @@
|
||||
// Go support for leveled logs, analogous to https://github.com/google/glog.
|
||||
//
|
||||
// Copyright 2023 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build (unix || windows) && !linux
|
||||
|
||||
package glog
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// abortProcess attempts to kill the current process in a way that will dump the
|
||||
// currently-running goroutines someplace useful (like stderr).
|
||||
//
|
||||
// It does this by sending SIGABRT to the current process. Unfortunately, the
|
||||
// signal may or may not be delivered to the current thread; in order to do that
|
||||
// portably, we would need to add a cgo dependency and call pthread_kill.
|
||||
//
|
||||
// If successful, abortProcess does not return.
|
||||
func abortProcess() error {
|
||||
p, err := os.FindProcess(os.Getpid())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.Signal(syscall.SIGABRT); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sent the signal. Now we wait for it to arrive and any SIGABRT handlers to
|
||||
// run (and eventually terminate the process themselves).
|
||||
//
|
||||
// We could just "select{}" here, but there's an outside chance that would
|
||||
// trigger the runtime's deadlock detector if there happen not to be any
|
||||
// background goroutines running. So we'll sleep a while first to give
|
||||
// the signal some time.
|
||||
time.Sleep(10 * time.Second)
|
||||
select {}
|
||||
}
|
||||
398 vendor/github.com/golang/glog/glog_flags.go generated vendored
@@ -1,398 +0,0 @@
|
||||
// Go support for leveled logs, analogous to https://github.com/google/glog.
|
||||
//
|
||||
// Copyright 2023 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package glog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/golang/glog/internal/logsink"
|
||||
)
|
||||
|
||||
// modulePat contains a filter for the -vmodule flag.
|
||||
// It holds a verbosity level and a file pattern to match.
|
||||
type modulePat struct {
|
||||
pattern string
|
||||
literal bool // The pattern is a literal string
|
||||
full bool // The pattern wants to match the full path
|
||||
level Level
|
||||
}
|
||||
|
||||
// match reports whether the file matches the pattern. It uses a string
|
||||
// comparison if the pattern contains no metacharacters.
|
||||
func (m *modulePat) match(full, file string) bool {
|
||||
if m.literal {
|
||||
if m.full {
|
||||
return full == m.pattern
|
||||
}
|
||||
return file == m.pattern
|
||||
}
|
||||
if m.full {
|
||||
match, _ := filepath.Match(m.pattern, full)
|
||||
return match
|
||||
}
|
||||
match, _ := filepath.Match(m.pattern, file)
|
||||
return match
|
||||
}
|
||||
|
||||
// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
|
||||
// that require filepath.Match to be called to match the pattern.
|
||||
func isLiteral(pattern string) bool {
|
||||
return !strings.ContainsAny(pattern, `\*?[]`)
|
||||
}
|
||||
|
||||
// isFull reports whether the pattern matches the full file path, that is,
|
||||
// whether it contains /.
|
||||
func isFull(pattern string) bool {
|
||||
return strings.ContainsRune(pattern, '/')
|
||||
}
|
||||
|
||||
// verboseFlags represents the setting of the -v and -vmodule flags.
|
||||
type verboseFlags struct {
|
||||
// moduleLevelCache is a sync.Map storing the -vmodule Level for each V()
|
||||
// call site, identified by PC. If there is no matching -vmodule filter,
|
||||
// the cached value is exactly v. moduleLevelCache is replaced with a new
|
||||
// Map whenever the -vmodule or -v flag changes state.
|
||||
moduleLevelCache atomic.Value
|
||||
|
||||
// mu guards all fields below.
|
||||
mu sync.Mutex
|
||||
|
||||
// v stores the value of the -v flag. It may be read safely using
|
||||
// sync.LoadInt32, but is only modified under mu.
|
||||
v Level
|
||||
|
||||
// module stores the parsed -vmodule flag.
|
||||
module []modulePat
|
||||
|
||||
// moduleLength caches len(module). If greater than zero, it
|
||||
// means vmodule is enabled. It may be read safely using sync.LoadInt32, but
|
||||
// is only modified under mu.
|
||||
moduleLength int32
|
||||
}
|
||||
|
||||
// NOTE: For compatibility with the open-sourced v1 version of this
|
||||
// package (github.com/golang/glog) we need to retain that flag.Level
|
||||
// implements the flag.Value interface. See also go/log-vs-glog.
|
||||
|
||||
// String is part of the flag.Value interface.
|
||||
func (l *Level) String() string {
|
||||
return strconv.FormatInt(int64(l.Get().(Level)), 10)
|
||||
}
|
||||
|
||||
// Get is part of the flag.Value interface.
|
||||
func (l *Level) Get() any {
|
||||
if l == &vflags.v {
|
||||
// l is the value registered for the -v flag.
|
||||
return Level(atomic.LoadInt32((*int32)(l)))
|
||||
}
|
||||
return *l
|
||||
}
|
||||
|
||||
// Set is part of the flag.Value interface.
|
||||
func (l *Level) Set(value string) error {
|
||||
v, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if l == &vflags.v {
|
||||
// l is the value registered for the -v flag.
|
||||
vflags.mu.Lock()
|
||||
defer vflags.mu.Unlock()
|
||||
vflags.moduleLevelCache.Store(&sync.Map{})
|
||||
atomic.StoreInt32((*int32)(l), int32(v))
|
||||
return nil
|
||||
}
|
||||
*l = Level(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// vModuleFlag is the flag.Value for the --vmodule flag.
|
||||
type vModuleFlag struct{ *verboseFlags }
|
||||
|
||||
func (f vModuleFlag) String() string {
|
||||
// Do not panic on the zero value.
|
||||
// https://groups.google.com/g/golang-nuts/c/Atlr8uAjn6U/m/iId17Td5BQAJ.
|
||||
if f.verboseFlags == nil {
|
||||
return ""
|
||||
}
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
var b bytes.Buffer
|
||||
for i, f := range f.module {
|
||||
if i > 0 {
|
||||
b.WriteRune(',')
|
||||
}
|
||||
fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Get returns nil for this flag type since the struct is not exported.
|
||||
func (f vModuleFlag) Get() any { return nil }
|
||||
|
||||
var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
|
||||
|
||||
// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3
|
||||
func (f vModuleFlag) Set(value string) error {
|
||||
var filter []modulePat
|
||||
for _, pat := range strings.Split(value, ",") {
|
||||
if len(pat) == 0 {
|
||||
// Empty strings such as from a trailing comma can be ignored.
|
||||
continue
|
||||
}
|
||||
patLev := strings.Split(pat, "=")
|
||||
if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
|
||||
return errVmoduleSyntax
|
||||
}
|
||||
pattern := patLev[0]
|
||||
v, err := strconv.Atoi(patLev[1])
|
||||
if err != nil {
|
||||
return errors.New("syntax error: expect comma-separated list of filename=N")
|
||||
}
|
||||
// TODO: check syntax of filter?
|
||||
filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)})
|
||||
}
|
||||
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
f.module = filter
|
||||
atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module)))
|
||||
f.moduleLevelCache.Store(&sync.Map{})
|
||||
return nil
|
||||
}
|
||||
|
||||
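Not part of the diff: a sketch of how the -v and -vmodule settings parsed above are consumed through the public glog API. The flag values are examples only.

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// Equivalent to passing -v=1 -vmodule=collector=3 on the command line.
	flag.Set("v", "1")
	flag.Set("vmodule", "collector=3")
	flag.Parse()
	defer glog.Flush()

	// Emitted: the global verbosity level is 1.
	glog.V(1).Info("coarse progress message")

	// In this file (main.go) the effective level is 1, so this block is
	// skipped; in a file named collector.go the -vmodule entry would raise
	// the effective level to 3.
	if glog.V(3) {
		glog.Info("expensive detail, guarded so its arguments are not built needlessly")
	}
}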
func (f *verboseFlags) levelForPC(pc uintptr) Level {
|
||||
if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok {
|
||||
return level.(Level)
|
||||
}
|
||||
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
level := Level(f.v)
|
||||
fn := runtime.FuncForPC(pc)
|
||||
file, _ := fn.FileLine(pc)
|
||||
// The file is something like /a/b/c/d.go. We want just the d for
|
||||
// regular matches, /a/b/c/d for full matches.
|
||||
file = strings.TrimSuffix(file, ".go")
|
||||
full := file
|
||||
if slash := strings.LastIndex(file, "/"); slash >= 0 {
|
||||
file = file[slash+1:]
|
||||
}
|
||||
for _, filter := range f.module {
|
||||
if filter.match(full, file) {
|
||||
level = filter.level
|
||||
break // Use the first matching level.
|
||||
}
|
||||
}
|
||||
f.moduleLevelCache.Load().(*sync.Map).Store(pc, level)
|
||||
return level
|
||||
}
|
||||
|
||||
func (f *verboseFlags) enabled(callerDepth int, level Level) bool {
|
||||
if atomic.LoadInt32(&f.moduleLength) == 0 {
|
||||
// No vmodule values specified, so compare against v level.
|
||||
return Level(atomic.LoadInt32((*int32)(&f.v))) >= level
|
||||
}
|
||||
|
||||
pcs := [1]uintptr{}
|
||||
if runtime.Callers(callerDepth+2, pcs[:]) < 1 {
|
||||
return false
|
||||
}
|
||||
frame, _ := runtime.CallersFrames(pcs[:]).Next()
|
||||
return f.levelForPC(frame.Entry) >= level
|
||||
}
|
||||
|
||||
// traceLocation represents an entry in the -log_backtrace_at flag.
|
||||
type traceLocation struct {
|
||||
file string
|
||||
line int
|
||||
}
|
||||
|
||||
var errTraceSyntax = errors.New("syntax error: expect file.go:234")
|
||||
|
||||
func parseTraceLocation(value string) (traceLocation, error) {
|
||||
fields := strings.Split(value, ":")
|
||||
if len(fields) != 2 {
|
||||
return traceLocation{}, errTraceSyntax
|
||||
}
|
||||
file, lineStr := fields[0], fields[1]
|
||||
if !strings.Contains(file, ".") {
|
||||
return traceLocation{}, errTraceSyntax
|
||||
}
|
||||
line, err := strconv.Atoi(lineStr)
|
||||
if err != nil {
|
||||
return traceLocation{}, errTraceSyntax
|
||||
}
|
||||
if line < 0 {
|
||||
return traceLocation{}, errors.New("negative value for line")
|
||||
}
|
||||
return traceLocation{file, line}, nil
|
||||
}
|
||||
|
||||
// match reports whether the specified file and line matches the trace location.
|
||||
// The argument file name is the full path, not the basename specified in the flag.
|
||||
func (t traceLocation) match(file string, line int) bool {
|
||||
if t.line != line {
|
||||
return false
|
||||
}
|
||||
if i := strings.LastIndex(file, "/"); i >= 0 {
|
||||
file = file[i+1:]
|
||||
}
|
||||
return t.file == file
|
||||
}
|
||||
|
||||
func (t traceLocation) String() string {
|
||||
return fmt.Sprintf("%s:%d", t.file, t.line)
|
||||
}
|
||||
|
||||
// traceLocations represents the -log_backtrace_at flag.
|
||||
// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456
|
||||
// Note that unlike vmodule the file extension is included here.
|
||||
type traceLocations struct {
|
||||
mu sync.Mutex
|
||||
locsLen int32 // Safe for atomic read without mu.
|
||||
locs []traceLocation
|
||||
}
|
||||
|
||||
func (t *traceLocations) String() string {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
var buf bytes.Buffer
|
||||
for i, tl := range t.locs {
|
||||
if i > 0 {
|
||||
buf.WriteString(",")
|
||||
}
|
||||
buf.WriteString(tl.String())
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Get always returns nil for this flag type since the struct is not exported
|
||||
func (t *traceLocations) Get() any { return nil }
|
||||
|
||||
func (t *traceLocations) Set(value string) error {
|
||||
var locs []traceLocation
|
||||
for _, s := range strings.Split(value, ",") {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
loc, err := parseTraceLocation(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
locs = append(locs, loc)
|
||||
}
|
||||
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
atomic.StoreInt32(&t.locsLen, int32(len(locs)))
|
||||
t.locs = locs
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *traceLocations) match(file string, line int) bool {
|
||||
if atomic.LoadInt32(&t.locsLen) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
for _, tl := range t.locs {
|
||||
if tl.match(file, line) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// severityFlag is an atomic flag.Value implementation for logsink.Severity.
|
||||
type severityFlag int32
|
||||
|
||||
func (s *severityFlag) get() logsink.Severity {
|
||||
return logsink.Severity(atomic.LoadInt32((*int32)(s)))
|
||||
}
|
||||
func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) }
|
||||
func (s *severityFlag) Get() any { return s.get() }
|
||||
func (s *severityFlag) Set(value string) error {
|
||||
threshold, err := logsink.ParseSeverity(value)
|
||||
if err != nil {
|
||||
// Not a severity name. Try a raw number.
|
||||
v, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
threshold = logsink.Severity(v)
|
||||
if threshold < logsink.Info || threshold > logsink.Fatal {
|
||||
return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal)
|
||||
}
|
||||
}
|
||||
atomic.StoreInt32((*int32)(s), int32(threshold))
|
||||
return nil
|
||||
}
|
||||
|
||||
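Not part of the diff: the severityFlag above accepts either a severity name or a raw number. A small sketch of setting -stderrthreshold programmatically; the values are examples only.

package main

import (
	"flag"
	"fmt"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()
	defer glog.Flush()

	// Names are case-insensitive; raw numbers must lie between INFO and FATAL.
	for _, v := range []string{"warning", "2", "7"} {
		if err := flag.Set("stderrthreshold", v); err != nil {
			fmt.Printf("rejected %q: %v\n", v, err) // "7" is out of range
		}
	}

	glog.Error("ERROR is at or above the threshold, so this also goes to stderr")
}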
var (
|
||||
vflags verboseFlags // The -v and -vmodule flags.
|
||||
|
||||
logBacktraceAt traceLocations // The -log_backtrace_at flag.
|
||||
|
||||
// Boolean flags. Not handled atomically because the flag.Value interface
|
||||
// does not let us avoid the =true, and that shorthand is necessary for
|
||||
// compatibility. TODO: does this matter enough to fix? Seems unlikely.
|
||||
toStderr bool // The -logtostderr flag.
|
||||
alsoToStderr bool // The -alsologtostderr flag.
|
||||
|
||||
stderrThreshold severityFlag // The -stderrthreshold flag.
|
||||
)
|
||||
|
||||
// verboseEnabled returns whether the caller at the given depth should emit
|
||||
// verbose logs at the given level, with depth 0 identifying the caller of
|
||||
// verboseEnabled.
|
||||
func verboseEnabled(callerDepth int, level Level) bool {
|
||||
return vflags.enabled(callerDepth+1, level)
|
||||
}
|
||||
|
||||
// backtraceAt returns whether the logging call at the given function and line
|
||||
// should also emit a backtrace of the current call stack.
|
||||
func backtraceAt(file string, line int) bool {
|
||||
return logBacktraceAt.match(file, line)
|
||||
}
|
||||
|
||||
func init() {
|
||||
vflags.moduleLevelCache.Store(&sync.Map{})
|
||||
|
||||
flag.Var(&vflags.v, "v", "log level for V logs")
|
||||
flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
|
||||
|
||||
flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
|
||||
|
||||
stderrThreshold = severityFlag(logsink.Error)
|
||||
|
||||
flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files")
|
||||
flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
|
||||
flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
|
||||
}
|
||||
387 vendor/github.com/golang/glog/internal/logsink/logsink.go generated vendored
@@ -1,387 +0,0 @@
|
||||
// Copyright 2023 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logsink
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog/internal/stackdump"
|
||||
)
|
||||
|
||||
// MaxLogMessageLen is the limit on length of a formatted log message, including
|
||||
// the standard line prefix and trailing newline.
|
||||
//
|
||||
// Chosen to match C++ glog.
|
||||
const MaxLogMessageLen = 15000
|
||||
|
||||
// A Severity is a severity at which a message can be logged.
|
||||
type Severity int8
|
||||
|
||||
// These constants identify the log levels in order of increasing severity.
|
||||
// A message written to a high-severity log file is also written to each
|
||||
// lower-severity log file.
|
||||
const (
|
||||
Info Severity = iota
|
||||
Warning
|
||||
Error
|
||||
|
||||
// Fatal contains logs written immediately before the process terminates.
|
||||
//
|
||||
// Sink implementations should not terminate the process themselves: the log
|
||||
// package will perform any necessary cleanup and terminate the process as
|
||||
// appropriate.
|
||||
Fatal
|
||||
)
|
||||
|
||||
func (s Severity) String() string {
|
||||
switch s {
|
||||
case Info:
|
||||
return "INFO"
|
||||
case Warning:
|
||||
return "WARNING"
|
||||
case Error:
|
||||
return "ERROR"
|
||||
case Fatal:
|
||||
return "FATAL"
|
||||
}
|
||||
return fmt.Sprintf("%T(%d)", s, s)
|
||||
}
|
||||
|
||||
// ParseSeverity returns the case-insensitive Severity value for the given string.
|
||||
func ParseSeverity(name string) (Severity, error) {
|
||||
name = strings.ToUpper(name)
|
||||
for s := Info; s <= Fatal; s++ {
|
||||
if s.String() == name {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("logsink: invalid severity %q", name)
|
||||
}
|
||||
|
||||
// Meta is metadata about a logging call.
|
||||
type Meta struct {
|
||||
// Time is the time at which the log call was made.
|
||||
Time time.Time
|
||||
|
||||
// File is the source file from which the log entry originates.
|
||||
File string
|
||||
// Line is the line offset within the source file.
|
||||
Line int
|
||||
// Depth is the number of stack frames between the logsink and the log call.
|
||||
Depth int
|
||||
|
||||
Severity Severity
|
||||
|
||||
// Verbose indicates whether the call was made via "log.V". Log entries below
|
||||
// the current verbosity threshold are not sent to the sink.
|
||||
Verbose bool
|
||||
|
||||
// Thread ID. This can be populated with a thread ID from another source,
|
||||
// such as a system we are importing logs from. In the normal case, this
|
||||
// will be set to the process ID (PID), since Go doesn't have threads.
|
||||
Thread int64
|
||||
|
||||
// Stack trace starting in the logging function. May be nil.
|
||||
// A logsink should implement the StackWanter interface to request this.
|
||||
//
|
||||
// Even if WantStack returns false, this field may be set (e.g. if another
|
||||
// sink wants a stack trace).
|
||||
Stack *stackdump.Stack
|
||||
}
|
||||
|
||||
// Structured is a logging destination that accepts structured data as input.
|
||||
type Structured interface {
|
||||
// Printf formats according to a fmt.Printf format specifier and writes a log
|
||||
// entry. The precise result of formatting depends on the sink, but should
|
||||
// aim for consistency with fmt.Printf.
|
||||
//
|
||||
// Printf returns the number of bytes occupied by the log entry, which
|
||||
// may not be equal to the total number of bytes written.
|
||||
//
|
||||
// Printf returns any error encountered *if* it is severe enough that the log
|
||||
// package should terminate the process.
|
||||
//
|
||||
// The sink must not modify the *Meta parameter, nor reference it after
|
||||
// Printf has returned: it may be reused in subsequent calls.
|
||||
Printf(meta *Meta, format string, a ...any) (n int, err error)
|
||||
}
|
||||
|
||||
// StackWanter can be implemented by a logsink.Structured to indicate that it
|
||||
// wants a stack trace to accompany at least some of the log messages it receives.
|
||||
type StackWanter interface {
|
||||
// WantStack returns true if the sink requires a stack trace for a log message
|
||||
// with this metadata.
|
||||
//
|
||||
// NOTE: Returning true implies that meta.Stack will be non-nil. Returning
|
||||
// false does NOT imply that meta.Stack will be nil.
|
||||
WantStack(meta *Meta) bool
|
||||
}
|
||||
|
||||
// Text is a logging destination that accepts pre-formatted log lines (instead of
|
||||
// structured data).
|
||||
type Text interface {
|
||||
// Enabled returns whether this sink should output messages for the given
|
||||
// Meta. If the sink returns false for a given Meta, the Printf function will
|
||||
// not call Emit on it for the corresponding log message.
|
||||
Enabled(*Meta) bool
|
||||
|
||||
// Emit writes a pre-formatted text log entry (including any applicable
|
||||
// header) to the log. It returns the number of bytes occupied by the entry
|
||||
// (which may differ from the length of the passed-in slice).
|
||||
//
|
||||
// Emit returns any error encountered *if* it is severe enough that the log
|
||||
// package should terminate the process.
|
||||
//
|
||||
// The sink must not modify the *Meta parameter, nor reference it after
|
||||
// Printf has returned: it may be reused in subsequent calls.
|
||||
//
|
||||
// NOTE: When developing a text sink, keep in mind the surface in which the
|
||||
// logs will be displayed, and whether it's important that the sink be
|
||||
// resistant to tampering in the style of b/211428300. Standard text sinks
|
||||
// (like `stderrSink`) do not protect against this (e.g. by escaping
|
||||
// characters) because the cases where they would show user-influenced bytes
|
||||
// are vanishingly small.
|
||||
Emit(*Meta, []byte) (n int, err error)
|
||||
}
|
||||
|
||||
// bufs is a pool of *bytes.Buffer used in formatting log entries.
|
||||
var bufs sync.Pool // Pool of *bytes.Buffer.
|
||||
|
||||
// textPrintf formats a text log entry and emits it to all specified Text sinks.
|
||||
//
|
||||
// The returned n is the maximum across all Emit calls.
|
||||
// The returned err is the first non-nil error encountered.
|
||||
// Sinks that are disabled by configuration should return (0, nil).
|
||||
func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) {
|
||||
// We expect at most file, stderr, and perhaps syslog. If there are more,
|
||||
// we'll end up allocating - no big deal.
|
||||
const maxExpectedTextSinks = 3
|
||||
var noAllocSinks [maxExpectedTextSinks]Text
|
||||
|
||||
sinks := noAllocSinks[:0]
|
||||
for _, s := range textSinks {
|
||||
if s.Enabled(m) {
|
||||
sinks = append(sinks, s)
|
||||
}
|
||||
}
|
||||
if len(sinks) == 0 && m.Severity != Fatal {
|
||||
return 0, nil // No TextSinks specified; don't bother formatting.
|
||||
}
|
||||
|
||||
bufi := bufs.Get()
|
||||
var buf *bytes.Buffer
|
||||
if bufi == nil {
|
||||
buf = bytes.NewBuffer(nil)
|
||||
bufi = buf
|
||||
} else {
|
||||
buf = bufi.(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
}
|
||||
|
||||
// Lmmdd hh:mm:ss.uuuuuu PID/GID file:line]
|
||||
//
|
||||
// The "PID" entry arguably ought to be TID for consistency with other
|
||||
// environments, but TID is not meaningful in a Go program due to the
|
||||
// multiplexing of goroutines across threads.
|
||||
//
|
||||
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
|
||||
// It's worth about 3X. Fprintf is hard.
|
||||
const severityChar = "IWEF"
|
||||
buf.WriteByte(severityChar[m.Severity])
|
||||
|
||||
_, month, day := m.Time.Date()
|
||||
hour, minute, second := m.Time.Clock()
|
||||
twoDigits(buf, int(month))
|
||||
twoDigits(buf, day)
|
||||
buf.WriteByte(' ')
|
||||
twoDigits(buf, hour)
|
||||
buf.WriteByte(':')
|
||||
twoDigits(buf, minute)
|
||||
buf.WriteByte(':')
|
||||
twoDigits(buf, second)
|
||||
buf.WriteByte('.')
|
||||
nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0')
|
||||
buf.WriteByte(' ')
|
||||
|
||||
nDigits(buf, 7, uint64(m.Thread), ' ')
|
||||
buf.WriteByte(' ')
|
||||
|
||||
{
|
||||
file := m.File
|
||||
if i := strings.LastIndex(file, "/"); i >= 0 {
|
||||
file = file[i+1:]
|
||||
}
|
||||
buf.WriteString(file)
|
||||
}
|
||||
|
||||
buf.WriteByte(':')
|
||||
{
|
||||
var tmp [19]byte
|
||||
buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10))
|
||||
}
|
||||
buf.WriteString("] ")
|
||||
|
||||
msgStart := buf.Len()
|
||||
fmt.Fprintf(buf, format, args...)
|
||||
if buf.Len() > MaxLogMessageLen-1 {
|
||||
buf.Truncate(MaxLogMessageLen - 1)
|
||||
}
|
||||
msgEnd := buf.Len()
|
||||
if b := buf.Bytes(); b[len(b)-1] != '\n' {
|
||||
buf.WriteByte('\n')
|
||||
}
|
||||
|
||||
for _, s := range sinks {
|
||||
sn, sErr := s.Emit(m, buf.Bytes())
|
||||
if sn > n {
|
||||
n = sn
|
||||
}
|
||||
if sErr != nil && err == nil {
|
||||
err = sErr
|
||||
}
|
||||
}
|
||||
|
||||
if m.Severity == Fatal {
|
||||
savedM := *m
|
||||
fatalMessageStore(savedEntry{
|
||||
meta: &savedM,
|
||||
msg: buf.Bytes()[msgStart:msgEnd],
|
||||
})
|
||||
} else {
|
||||
bufs.Put(bufi)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
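Not part of the diff: the header built by hand above follows the layout [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg. A simplified sketch that produces the same shape with fmt (the vendored code hand-rolls the digits to avoid Fprintf for speed):

package main

import (
	"fmt"
	"time"
)

// prefix renders a glog-style header such as
// "I0415 10:20:30.123456    4242 main.go:42] ".
func prefix(sev byte, t time.Time, tid int64, file string, line int) string {
	return fmt.Sprintf("%c%02d%02d %02d:%02d:%02d.%06d %7d %s:%d] ",
		sev, int(t.Month()), t.Day(), t.Hour(), t.Minute(), t.Second(),
		t.Nanosecond()/1000, tid, file, line)
}

func main() {
	t := time.Date(2023, 4, 15, 10, 20, 30, 123456000, time.UTC)
	fmt.Println(prefix('I', t, 4242, "main.go", 42) + "starting up")
}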
const digits = "0123456789"
|
||||
|
||||
// twoDigits formats a zero-prefixed two-digit integer to buf.
|
||||
func twoDigits(buf *bytes.Buffer, d int) {
|
||||
buf.WriteByte(digits[(d/10)%10])
|
||||
buf.WriteByte(digits[d%10])
|
||||
}
|
||||
|
||||
// nDigits formats an n-digit integer to buf, padding with pad on the left. It
|
||||
// assumes d != 0.
|
||||
func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) {
|
||||
var tmp [20]byte
|
||||
|
||||
cutoff := len(tmp) - n
|
||||
j := len(tmp) - 1
|
||||
for ; d > 0; j-- {
|
||||
tmp[j] = digits[d%10]
|
||||
d /= 10
|
||||
}
|
||||
for ; j >= cutoff; j-- {
|
||||
tmp[j] = pad
|
||||
}
|
||||
j++
|
||||
buf.Write(tmp[j:])
|
||||
}
|
||||
|
||||
// Printf writes a log entry to all registered TextSinks in this package, then
|
||||
// to all registered StructuredSinks.
|
||||
//
|
||||
// The returned n is the maximum across all Emit and Printf calls.
|
||||
// The returned err is the first non-nil error encountered.
|
||||
// Sinks that are disabled by configuration should return (0, nil).
|
||||
func Printf(m *Meta, format string, args ...any) (n int, err error) {
|
||||
m.Depth++
|
||||
n, err = textPrintf(m, TextSinks, format, args...)
|
||||
|
||||
for _, sink := range StructuredSinks {
|
||||
// TODO: Support TextSinks that implement StackWanter?
|
||||
if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) {
|
||||
if m.Stack == nil {
|
||||
// First, try to find a stacktrace in args, otherwise generate one.
|
||||
for _, arg := range args {
|
||||
if stack, ok := arg.(stackdump.Stack); ok {
|
||||
m.Stack = &stack
|
||||
break
|
||||
}
|
||||
}
|
||||
if m.Stack == nil {
|
||||
stack := stackdump.Caller( /* skipDepth = */ m.Depth)
|
||||
m.Stack = &stack
|
||||
}
|
||||
}
|
||||
}
|
||||
sn, sErr := sink.Printf(m, format, args...)
|
||||
if sn > n {
|
||||
n = sn
|
||||
}
|
||||
if sErr != nil && err == nil {
|
||||
err = sErr
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// The sets of sinks to which logs should be written.
|
||||
//
|
||||
// These must only be modified during package init, and are read-only thereafter.
|
||||
var (
|
||||
// StructuredSinks is the set of Structured sink instances to which logs
|
||||
// should be written.
|
||||
StructuredSinks []Structured
|
||||
|
||||
// TextSinks is the set of Text sink instances to which logs should be
|
||||
// written.
|
||||
//
|
||||
// These are registered separately from Structured sink implementations to
|
||||
// avoid the need to repeat the work of formatting a message for each Text
|
||||
// sink that writes it. The package-level Printf function writes to both sets
|
||||
// independently, so a given log destination should only register a Structured
|
||||
// *or* a Text sink (not both).
|
||||
TextSinks []Text
|
||||
)
|
||||
|
||||
type savedEntry struct {
|
||||
meta *Meta
|
||||
msg []byte
|
||||
}
|
||||
|
||||
// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks.
|
||||
//
|
||||
// The purpose of this sink is to allow applications to intercept logging calls before they are
|
||||
// serialized and sent to Text sinks. For example, if one needs to redact PII from logging
|
||||
// arguments before they reach STDERR, one solution would be to do the redacting in a Structured
|
||||
// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that
|
||||
// StructuredTextWrapper instance. This is how one could set this up in their application:
|
||||
//
|
||||
// func init() {
|
||||
//
|
||||
// wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks}
|
||||
// // sanitizersink will intercept logs and remove PII
|
||||
// sanitizer := sanitizersink{Sink: &wrapper}
|
||||
// logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer)
|
||||
// logsink.TextSinks = nil
|
||||
//
|
||||
// }
|
||||
type StructuredTextWrapper struct {
|
||||
// TextSinks is the set of Text sinks that should receive logs from this
|
||||
// StructuredTextWrapper instance.
|
||||
TextSinks []Text
|
||||
}
|
||||
|
||||
// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper.
|
||||
func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) {
|
||||
return textPrintf(meta, w.TextSinks, format, args...)
|
||||
}
|
||||
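Not part of the diff: internal/logsink cannot be imported from outside the glog module, so the following standalone sketch only mirrors the interfaces to illustrate the wrapping pattern described for StructuredTextWrapper, with a hypothetical redacting sink in front of a stderr Text sink.

package main

import (
	"fmt"
	"os"
	"strings"
)

// Local stand-in for a logsink.Text-style sink.
type textSink interface {
	Emit(line []byte) (int, error)
}

type stderrText struct{}

func (stderrText) Emit(line []byte) (int, error) { return os.Stderr.Write(line) }

// redactingSink rewrites the message before forwarding it to its Text sinks,
// the role a Structured sink plays in front of a StructuredTextWrapper.
type redactingSink struct {
	sinks []textSink
}

func (r *redactingSink) Printf(format string, args ...any) (int, error) {
	msg := strings.ReplaceAll(fmt.Sprintf(format, args...), "secret-token", "[REDACTED]")
	var n int
	var err error
	for _, s := range r.sinks {
		sn, sErr := s.Emit([]byte(msg + "\n"))
		if sn > n {
			n = sn
		}
		if sErr != nil && err == nil {
			err = sErr
		}
	}
	return n, err
}

func main() {
	sink := &redactingSink{sinks: []textSink{stderrText{}}}
	sink.Printf("user logged in with %s", "secret-token")
}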
35 vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go generated vendored
@@ -1,35 +0,0 @@
|
||||
package logsink
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func fatalMessageStore(e savedEntry) {
|
||||
// Only put a new one in if we haven't assigned before.
|
||||
atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e))
|
||||
}
|
||||
|
||||
var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer
|
||||
|
||||
// FatalMessage returns the Meta and message contents of the first message
|
||||
// logged with Fatal severity, or false if none has occurred.
|
||||
func FatalMessage() (*Meta, []byte, bool) {
|
||||
e := (*savedEntry)(atomic.LoadPointer(&fatalMessage))
|
||||
if e == nil {
|
||||
return nil, nil, false
|
||||
}
|
||||
return e.meta, e.msg, true
|
||||
}
|
||||
|
||||
// DoNotUseRacyFatalMessage is FatalMessage, but worse.
|
||||
//
|
||||
//go:norace
|
||||
//go:nosplit
|
||||
func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) {
|
||||
e := (*savedEntry)(fatalMessage)
|
||||
if e == nil {
|
||||
return nil, nil, false
|
||||
}
|
||||
return e.meta, e.msg, true
|
||||
}
|
||||
127 vendor/github.com/golang/glog/internal/stackdump/stackdump.go generated vendored
@@ -1,127 +0,0 @@
|
||||
// Copyright 2023 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package stackdump provides wrappers for runtime.Stack and runtime.Callers
|
||||
// with uniform support for skipping caller frames.
|
||||
//
|
||||
// ⚠ Unlike the functions in the runtime package, these may allocate a
|
||||
// non-trivial quantity of memory: use them with care. ⚠
|
||||
package stackdump
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to
|
||||
// runtime.Stack itself or 0 if it does not.
|
||||
//
|
||||
// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc
|
||||
// compiler does not.
|
||||
var runtimeStackSelfFrames = func() int {
|
||||
for n := 1 << 10; n < 1<<20; n *= 2 {
|
||||
buf := make([]byte, n)
|
||||
n := runtime.Stack(buf, false)
|
||||
if bytes.Contains(buf[:n], []byte("runtime.Stack")) {
|
||||
return 1
|
||||
} else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}()
|
||||
|
||||
// Stack is a stack dump for a single goroutine.
|
||||
type Stack struct {
|
||||
// Text is a representation of the stack dump in a human-readable format.
|
||||
Text []byte
|
||||
|
||||
// PC is a representation of the stack dump using raw program counter values.
|
||||
PC []uintptr
|
||||
}
|
||||
|
||||
func (s Stack) String() string { return string(s.Text) }
|
||||
|
||||
// Caller returns the Stack dump for the calling goroutine, starting skipDepth
|
||||
// frames before the caller of Caller. (Caller(0) provides a dump starting at
|
||||
// the caller of this function.)
|
||||
func Caller(skipDepth int) Stack {
|
||||
return Stack{
|
||||
Text: CallerText(skipDepth + 1),
|
||||
PC: CallerPC(skipDepth + 1),
|
||||
}
|
||||
}
|
||||
|
||||
// CallerText returns a textual dump of the stack starting skipDepth frames before
|
||||
// the caller. (CallerText(0) provides a dump starting at the caller of this
|
||||
// function.)
|
||||
func CallerText(skipDepth int) []byte {
|
||||
for n := 1 << 10; ; n *= 2 {
|
||||
buf := make([]byte, n)
|
||||
n := runtime.Stack(buf, false)
|
||||
if n < len(buf) {
|
||||
return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CallerPC returns a dump of the program counters of the stack starting
|
||||
// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at
|
||||
// the caller of this function.)
|
||||
func CallerPC(skipDepth int) []uintptr {
|
||||
for n := 1 << 8; ; n *= 2 {
|
||||
buf := make([]uintptr, n)
|
||||
n := runtime.Callers(skipDepth+2, buf)
|
||||
if n < len(buf) {
|
||||
return buf[:n]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pruneFrames removes the topmost skipDepth frames of the first goroutine in a
|
||||
// textual stack dump. It overwrites the passed-in slice.
|
||||
//
|
||||
// If there are fewer than skipDepth frames in the first goroutine's stack,
|
||||
// pruneFrames prunes it to an empty stack and leaves the remaining contents
|
||||
// intact.
|
||||
func pruneFrames(skipDepth int, stack []byte) []byte {
|
||||
headerLen := 0
|
||||
for i, c := range stack {
|
||||
if c == '\n' {
|
||||
headerLen = i + 1
|
||||
break
|
||||
}
|
||||
}
|
||||
if headerLen == 0 {
|
||||
return stack // No header line - not a well-formed stack trace.
|
||||
}
|
||||
|
||||
skipLen := headerLen
|
||||
skipNewlines := skipDepth * 2
|
||||
for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ {
|
||||
c := stack[skipLen]
|
||||
if c != '\n' {
|
||||
continue
|
||||
}
|
||||
skipNewlines--
|
||||
skipLen++
|
||||
if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
pruned := stack[skipLen-headerLen:]
|
||||
copy(pruned, stack[:headerLen])
|
||||
return pruned
|
||||
}
|
||||
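Not part of the diff: stackdump is an internal package, but its core behaviour can be approximated directly with the runtime package. A rough standalone sketch of the buffer-growing strategy used by CallerText and the raw-PC capture used by CallerPC:

package main

import (
	"fmt"
	"runtime"
)

// callerText grows the buffer until runtime.Stack reports a complete trace,
// the same strategy CallerText uses above.
func callerText() []byte {
	for n := 1 << 10; ; n *= 2 {
		buf := make([]byte, n)
		if w := runtime.Stack(buf, false); w < len(buf) {
			return buf[:w]
		}
	}
}

func main() {
	pcs := make([]uintptr, 64)
	n := runtime.Callers(1, pcs) // raw program counters, like CallerPC
	fmt.Printf("captured %d program counters\n%s", n, callerText())
}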