Mirror of https://github.com/open-cluster-management-io/ocm.git

🌱 Bump k8s.io/klog/v2 from 2.120.1 to 2.130.1 (#564)
Bumps [k8s.io/klog/v2](https://github.com/kubernetes/klog) from 2.120.1 to 2.130.1.
- [Release notes](https://github.com/kubernetes/klog/releases)
- [Changelog](https://github.com/kubernetes/klog/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes/klog/compare/v2.120.1...v2.130.1)

---
updated-dependencies:
- dependency-name: k8s.io/klog/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

go.mod (2 changed lines)

```diff
@@ -31,7 +31,7 @@ require (
 	k8s.io/apiserver v0.30.2
 	k8s.io/client-go v0.30.2
 	k8s.io/component-base v0.30.2
-	k8s.io/klog/v2 v2.120.1
+	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-aggregator v0.30.2
 	k8s.io/utils v0.0.0-20240310230437-4693a0247e57
 	open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a
```
go.sum (4 changed lines)

```diff
@@ -449,8 +449,8 @@ k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
 k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
 k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII=
 k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kms v0.30.2 h1:VSZILO/tkzrz5Tu2j+yFQZ2Dc5JerQZX2GqhFJbQrfw=
 k8s.io/kms v0.30.2/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
 k8s.io/kube-aggregator v0.30.2 h1:0+yk/ED6foCprY8VmkDPUhngjaAPKsNTXB/UrtvbIz0=
```
vendor/k8s.io/klog/v2/klog.go (76 changed lines, generated, vendored)

```diff
@@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error {
 	return nil
 }
 
-// flushSyncWriter is the interface satisfied by logging destinations.
-type flushSyncWriter interface {
-	Flush() error
-	Sync() error
-	io.Writer
-}
-
 var logging loggingT
 var commandLine flag.FlagSet
 
@@ -486,7 +479,7 @@ type settings struct {
 	// Access to all of the following fields must be protected via a mutex.
 
 	// file holds writer for each of the log types.
-	file [severity.NumSeverity]flushSyncWriter
+	file [severity.NumSeverity]io.Writer
 	// flushInterval is the interval for periodic flushing. If zero,
 	// the global default will be used.
 	flushInterval time.Duration
@@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
 	buffer.PutBuffer(b)
 }
 
-// redirectBuffer is used to set an alternate destination for the logs
-type redirectBuffer struct {
-	w io.Writer
-}
-
-func (rb *redirectBuffer) Sync() error {
-	return nil
-}
-
-func (rb *redirectBuffer) Flush() error {
-	return nil
-}
-
-func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
-	return rb.w.Write(bytes)
-}
-
 // SetOutput sets the output destination for all severities
 func SetOutput(w io.Writer) {
 	logging.mu.Lock()
 	defer logging.mu.Unlock()
 	for s := severity.FatalLog; s >= severity.InfoLog; s-- {
-		rb := &redirectBuffer{
-			w: w,
-		}
-		logging.file[s] = rb
+		logging.file[s] = w
 	}
 }
 
@@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) {
 	if !ok {
 		panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
 	}
-	rb := &redirectBuffer{
-		w: w,
-	}
-	logging.file[sev] = rb
+	logging.file[sev] = w
 }
 
 // LogToStderr sets whether to log exclusively to stderr, bypassing outputs
```
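The hunks above drop klog's internal redirectBuffer wrapper: SetOutput and SetOutputBySeverity now store the supplied io.Writer directly. The exported API is unchanged, so consumers keep redirecting output the same way. A minimal, hypothetical usage sketch (not part of this commit; it assumes stderr logging is turned off so the installed writer actually receives the output):

```go
package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	// klog only writes to the writers installed via SetOutput /
	// SetOutputBySeverity when it is not logging to stderr.
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "false")
	_ = flag.Set("alsologtostderr", "false")
	flag.Parse()

	var buf bytes.Buffer
	klog.SetOutput(&buf) // since v2.130.1 the writer is stored as-is, without a wrapper

	klog.Info("hello from klog")
	klog.Flush()

	fmt.Print(buf.String()) // prints the captured log line
}
```

The remaining klog.go hunks move the potentially slow file Sync calls out of the logging mutex: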
```diff
@@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) {
 		logExitFunc(err)
 		return
 	}
-	l.flushAll()
+	needToSync := l.flushAll()
+	l.syncAll(needToSync)
 	OsExit(2)
 }
 
@@ -1028,10 +999,6 @@ type syncBuffer struct {
 	maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
 }
 
-func (sb *syncBuffer) Sync() error {
-	return sb.file.Sync()
-}
-
 // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
 func CalculateMaxSize() uint64 {
 	if logging.logFile != "" {
@@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) {
 // lockAndFlushAll is like flushAll but locks l.mu first.
 func (l *loggingT) lockAndFlushAll() {
 	l.mu.Lock()
-	l.flushAll()
+	needToSync := l.flushAll()
 	l.mu.Unlock()
+	// Some environments are slow when syncing and holding the lock might cause contention.
+	l.syncAll(needToSync)
 }
 
-// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// flushAll flushes all the logs
 // l.mu is held.
-func (l *loggingT) flushAll() {
+//
+// The result is the number of files which need to be synced and the pointers to them.
+func (l *loggingT) flushAll() fileArray {
+	var needToSync fileArray
+
 	// Flush from fatal down, in case there's trouble flushing.
 	for s := severity.FatalLog; s >= severity.InfoLog; s-- {
 		file := l.file[s]
-		if file != nil {
-			_ = file.Flush() // ignore error
-			_ = file.Sync()  // ignore error
+		if sb, ok := file.(*syncBuffer); ok && sb.file != nil {
+			_ = sb.Flush() // ignore error
+			needToSync.files[needToSync.num] = sb.file
+			needToSync.num++
 		}
 	}
 	if logging.loggerOptions.flush != nil {
 		logging.loggerOptions.flush()
 	}
+	return needToSync
 }
 
+type fileArray struct {
+	num   int
+	files [severity.NumSeverity]*os.File
+}
+
+// syncAll attempts to "sync" their data to disk.
+func (l *loggingT) syncAll(needToSync fileArray) {
+	// Flush from fatal down, in case there's trouble flushing.
+	for i := 0; i < needToSync.num; i++ {
+		_ = needToSync.files[i].Sync() // ignore error
+	}
+}
+
 // CopyStandardLogTo arranges for messages written to the Go "log" package's
```
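This is the main behavioral change in klog 2.130.1: flushAll still runs under the logging mutex, but the slow per-file Sync calls are collected and performed afterwards by syncAll, so concurrent loggers are not blocked while data is fsynced. A standalone sketch of the same pattern, with illustrative names only (not klog code):

```go
package main

import (
	"os"
	"sync"
)

// bufferedSink loosely mimics the shape of klog's loggingT for illustration.
type bufferedSink struct {
	mu    sync.Mutex
	files []*os.File // destinations that may need an fsync
}

// flushAll is called with mu held; it only collects the files that still need
// the expensive Sync and leaves that syscall to the caller.
func (s *bufferedSink) flushAll() []*os.File {
	needToSync := make([]*os.File, 0, len(s.files))
	for _, f := range s.files {
		if f != nil {
			needToSync = append(needToSync, f)
		}
	}
	return needToSync
}

func (s *bufferedSink) lockAndFlushAll() {
	s.mu.Lock()
	needToSync := s.flushAll()
	s.mu.Unlock()
	// Sync can be slow on some filesystems; running it after releasing the
	// lock avoids blocking writers, which is what the flushAll/syncAll split
	// in the hunk above achieves.
	for _, f := range needToSync {
		_ = f.Sync() // ignore error, as klog does
	}
}

func main() {
	f, err := os.CreateTemp("", "sink")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	s := &bufferedSink{files: []*os.File{f}}
	if _, err := f.WriteString("hello\n"); err != nil {
		panic(err)
	}
	s.lockAndFlushAll()
}
```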
vendor/k8s.io/klog/v2/ktesting/testinglogger.go (32 changed lines, generated, vendored)

```diff
@@ -1,6 +1,6 @@
 /*
 Copyright 2019 The Kubernetes Authors.
-Copyright 2020 Intel Coporation.
+Copyright 2020 Intel Corporation.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -217,8 +217,19 @@ type tloggerShared struct {
 	// t gets cleared when the test is completed.
 	t TL
 
-	// We warn once when a leaked goroutine is detected because
-	// it logs after test completion.
+	// The time when the test completed.
+	stopTime time.Time
+
+	// We warn once when a leaked goroutine logs after test completion.
+	//
+	// Not terminating immediately is fairly normal: many controllers
+	// log "terminating" messages while they shut down, which often is
+	// right after test completion, if that is when the test cancels the
+	// context and then doesn't wait for goroutines (which is often
+	// not possible).
+	//
+	// Therefore there is the [stopGracePeriod] during which messages get
+	// logged to the global logger without the warning.
 	goroutineWarningDone bool
 
 	formatter serialize.Formatter
@@ -228,10 +239,15 @@ type tloggerShared struct {
 	callDepth int
 }
 
+// Log output of a leaked goroutine during this period after test completion
+// does not trigger the warning.
+const stopGracePeriod = 10 * time.Second
+
 func (ls *tloggerShared) stop() {
 	ls.mutex.Lock()
 	defer ls.mutex.Unlock()
 	ls.t = nil
+	ls.stopTime = time.Now()
 }
 
 // tlogger is the actual LogSink implementation.
@@ -241,6 +257,8 @@ type tlogger struct {
 	values []interface{}
 }
 
+// fallbackLogger is called while l.shared.mutex is locked and after it has
+// been determined that the original testing.TB is no longer usable.
 func (l tlogger) fallbackLogger() logr.Logger {
 	logger := klog.Background().WithValues(l.values...).WithName(l.shared.testName + " leaked goroutine")
 	if l.prefix != "" {
@@ -250,8 +268,12 @@ func (l tlogger) fallbackLogger() logr.Logger {
 	logger = logger.WithCallDepth(l.shared.callDepth + 1)
 
 	if !l.shared.goroutineWarningDone {
-		logger.WithCallDepth(1).Error(nil, "WARNING: test kept at least one goroutine running after test completion", "callstack", string(dbg.Stacks(false)))
-		l.shared.goroutineWarningDone = true
+		duration := time.Since(l.shared.stopTime)
+		if duration > stopGracePeriod {
+
+			logger.WithCallDepth(1).Info("WARNING: test kept at least one goroutine running after test completion", "timeSinceCompletion", duration.Round(time.Second), "callstack", string(dbg.Stacks(false)))
+			l.shared.goroutineWarningDone = true
+		}
 	}
 	return logger
 }
```
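The ktesting change adds a grace period: output from a goroutine that outlives its test is redirected to the global klog logger, and the loud leaked-goroutine warning is only emitted if that output arrives more than stopGracePeriod (10 seconds) after the test finished. A hedged sketch of the situation this handles (hypothetical test, not from this repository):

```go
package example_test

import (
	"testing"
	"time"

	"k8s.io/klog/v2/ktesting"
)

// A controller-style goroutine that logs shortly after the test body returns.
// With klog v2.130.1 the late "terminating" line falls back to the global
// logger silently because it arrives within the grace period; only output
// after that period triggers the leaked-goroutine warning.
func TestLeakedGoroutine(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)

	go func() {
		time.Sleep(50 * time.Millisecond) // the test has usually completed by now
		logger.Info("terminating")        // handled by fallbackLogger, no warning
	}()
	// The test returns without waiting for the goroutine, as often happens
	// when a shared context is cancelled during cleanup.
}
```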
vendor/k8s.io/klog/v2/textlogger/textlogger.go (2 changed lines, generated, vendored)

```diff
@@ -1,6 +1,6 @@
 /*
 Copyright 2019 The Kubernetes Authors.
-Copyright 2020 Intel Coporation.
+Copyright 2020 Intel Corporation.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
```
vendor/modules.txt (2 changed lines, vendored)

```diff
@@ -1436,7 +1436,7 @@ k8s.io/component-base/metrics/testutil
 k8s.io/component-base/tracing
 k8s.io/component-base/tracing/api/v1
 k8s.io/component-base/version
-# k8s.io/klog/v2 v2.120.1
+# k8s.io/klog/v2 v2.130.1
 ## explicit; go 1.18
 k8s.io/klog/v2
 k8s.io/klog/v2/internal/buffer
```