Update dependencies

github-actions
2024-04-26 06:05:56 +00:00
parent b4623de861
commit 273c3f5266
50 changed files with 465 additions and 326 deletions

View File

@@ -1,5 +1,32 @@
# Changelog
## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23)
### Features
* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814)
### Bug Fixes
* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809)
## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19)
### Bug Fixes
* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823)
* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833)
## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18)
### Bug Fixes
* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7))
## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15)
### Breaking Changes

View File

@@ -76,7 +76,10 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
if opts.CredentialsJSON != nil {
return readCredentialsFileJSON(opts.CredentialsJSON, opts)
}
if filename := credsfile.GetFileNameFromEnv(opts.CredentialsFile); filename != "" {
if opts.CredentialsFile != "" {
return readCredentialsFile(opts.CredentialsFile, opts)
}
if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" {
if creds, err := readCredentialsFile(filename, opts); err == nil {
return creds, err
}
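
The hunk above makes an explicitly set CredentialsFile authoritative: it is read directly, so a bad path now surfaces as an error, and the GOOGLE_APPLICATION_CREDENTIALS lookup is only a fallback. A minimal sketch of driving that path through cloud.google.com/go/auth/credentials (the file path is hypothetical; the DetectOptions fields and Token usage mirror names referenced elsewhere in this commit):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	// An explicit file is read as-is; a typo here is now reported instead of
	// silently falling through to GOOGLE_APPLICATION_CREDENTIALS.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		CredentialsFile: "/path/to/service-account.json", // hypothetical path
		Scopes:          []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := creds.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("token type %q expires %s", tok.Type, tok.Expiry)
}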

View File

@@ -96,6 +96,9 @@ func (o *Options) validate() error {
if o == nil {
return errors.New("grpctransport: opts required to be non-nil")
}
if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
return nil
}
hasCreds := o.Credentials != nil ||
(o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
(o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
@@ -151,6 +154,9 @@ type InternalOptions struct {
// DefaultScopes specifies the default OAuth2 scopes to be used for a
// service.
DefaultScopes []string
// SkipValidation bypasses validation on Options. It should only be used
// internally for clients that need more control over their transport.
SkipValidation bool
}
// Dial returns a GRPCClientConnPool that can be used to communicate with a
@@ -169,7 +175,7 @@ func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool,
}
pool := &roundRobinConnPool{}
for i := 0; i < opts.PoolSize; i++ {
conn, err := dial(ctx, false, opts)
conn, err := dial(ctx, secure, opts)
if err != nil {
// ignore close error, if any
defer pool.Close()
@@ -287,15 +293,24 @@ func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ..
return nil, fmt.Errorf("unable to transfer credentials PerRPCCredentials: %v", err)
}
}
metadata := map[string]string{
"authorization": token.Type + " " + token.Value,
}
metadata := make(map[string]string, len(c.metadata)+1)
setAuthMetadata(token, metadata)
for k, v := range c.metadata {
metadata[k] = v
}
return metadata, nil
}
// setAuthMetadata uses the provided token to set the Authorization metadata.
// If the token.Type is empty, the type is assumed to be Bearer.
func setAuthMetadata(token *auth.Token, m map[string]string) {
typ := token.Type
if typ == "" {
typ = internal.TokenTypeBearer
}
m["authorization"] = typ + " " + token.Value
}
func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
return c.secure
}
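
Two behavioural changes in this file are easy to miss in the diff: Dial now passes the caller's secure flag to every connection in the pool (it was previously hard-coded to false), the new InternalOptions.SkipValidation lets wrapping clients skip the credential check, and setAuthMetadata defaults an empty token type to Bearer. A minimal sketch of the Dial side, using only the exported names visible in these hunks (the Endpoint field on Options is assumed):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth/grpctransport"
)

func main() {
	ctx := context.Background()
	// secure=true now applies to every conn created for the pool.
	pool, err := grpctransport.Dial(ctx, true, &grpctransport.Options{
		Endpoint: "example.googleapis.com:443", // hypothetical endpoint
		PoolSize: 4,
		InternalOptions: &grpctransport.InternalOptions{
			// Bypass credential validation; intended for wrapping clients
			// that manage credentials themselves.
			SkipValidation: true,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()
	// Tokens obtained through the pool's credentials are sent as
	// "Bearer <value>" when the provider leaves Token.Type empty.
}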

View File

@@ -44,6 +44,9 @@ type Options struct {
// Headers are extra HTTP headers that will be appended to every outgoing
// request.
Headers http.Header
// BaseRoundTripper overrides the base transport used for serving requests.
// If specified ClientCertProvider is ignored.
BaseRoundTripper http.RoundTripper
// Endpoint overrides the default endpoint to be used for a service.
Endpoint string
// APIKey specifies an API key to be used as the basis for authentication.
@@ -74,6 +77,9 @@ func (o *Options) validate() error {
if o == nil {
return errors.New("httptransport: opts required to be non-nil")
}
if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
return nil
}
hasCreds := o.APIKey != "" ||
o.Credentials != nil ||
(o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
@@ -131,6 +137,9 @@ type InternalOptions struct {
// DefaultScopes specifies the default OAuth2 scopes to be used for a
// service.
DefaultScopes []string
// SkipValidation bypasses validation on Options. It should only be used
// internally for clients that need more control over their transport.
SkipValidation bool
}
// AddAuthorizationMiddleware adds a middleware to the provided client's
@@ -175,7 +184,11 @@ func NewClient(opts *Options) (*http.Client, error) {
if err != nil {
return nil, err
}
trans, err := newTransport(defaultBaseTransport(clientCertProvider, dialTLSContext), opts)
baseRoundTripper := opts.BaseRoundTripper
if baseRoundTripper == nil {
baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext)
}
trans, err := newTransport(baseRoundTripper, opts)
if err != nil {
return nil, err
}
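
BaseRoundTripper lets callers supply their own base transport; per the new doc comment it takes precedence over ClientCertProvider, and NewClient only falls back to defaultBaseTransport when it is nil. A minimal sketch using fields visible in this hunk (endpoint and API key values are placeholders):

package main

import (
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/auth/httptransport"
)

func main() {
	base := &http.Transport{
		MaxIdleConnsPerHost: 16,
		IdleConnTimeout:     30 * time.Second,
	}
	client, err := httptransport.NewClient(&httptransport.Options{
		BaseRoundTripper: base,                             // used instead of defaultBaseTransport
		Endpoint:         "https://example.googleapis.com", // placeholder
		APIKey:           "api-key-placeholder",            // any one credential source satisfies validate()
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}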

View File

@@ -1,5 +1,19 @@
# Changelog
## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23)
### Bug Fixes
* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18)
### Bug Fixes
* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800)
## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16)

View File

@@ -49,6 +49,7 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
}
return &auth.Token{
Value: tok.AccessToken,
Type: tok.Type(),
Expiry: tok.Expiry,
}, nil
}
@@ -77,6 +78,7 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
}
return &oauth2.Token{
AccessToken: tok.Value,
TokenType: tok.Type,
Expiry: tok.Expiry,
}, nil
}
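
These two hunks carry the token type across the adapters in both directions, which is what lets downstream transports preserve non-Bearer types instead of dropping them. A small sketch of the oauth2 -> auth direction, reusing the TokenProviderFromTokenSource adapter referenced later in this commit:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/auth/oauth2adapt"
	"golang.org/x/oauth2"
)

func main() {
	ts := oauth2.StaticTokenSource(&oauth2.Token{
		AccessToken: "abc123",
		TokenType:   "MAC", // a non-default type, to show it survives adaptation
	})
	tp := oauth2adapt.TokenProviderFromTokenSource(ts)
	tok, err := tp.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Before this change tok.Type was always empty; now it is "MAC".
	fmt.Println(tok.Type, tok.Value)
}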

View File

@@ -1,2 +1,3 @@
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
* Paul Gier <pgier@redhat.com> @pgier
* Paul Gier <paulgier@gmail.com> @pgier
* Ben Kochie <superq@gmail.com> @SuperQ

View File

@@ -49,7 +49,7 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell command -v gotestsum > /dev/null),)
ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.55.2
GOLANGCI_LINT_VERSION ?= v1.56.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -182,7 +182,7 @@ endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell command -v yamllint > /dev/null))
ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@@ -208,6 +208,10 @@ common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
.PHONY: common-docker-repo-name
common-docker-repo-name:
@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:

View File

@@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}
}
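
This hunk (and most of the procfs hunks below) replaces %s with %w for the ErrFileRead/ErrFileParse sentinels, so they become part of the wrapped error chain rather than just formatted text; Go 1.20+ allows the second %w for the underlying cause. A short sketch of what callers gain, assuming the sentinels are exported from the procfs package as shown:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println(err)
		return
	}
	if _, err := fs.GatherARPEntries(); err != nil {
		// With %w wrapping this match works; with the old %s it never could.
		if errors.Is(err, procfs.ErrFileRead) || errors.Is(err, procfs.ErrFileParse) {
			fmt.Println("procfs-level failure:", err)
		}
	}
}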

View File

@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}

View File

@@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
}
field := strings.SplitN(firstLine, ": ", 2)
@@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}

View File

@@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
}
return crypto, nil

View File

@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
}
return *m, nil
@@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}
for i := range setFields {

View File

@@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}
portString := s[len(s)-4:]
if len(portString) != 4 {
return nil, 0,
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {

View File

@@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{

View File

@@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -90,7 +90,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
@@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}
syncLineIdx := i + 2
@@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else {
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
@@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
}
down = int64(strings.Count(matches[4], "_"))
@@ -209,12 +209,12 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
}
// Get percentage complete
@@ -244,7 +244,7 @@ func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, fin
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}
return syncedBlocks, pct, finish, speed, nil

View File

@@ -202,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
}
return *m, nil

View File

@@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
}
}
return mount, nil

View File

@@ -194,8 +194,6 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64
// The average time from the point the client sends RPC requests until it receives the response.
AverageRTTMilliseconds float64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64
}
@@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
}
if ns[0] != 0 {
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
}
if len(ns) > 8 {
opStats.Errors = ns[8]
@@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
}
default:
return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay

View File

@@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
}
return stat, nil
@@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
entries, err := util.ParseHexUint64s(fields)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
}
numEntries := len(entries)
if numEntries < 16 || numEntries > 17 {

View File

@@ -141,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
}
switch len(byteIP) {
case 4:
@@ -155,7 +155,7 @@ func parseIP(hexIP string) (net.IP, error) {
}
return i, nil
default:
return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
}
}
@@ -178,7 +178,7 @@ func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error)
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
}
// local_address
l := strings.Split(fields[1], ":")
@@ -189,7 +189,7 @@ func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error)
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
}
// remote_address
@@ -201,12 +201,12 @@ func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error)
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
}
// tx_queue and rx_queue
@@ -219,27 +219,27 @@ func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error)
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
}
// drops
if isUDP {
drops, err := strconv.ParseUint(fields[12], 0, 64)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
}
line.Drops = &drops
}

View File

@@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
}
return stat, nil
@@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.

View File

@@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
}
return entries, nil

View File

@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
}
return &nu, nil
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1])
if err != nil {
return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
}
}

View File

@@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
m, err := parseWireless(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
}
return m, nil
@@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil {
return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
}
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil {
return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
}
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil {
return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
}
dnwid, err := strconv.Atoi(stats[4])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
}
dcrypt, err := strconv.Atoi(stats[5])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
}
dfrag, err := strconv.Atoi(stats[6])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
}
dretry, err := strconv.Atoi(stats[7])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
}
dmisc, err := strconv.Atoi(stats[8])
if err != nil {
return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
}
mbeacon, err := strconv.Atoi(stats[9])
if err != nil {
return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
}
w := &Wireless{
@@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
}
return interfaces, nil

View File

@@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
p := Procs{}
@@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
}
fds[i] = uintptr(fd)
}
@@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
return names, nil

View File

@@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
}
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
}
return i, nil
}

View File

@@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
}
ns := make(Namespaces, len(names))
@@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}

View File

@@ -61,7 +61,7 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
}
return parsePSIStats(bytes.NewReader(data))

View File

@@ -15,6 +15,7 @@ package procfs
import (
"bytes"
"math/bits"
"sort"
"strconv"
"strings"
@@ -76,9 +77,9 @@ type ProcStatus struct {
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
UIDs [4]string
UIDs [4]uint64
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
GIDs [4]uint64
// CpusAllowedList: List of cpu cores processes are allowed to run on.
CpusAllowedList []uint64
@@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) {
// convert kB to B
vBytes := vKBytes * 1024
s.fillStatus(k, v, vKBytes, vBytes)
err = s.fillStatus(k, v, vKBytes, vBytes)
if err != nil {
return ProcStatus{}, err
}
}
return s, nil
}
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t"))
var err error
for i, v := range strings.Split(vString, "\t") {
s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
if err != nil {
return err
}
}
case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t"))
var err error
for i, v := range strings.Split(vString, "\t") {
s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
if err != nil {
return err
}
}
case "NSpid":
s.NSpids = calcNSPidsList(vString)
case "VmPeak":
@@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.CpusAllowedList = calcCpusAllowedList(vString)
}
return nil
}
// TotalCtxtSwitches returns the total context switch.
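
UIDs and GIDs change from [4]string to [4]uint64, with fillStatus now returning a parse error instead of silently copying strings. A brief usage sketch against the Proc/NewStatus API shown above (fs.Self is assumed to exist alongside fs.Proc):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self() // current process; fs.Proc(pid) works the same way
	if err != nil {
		log.Fatal(err)
	}
	st, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	// Previously strings; numeric comparisons now need no conversion.
	fmt.Printf("real uid %d, effective uid %d\n", st.UIDs[0], st.UIDs[1])
}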

View File

@@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
vp := util.NewValueParser(f)
values[i] = vp.Int()
if err := vp.Err(); err != nil {
return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
}
}
return values, nil

View File

@@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TIMER:":
@@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_TX:":
@@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_RX:":
@@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "BLOCK:":
@@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "IRQ_POLL:":
@@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TASKLET:":
@@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "SCHED:":
@@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "HRTIMER:":
@@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "RCU:":
@@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
}
}
}
}
if err := scanner.Err(); err != nil {
return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
}
return softirqs, scanner.Err()

View File

@@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
}
if count == 0 {
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
@@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
}
return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
}
return softIRQStat, total, nil
@@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
}
if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
}
return stat, nil

View File

@@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
}
return swap, nil

View File

@@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
}
t := Procs{}

View File

@@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}

View File

@@ -33,6 +33,9 @@
#define CONSTBASE R16
#define BLOCKS R17
// for VPERMXOR
#define MASK R18
DATA consts<>+0x00(SB)/8, $0x3320646e61707865
DATA consts<>+0x08(SB)/8, $0x6b20657479622d32
DATA consts<>+0x10(SB)/8, $0x0000000000000001
@@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574
DATA consts<>+0x88(SB)/8, $0x6b2065746b206574
DATA consts<>+0x90(SB)/8, $0x0000000100000000
DATA consts<>+0x98(SB)/8, $0x0000000300000002
GLOBL consts<>(SB), RODATA, $0xa0
DATA consts<>+0xa0(SB)/8, $0x5566774411223300
DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88
DATA consts<>+0xb0(SB)/8, $0x6677445522330011
DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899
GLOBL consts<>(SB), RODATA, $0xc0
//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
@@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
MOVD $48, R10
MOVD $64, R11
SRD $6, LEN, BLOCKS
// for VPERMXOR
MOVD $consts<>+0xa0(SB), MASK
MOVD $16, R20
// V16
LXVW4X (CONSTBASE)(R0), VS48
ADD $80,CONSTBASE
@@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
// V28
LXVW4X (CONSTBASE)(R11), VS60
// Load mask constants for VPERMXOR
LXVW4X (MASK)(R0), V20
LXVW4X (MASK)(R20), V21
// splat slot from V19 -> V26
VSPLTW $0, V19, V26
@@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
MOVD $10, R14
MOVD R14, CTR
PCALIGN $16
loop_outer_vsx:
// V0, V1, V2, V3
LXVW4X (R0)(CONSTBASE), VS32
@@ -128,22 +142,17 @@ loop_outer_vsx:
VSPLTISW $12, V28
VSPLTISW $8, V29
VSPLTISW $7, V30
PCALIGN $16
loop_vsx:
VADDUWM V0, V4, V0
VADDUWM V1, V5, V1
VADDUWM V2, V6, V2
VADDUWM V3, V7, V3
VXOR V12, V0, V12
VXOR V13, V1, V13
VXOR V14, V2, V14
VXOR V15, V3, V15
VRLW V12, V27, V12
VRLW V13, V27, V13
VRLW V14, V27, V14
VRLW V15, V27, V15
VPERMXOR V12, V0, V21, V12
VPERMXOR V13, V1, V21, V13
VPERMXOR V14, V2, V21, V14
VPERMXOR V15, V3, V21, V15
VADDUWM V8, V12, V8
VADDUWM V9, V13, V9
@@ -165,15 +174,10 @@ loop_vsx:
VADDUWM V2, V6, V2
VADDUWM V3, V7, V3
VXOR V12, V0, V12
VXOR V13, V1, V13
VXOR V14, V2, V14
VXOR V15, V3, V15
VRLW V12, V29, V12
VRLW V13, V29, V13
VRLW V14, V29, V14
VRLW V15, V29, V15
VPERMXOR V12, V0, V20, V12
VPERMXOR V13, V1, V20, V13
VPERMXOR V14, V2, V20, V14
VPERMXOR V15, V3, V20, V15
VADDUWM V8, V12, V8
VADDUWM V9, V13, V9
@@ -195,15 +199,10 @@ loop_vsx:
VADDUWM V2, V7, V2
VADDUWM V3, V4, V3
VXOR V15, V0, V15
VXOR V12, V1, V12
VXOR V13, V2, V13
VXOR V14, V3, V14
VRLW V15, V27, V15
VRLW V12, V27, V12
VRLW V13, V27, V13
VRLW V14, V27, V14
VPERMXOR V15, V0, V21, V15
VPERMXOR V12, V1, V21, V12
VPERMXOR V13, V2, V21, V13
VPERMXOR V14, V3, V21, V14
VADDUWM V10, V15, V10
VADDUWM V11, V12, V11
@@ -225,15 +224,10 @@ loop_vsx:
VADDUWM V2, V7, V2
VADDUWM V3, V4, V3
VXOR V15, V0, V15
VXOR V12, V1, V12
VXOR V13, V2, V13
VXOR V14, V3, V14
VRLW V15, V29, V15
VRLW V12, V29, V12
VRLW V13, V29, V13
VRLW V14, V29, V14
VPERMXOR V15, V0, V20, V15
VPERMXOR V12, V1, V20, V12
VPERMXOR V13, V2, V20, V13
VPERMXOR V14, V3, V20, V14
VADDUWM V10, V15, V10
VADDUWM V11, V12, V11
@@ -249,48 +243,48 @@ loop_vsx:
VRLW V6, V30, V6
VRLW V7, V30, V7
VRLW V4, V30, V4
BC 16, LT, loop_vsx
BDNZ loop_vsx
VADDUWM V12, V26, V12
WORD $0x13600F8C // VMRGEW V0, V1, V27
WORD $0x13821F8C // VMRGEW V2, V3, V28
VMRGEW V0, V1, V27
VMRGEW V2, V3, V28
WORD $0x10000E8C // VMRGOW V0, V1, V0
WORD $0x10421E8C // VMRGOW V2, V3, V2
VMRGOW V0, V1, V0
VMRGOW V2, V3, V2
WORD $0x13A42F8C // VMRGEW V4, V5, V29
WORD $0x13C63F8C // VMRGEW V6, V7, V30
VMRGEW V4, V5, V29
VMRGEW V6, V7, V30
XXPERMDI VS32, VS34, $0, VS33
XXPERMDI VS32, VS34, $3, VS35
XXPERMDI VS59, VS60, $0, VS32
XXPERMDI VS59, VS60, $3, VS34
WORD $0x10842E8C // VMRGOW V4, V5, V4
WORD $0x10C63E8C // VMRGOW V6, V7, V6
VMRGOW V4, V5, V4
VMRGOW V6, V7, V6
WORD $0x13684F8C // VMRGEW V8, V9, V27
WORD $0x138A5F8C // VMRGEW V10, V11, V28
VMRGEW V8, V9, V27
VMRGEW V10, V11, V28
XXPERMDI VS36, VS38, $0, VS37
XXPERMDI VS36, VS38, $3, VS39
XXPERMDI VS61, VS62, $0, VS36
XXPERMDI VS61, VS62, $3, VS38
WORD $0x11084E8C // VMRGOW V8, V9, V8
WORD $0x114A5E8C // VMRGOW V10, V11, V10
VMRGOW V8, V9, V8
VMRGOW V10, V11, V10
WORD $0x13AC6F8C // VMRGEW V12, V13, V29
WORD $0x13CE7F8C // VMRGEW V14, V15, V30
VMRGEW V12, V13, V29
VMRGEW V14, V15, V30
XXPERMDI VS40, VS42, $0, VS41
XXPERMDI VS40, VS42, $3, VS43
XXPERMDI VS59, VS60, $0, VS40
XXPERMDI VS59, VS60, $3, VS42
WORD $0x118C6E8C // VMRGOW V12, V13, V12
WORD $0x11CE7E8C // VMRGOW V14, V15, V14
VMRGOW V12, V13, V12
VMRGOW V14, V15, V14
VSPLTISW $4, V27
VADDUWM V26, V27, V26
@@ -431,7 +425,7 @@ tail_vsx:
ADD $-1, R11, R12
ADD $-1, INP
ADD $-1, OUT
PCALIGN $16
looptail_vsx:
// Copying the result to OUT
// in bytes.
@@ -439,7 +433,7 @@ looptail_vsx:
MOVBZU 1(INP), TMP
XOR KEY, TMP, KEY
MOVBU KEY, 1(OUT)
BC 16, LT, looptail_vsx
BDNZ looptail_vsx
// Clear the stack values
STXVW4X VS48, (R11)(R0)
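
The VPERMXOR rewrite fuses each XOR-then-rotate pair of the quarter round into a single byte-permuting instruction where the rotation is byte-aligned (16 and 8 bits, via the mask constants loaded into V20/V21), while the 12- and 7-bit rotations keep the VXOR+VRLW form; BC 16,LT becomes BDNZ and PCALIGN hints are added on the hot loops. For orientation, this is the scalar shape of the quarter round those vector sequences implement (a plain Go sketch, not the code being patched):

package main

import (
	"fmt"
	"math/bits"
)

// quarterRound is the standard ChaCha20 quarter round. The 16- and 8-bit
// rotations move whole bytes, which is what VPERMXOR can express as one
// permute-and-xor; the 12- and 7-bit rotations are not byte-aligned.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}

func main() {
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
}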

View File

@@ -35,11 +35,25 @@ type Weighted struct {
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
done := ctx.Done()
s.mu.Lock()
select {
case <-done:
// ctx becoming done has "happened before" acquiring the semaphore,
// whether it became done before the call began or while we were
// waiting for the mutex. We prefer to fail even if we could acquire
// the mutex without blocking.
s.mu.Unlock()
return ctx.Err()
default:
}
if s.size-s.cur >= n && s.waiters.Len() == 0 {
// Since we hold s.mu and haven't synchronized since checking done, if
// ctx becomes done before we return here, it becoming done must have
// "happened concurrently" with this call - it cannot "happen before"
// we return in this branch. So, we're ok to always acquire here.
s.cur += n
s.mu.Unlock()
return nil
@@ -48,7 +62,7 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
if n > s.size {
// Don't make other Acquire calls block on one that's doomed to fail.
s.mu.Unlock()
<-ctx.Done()
<-done
return ctx.Err()
}
@@ -58,14 +72,14 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
s.mu.Unlock()
select {
case <-ctx.Done():
err := ctx.Err()
case <-done:
s.mu.Lock()
select {
case <-ready:
// Acquired the semaphore after we were canceled. Rather than trying to
// fix up the queue, just pretend we didn't notice the cancelation.
err = nil
// Acquired the semaphore after we were canceled.
// Pretend we didn't and put the tokens back.
s.cur -= n
s.notifyWaiters()
default:
isFront := s.waiters.Front() == elem
s.waiters.Remove(elem)
@@ -75,9 +89,19 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
}
}
s.mu.Unlock()
return err
return ctx.Err()
case <-ready:
// Acquired the semaphore. Check that ctx isn't already done.
// We check the done channel instead of calling ctx.Err because we
// already have the channel, and ctx.Err is O(n) with the nesting
// depth of ctx.
select {
case <-done:
s.Release(n)
return ctx.Err()
default:
}
return nil
}
}
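
The Acquire rewrite makes cancellation win races in both directions: a context that is already done now fails Acquire even when capacity is free, and a waiter that is handed the semaphore around the time its context is cancelled now puts the tokens back (s.cur -= n plus notifyWaiters, or Release in the ready branch) and reports ctx.Err() instead of pretending not to notice. A small sketch of the first case with golang.org/x/sync/semaphore, behaviour as of this patched version:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	sem := semaphore.NewWeighted(2)

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // ctx is done before we even try

	// Capacity is available, but Acquire now prefers to fail with ctx.Err().
	if err := sem.Acquire(ctx, 1); err != nil {
		fmt.Println("acquire failed:", err) // context.Canceled
	}

	// A live context acquires and releases as before.
	if err := sem.Acquire(context.Background(), 1); err == nil {
		sem.Release(1)
	}
}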

View File

@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "0.174.0"
const Version = "0.176.1"

View File

@@ -174,21 +174,30 @@ func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error
// dialPoolNewAuth is an adapter to call new auth library.
func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *internal.DialSettings) (grpctransport.GRPCClientConnPool, error) {
// honor options if set
var ts oauth2.TokenSource
if ds.InternalCredentials != nil {
ts = ds.InternalCredentials.TokenSource
} else if ds.Credentials != nil {
ts = ds.Credentials.TokenSource
} else if ds.TokenSource != nil {
ts = ds.TokenSource
}
var creds *auth.Credentials
if ds.AuthCredentials != nil {
if ds.InternalCredentials != nil {
creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials)
} else if ds.Credentials != nil {
creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials)
} else if ds.AuthCredentials != nil {
creds = ds.AuthCredentials
} else if ts != nil {
creds = auth.NewCredentials(&auth.CredentialsOptions{
TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ts),
})
} else if ds.TokenSource != nil {
credOpts := &auth.CredentialsOptions{
TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource),
}
if ds.QuotaProject != "" {
credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
return ds.QuotaProject, nil
})
}
creds = auth.NewCredentials(credOpts)
}
var skipValidation bool
// If our clients explicitly setup the credential skip validation as it is
// assumed correct
if ds.SkipValidation || ds.InternalCredentials != nil {
skipValidation = true
}
var aud string
@@ -202,6 +211,13 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna
if ds.RequestReason != "" {
metadata["X-goog-request-reason"] = ds.RequestReason
}
// Defaults for older clients that don't set this value yet
defaultEndpointTemplate := ds.DefaultEndpointTemplate
if defaultEndpointTemplate == "" {
defaultEndpointTemplate = ds.DefaultEndpoint
}
pool, err := grpctransport.Dial(ctx, secure, &grpctransport.Options{
DisableTelemetry: ds.TelemetryDisabled,
DisableAuthentication: ds.NoAuth,
@@ -223,9 +239,10 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna
EnableDirectPathXds: ds.EnableDirectPathXds,
EnableJWTWithScope: ds.EnableJwtWithScope,
DefaultAudience: ds.DefaultAudience,
DefaultEndpointTemplate: ds.DefaultEndpointTemplate,
DefaultEndpointTemplate: defaultEndpointTemplate,
DefaultMTLSEndpoint: ds.DefaultMTLSEndpoint,
DefaultScopes: ds.DefaultScopes,
SkipValidation: skipValidation,
},
})
return pool, err

View File

@@ -109,9 +109,9 @@ func (p *poolAdapter) Close() error {
}
func (p *poolAdapter) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
return p.Conn().Invoke(ctx, method, args, reply, opts...)
return p.pool.Invoke(ctx, method, args, reply, opts...)
}
func (p *poolAdapter) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
return p.Conn().NewStream(ctx, desc, method, opts...)
return p.pool.NewStream(ctx, desc, method, opts...)
}

View File

@@ -48,7 +48,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client,
}
if settings.IsNewAuthLibraryEnabled() {
client, err := newClientNewAuth(ctx, settings)
client, err := newClientNewAuth(ctx, nil, settings)
if err != nil {
return nil, "", err
}
@@ -62,57 +62,74 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client,
}
// newClientNewAuth is an adapter to call new auth library.
func newClientNewAuth(ctx context.Context, settings *internal.DialSettings) (*http.Client, error) {
func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.DialSettings) (*http.Client, error) {
// honor options if set
var ts oauth2.TokenSource
if settings.InternalCredentials != nil {
ts = settings.InternalCredentials.TokenSource
} else if settings.Credentials != nil {
ts = settings.Credentials.TokenSource
} else if settings.TokenSource != nil {
ts = settings.TokenSource
}
var creds *auth.Credentials
if settings.AuthCredentials != nil {
creds = settings.AuthCredentials
} else if ts != nil {
creds = auth.NewCredentials(&auth.CredentialsOptions{
TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ts),
})
if ds.InternalCredentials != nil {
creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials)
} else if ds.Credentials != nil {
creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials)
} else if ds.AuthCredentials != nil {
creds = ds.AuthCredentials
} else if ds.TokenSource != nil {
credOpts := &auth.CredentialsOptions{
TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource),
}
if ds.QuotaProject != "" {
credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
return ds.QuotaProject, nil
})
}
creds = auth.NewCredentials(credOpts)
}
var skipValidation bool
// If our clients explicitly set up the credentials, skip validation as
// they are assumed to be correct.
if ds.SkipValidation || ds.InternalCredentials != nil {
skipValidation = true
}
// Defaults for older clients that don't set this value yet
defaultEndpointTemplate := ds.DefaultEndpointTemplate
if defaultEndpointTemplate == "" {
defaultEndpointTemplate = ds.DefaultEndpoint
}
var aud string
if len(settings.Audiences) > 0 {
aud = settings.Audiences[0]
if len(ds.Audiences) > 0 {
aud = ds.Audiences[0]
}
headers := http.Header{}
if settings.QuotaProject != "" {
headers.Set("X-goog-user-project", settings.QuotaProject)
if ds.QuotaProject != "" {
headers.Set("X-goog-user-project", ds.QuotaProject)
}
if settings.RequestReason != "" {
headers.Set("X-goog-request-reason", settings.RequestReason)
if ds.RequestReason != "" {
headers.Set("X-goog-request-reason", ds.RequestReason)
}
client, err := httptransport.NewClient(&httptransport.Options{
DisableTelemetry: settings.TelemetryDisabled,
DisableAuthentication: settings.NoAuth,
DisableTelemetry: ds.TelemetryDisabled,
DisableAuthentication: ds.NoAuth,
Headers: headers,
Endpoint: settings.Endpoint,
APIKey: settings.APIKey,
Endpoint: ds.Endpoint,
APIKey: ds.APIKey,
Credentials: creds,
ClientCertProvider: settings.ClientCertSource,
ClientCertProvider: ds.ClientCertSource,
BaseRoundTripper: base,
DetectOpts: &credentials.DetectOptions{
Scopes: settings.Scopes,
Scopes: ds.Scopes,
Audience: aud,
CredentialsFile: settings.CredentialsFile,
CredentialsJSON: settings.CredentialsJSON,
CredentialsFile: ds.CredentialsFile,
CredentialsJSON: ds.CredentialsJSON,
Client: oauth2.NewClient(ctx, nil),
},
InternalOptions: &httptransport.InternalOptions{
EnableJWTWithScope: settings.EnableJwtWithScope,
DefaultAudience: settings.DefaultAudience,
DefaultEndpointTemplate: settings.DefaultEndpointTemplate,
DefaultMTLSEndpoint: settings.DefaultMTLSEndpoint,
DefaultScopes: settings.DefaultScopes,
EnableJWTWithScope: ds.EnableJwtWithScope,
DefaultAudience: ds.DefaultAudience,
DefaultEndpointTemplate: defaultEndpointTemplate,
DefaultMTLSEndpoint: ds.DefaultMTLSEndpoint,
DefaultScopes: ds.DefaultScopes,
SkipValidation: skipValidation,
},
})
if err != nil {
@@ -132,8 +149,7 @@ func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.Cl
return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
}
if settings.IsNewAuthLibraryEnabled() {
// TODO, this is not wrapping the base, find a way...
client, err := newClientNewAuth(ctx, settings)
client, err := newClientNewAuth(ctx, base, settings)
if err != nil {
return nil, err
}
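
The base http.RoundTripper threaded through above resolves the removed TODO: NewTransport now hands its base transport to the auth layer via BaseRoundTripper instead of dropping it. A minimal sketch of what that enables, assuming an API key alone satisfies the transport's credential validation; the logging transport and key are hypothetical.

package main

import (
	"log"
	"net/http"

	"cloud.google.com/go/auth/httptransport"
)

// loggingTripper is a hypothetical base transport that logs each request
// before handing it to the default transport.
type loggingTripper struct{ next http.RoundTripper }

func (l loggingTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	log.Printf("%s %s", req.Method, req.URL)
	return l.next.RoundTrip(req)
}

func main() {
	client, err := httptransport.NewClient(&httptransport.Options{
		// The auth layer is stacked on top of the caller-supplied transport.
		BaseRoundTripper: loggingTripper{next: http.DefaultTransport},
		APIKey:           "hypothetical-api-key",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}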

View File

@@ -196,8 +196,6 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
}
// Dial calls DialContext(context.Background(), target, opts...).
//
// Deprecated: use NewClient instead. Will be supported throughout 1.x.
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
return DialContext(context.Background(), target, opts...)
}
@@ -211,8 +209,6 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
// "passthrough" for backward compatibility. This distinction should not matter
// to most users, but could matter to legacy users that specify a custom dialer
// and expect it to receive the target string directly.
//
// Deprecated: use NewClient instead. Will be supported throughout 1.x.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
// At the end of this method, we kick the channel out of idle, rather than
// waiting for the first rpc.
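
The notices removed above track grpc-go softening the Dial and DialContext deprecation for the 1.x line. The practical difference the surrounding comment describes is the default resolver: NewClient resolves targets through DNS by default, while Dial keeps "passthrough" so a custom dialer still receives the raw target string. A hedged sketch of both call shapes, using a placeholder target and insecure credentials:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// NewClient: the target goes through the default "dns" resolver.
	cc1, err := grpc.NewClient("example.internal:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc1.Close()

	// Dial: the default resolver stays "passthrough", so a custom dialer
	// sees the raw target string, as the doc comment above notes.
	cc2, err := grpc.Dial("example.internal:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			log.Printf("dialing raw target %q", addr)
			var d net.Dialer
			return d.DialContext(ctx, "tcp", addr)
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer cc2.Close()
}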
@@ -837,7 +833,7 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
addrs: copyAddressesWithoutBalancerAttributes(addrs),
scopts: opts,
dopts: cc.dopts,
channelz: channelz.RegisterSubChannel(cc.channelz.ID, ""),
channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
stateChan: make(chan struct{}),
}

View File

@@ -143,20 +143,21 @@ func RegisterChannel(parent *Channel, target string) *Channel {
// Returns a unique channelz identifier assigned to this subChannel.
//
// If channelz is not turned ON, the channelz database is not mutated.
func RegisterSubChannel(pid int64, ref string) *SubChannel {
func RegisterSubChannel(parent *Channel, ref string) *SubChannel {
id := IDGen.genID()
if !IsOn() {
return &SubChannel{ID: id}
sc := &SubChannel{
ID: id,
RefName: ref,
parent: parent,
}
sc := &SubChannel{
RefName: ref,
ID: id,
sockets: make(map[int64]string),
parent: db.getChannel(pid),
trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())},
if !IsOn() {
return sc
}
db.addSubChannel(id, sc, pid)
sc.sockets = make(map[int64]string)
sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}
db.addSubChannel(id, sc, parent.ID)
return sc
}
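
The restructuring above builds the SubChannel, with its parent reference, unconditionally and only fills in trace state and touches the channelz database when channelz is enabled. The same guard pattern in isolation, with hypothetical types standing in for the real channelz ones:

package tracedemo

import "time"

// record is a hypothetical stand-in for a channelz entity.
type record struct {
	id      int64
	parent  *record
	events  []string
	created time.Time
}

var enabled bool
var db = map[int64]*record{}

// register always returns a usable handle; the database and trace state are
// only populated when collection is enabled, mirroring RegisterSubChannel.
func register(parent *record, id int64) *record {
	r := &record{id: id, parent: parent}
	if !enabled {
		return r
	}
	r.created = time.Now()
	r.events = make([]string, 0, 8)
	db[id] = r
	return r
}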

View File

@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.63.0"
const Version = "1.63.2"

vendor/modules.txt
View File

@@ -1,4 +1,4 @@
# cloud.google.com/go/auth v0.2.0
# cloud.google.com/go/auth v0.3.0
## explicit; go 1.19
cloud.google.com/go/auth
cloud.google.com/go/auth/credentials
@@ -14,7 +14,7 @@ cloud.google.com/go/auth/internal/credsfile
cloud.google.com/go/auth/internal/jwt
cloud.google.com/go/auth/internal/transport
cloud.google.com/go/auth/internal/transport/cert
# cloud.google.com/go/auth/oauth2adapt v0.2.0
# cloud.google.com/go/auth/oauth2adapt v0.2.2
## explicit; go 1.19
cloud.google.com/go/auth/oauth2adapt
# cloud.google.com/go/compute/metadata v0.3.0
@@ -263,8 +263,8 @@ github.com/prometheus/client_model/go
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.13.0
## explicit; go 1.19
# github.com/prometheus/procfs v0.14.0
## explicit; go 1.21
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@@ -357,7 +357,7 @@ go.opentelemetry.io/otel/metric/noop
## explicit; go 1.20
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
# golang.org/x/crypto v0.21.0
# golang.org/x/crypto v0.22.0
## explicit; go 1.18
golang.org/x/crypto/chacha20
golang.org/x/crypto/chacha20poly1305
@@ -366,7 +366,7 @@ golang.org/x/crypto/cryptobyte/asn1
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
# golang.org/x/net v0.23.0
# golang.org/x/net v0.24.0
## explicit; go 1.18
golang.org/x/net/http/httpguts
golang.org/x/net/http2
@@ -386,7 +386,7 @@ golang.org/x/oauth2/google/internal/stsexchange
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
# golang.org/x/sync v0.6.0
# golang.org/x/sync v0.7.0
## explicit; go 1.18
golang.org/x/sync/semaphore
# golang.org/x/sys v0.19.0
@@ -399,7 +399,7 @@ golang.org/x/sys/windows/registry
golang.org/x/sys/windows/svc
golang.org/x/sys/windows/svc/debug
golang.org/x/sys/windows/svc/eventlog
# golang.org/x/term v0.18.0
# golang.org/x/term v0.19.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.14.0
@@ -411,7 +411,7 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.5.0
## explicit; go 1.18
golang.org/x/time/rate
# google.golang.org/api v0.174.0
# google.golang.org/api v0.176.1
## explicit; go 1.19
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@@ -444,7 +444,7 @@ google.golang.org/genproto/googleapis/api/monitoredres
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.63.0
# google.golang.org/grpc v1.63.2
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -822,7 +822,7 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/spec3
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/validation/spec
# k8s.io/utils v0.0.0-20240310230437-4693a0247e57
# k8s.io/utils v0.0.0-20240423183400-0849a56e8f22
## explicit; go 1.18
k8s.io/utils/clock
k8s.io/utils/clock/testing