Mirror of https://github.com/weaveworks/scope.git, synced 2026-03-03 18:20:27 +00:00.
Merge pull request #2491 from kinvolk/alban/perf-proc-walker-2
process walker perfs: optimize readLimits and readStats
This commit is contained in:
@@ -1,41 +0,0 @@
|
||||
package process
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/coocood/freecache"
|
||||
|
||||
"github.com/weaveworks/common/fs"
|
||||
)
|
||||
|
||||
const (
|
||||
generalTimeout = 30 // seconds
|
||||
statsTimeout = 10 //seconds
|
||||
)
|
||||
|
||||
var (
|
||||
hitMetricsKey = []string{"process", "cache", "hit"}
|
||||
missMetricsKey = []string{"process", "cache", "miss"}
|
||||
)
|
||||
|
||||
var fileCache = freecache.NewCache(1024 * 16)
|
||||
|
||||
type entry struct {
|
||||
buf []byte
|
||||
err error
|
||||
ts time.Time
|
||||
}
|
||||
|
||||
func cachedReadFile(path string) ([]byte, error) {
|
||||
key := []byte(path)
|
||||
if v, err := fileCache.Get(key); err == nil {
|
||||
metrics.IncrCounter(hitMetricsKey, 1.0)
|
||||
return v, nil
|
||||
}
|
||||
|
||||
buf, err := fs.ReadFile(path)
|
||||
fileCache.Set(key, buf, generalTimeout)
|
||||
metrics.IncrCounter(missMetricsKey, 1.0)
|
||||
return buf, err
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package process
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
@@ -9,6 +10,7 @@ import (
|
||||
"strings"
|
||||
|
||||
linuxproc "github.com/c9s/goprocinfo/linux"
|
||||
"github.com/coocood/freecache"
|
||||
|
||||
"github.com/weaveworks/common/fs"
|
||||
"github.com/weaveworks/scope/probe/host"
|
||||
@@ -18,12 +20,64 @@ type walker struct {
|
||||
procRoot string
|
||||
}
|
||||
|
||||
var (
|
||||
// limitsCache caches /proc/<pid>/limits
|
||||
// key: filename in /proc. Example: "42"
|
||||
// value: max open files (soft limit) stored in a [8]byte (uint64, little endian)
|
||||
limitsCache = freecache.NewCache(1024 * 16)
|
||||
|
||||
// cmdlineCache caches /proc/<pid>/cmdline and /proc/<pid>/name
|
||||
// key: filename in /proc. Example: "42"
|
||||
// value: two strings separated by a '\0'
|
||||
cmdlineCache = freecache.NewCache(1024 * 16)
|
||||
)
|
||||
|
||||
const (
|
||||
limitsCacheTimeout = 60
|
||||
cmdlineCacheTimeout = 60
|
||||
)
|
||||
|
||||
// NewWalker creates a new process Walker.
|
||||
func NewWalker(procRoot string) Walker {
|
||||
return &walker{procRoot: procRoot}
|
||||
}
|
||||
|
||||
// skipNSpaces advances the cursor *pos until nSpaces space characters
// have been consumed (or the end of *buf is reached). On return the
// cursor sits one byte past the nth space.
func skipNSpaces(buf *[]byte, pos *int, nSpaces int) {
	b := *buf
	i := *pos
	seen := 0
	for i < len(b) && seen < nSpaces {
		if b[i] == ' ' {
			seen++
		}
		i++
	}
	*pos = i
}
|
||||
|
||||
// parseUint64WithSpaces is similar to strconv.ParseUint but stops parsing
// when it reads a space instead of returning an error. The cursor *pos is
// left on the space (or at the end of the buffer). The input is assumed
// to contain only decimal digits up to the next space.
func parseUint64WithSpaces(buf *[]byte, pos *int) (ret uint64) {
	b := *buf
	i := *pos
	for i < len(b) {
		c := b[i]
		if c == ' ' {
			break
		}
		ret = ret*10 + uint64(c-'0')
		i++
	}
	*pos = i
	return ret
}
|
||||
|
||||
// parseIntWithSpaces is similar to strconv.ParseInt but stops parsing when
|
||||
// reading a space instead of returning an error
|
||||
func parseIntWithSpaces(buf *[]byte, pos *int) (ret int) {
|
||||
return int(parseUint64WithSpaces(buf, pos))
|
||||
}
|
||||
|
||||
// readStats reads and parses '/proc/<pid>/stat' files
|
||||
func readStats(path string) (ppid, threads int, jiffies, rss, rssLimit uint64, err error) {
|
||||
const (
|
||||
// /proc/<pid>/stat field positions, counting from zero
|
||||
// see "man 5 proc"
|
||||
procStatFieldPpid int = 3
|
||||
procStatFieldUserJiffies int = 13
|
||||
procStatFieldSysJiffies int = 14
|
||||
procStatFieldThreads int = 19
|
||||
procStatFieldRssPages int = 23
|
||||
procStatFieldRssLimit int = 24
|
||||
)
|
||||
var (
|
||||
buf []byte
|
||||
userJiffies, sysJiffies, rssPages uint64
|
||||
@@ -32,53 +86,84 @@ func readStats(path string) (ppid, threads int, jiffies, rss, rssLimit uint64, e
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
splits := strings.Fields(string(buf))
|
||||
if len(splits) < 25 {
|
||||
err = fmt.Errorf("Invalid /proc/PID/stat")
|
||||
return
|
||||
}
|
||||
ppid, err = strconv.Atoi(splits[3])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
threads, err = strconv.Atoi(splits[19])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
userJiffies, err = strconv.ParseUint(splits[13], 10, 64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
sysJiffies, err = strconv.ParseUint(splits[14], 10, 64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Parse the file without using expensive extra string allocations
|
||||
|
||||
pos := 0
|
||||
skipNSpaces(&buf, &pos, procStatFieldPpid)
|
||||
ppid = parseIntWithSpaces(&buf, &pos)
|
||||
|
||||
skipNSpaces(&buf, &pos, procStatFieldUserJiffies-procStatFieldPpid)
|
||||
userJiffies = parseUint64WithSpaces(&buf, &pos)
|
||||
|
||||
pos++ // 1 space between userJiffies and sysJiffies
|
||||
sysJiffies = parseUint64WithSpaces(&buf, &pos)
|
||||
|
||||
skipNSpaces(&buf, &pos, procStatFieldThreads-procStatFieldSysJiffies)
|
||||
threads = parseIntWithSpaces(&buf, &pos)
|
||||
|
||||
skipNSpaces(&buf, &pos, procStatFieldRssPages-procStatFieldThreads)
|
||||
rssPages = parseUint64WithSpaces(&buf, &pos)
|
||||
|
||||
pos++ // 1 space between rssPages and rssLimit
|
||||
rssLimit = parseUint64WithSpaces(&buf, &pos)
|
||||
|
||||
jiffies = userJiffies + sysJiffies
|
||||
rssPages, err = strconv.ParseUint(splits[23], 10, 64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
rss = rssPages * uint64(os.Getpagesize())
|
||||
rssLimit, err = strconv.ParseUint(splits[24], 10, 64)
|
||||
return
|
||||
}
|
||||
|
||||
func readLimits(path string) (openFilesLimit uint64, err error) {
|
||||
buf, err := cachedReadFile(path)
|
||||
buf, err := fs.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, line := range strings.Split(string(buf), "\n") {
|
||||
if strings.HasPrefix(line, "Max open files") {
|
||||
splits := strings.Fields(line)
|
||||
if len(splits) < 6 {
|
||||
return 0, fmt.Errorf("Invalid /proc/PID/limits")
|
||||
}
|
||||
openFilesLimit, err := strconv.Atoi(splits[3])
|
||||
return uint64(openFilesLimit), err
|
||||
content := string(buf)
|
||||
|
||||
// File format: one line header + one line per limit
|
||||
//
|
||||
// Limit Soft Limit Hard Limit Units
|
||||
// ...
|
||||
// Max open files 1024 4096 files
|
||||
// ...
|
||||
delim := "\nMax open files"
|
||||
pos := strings.Index(content, delim)
|
||||
|
||||
if pos < 0 {
|
||||
// Tests such as TestWalker can synthetise empty files
|
||||
return 0, nil
|
||||
}
|
||||
pos += len(delim)
|
||||
|
||||
for pos < len(content) && content[pos] == ' ' {
|
||||
pos++
|
||||
}
|
||||
|
||||
var softLimit uint64
|
||||
softLimit = parseUint64WithSpaces(&buf, &pos)
|
||||
|
||||
return softLimit, nil
|
||||
}
|
||||
|
||||
func (w *walker) readCmdline(filename string) (cmdline, name string) {
|
||||
if cmdlineBuf, err := fs.ReadFile(path.Join(w.procRoot, filename, "cmdline")); err == nil {
|
||||
// like proc, treat name as the first element of command line
|
||||
i := bytes.IndexByte(cmdlineBuf, '\000')
|
||||
if i == -1 {
|
||||
i = len(cmdlineBuf)
|
||||
}
|
||||
name = string(cmdlineBuf[:i])
|
||||
cmdlineBuf = bytes.Replace(cmdlineBuf, []byte{'\000'}, []byte{' '}, -1)
|
||||
cmdline = string(cmdlineBuf)
|
||||
}
|
||||
if name == "" {
|
||||
if commBuf, err := fs.ReadFile(path.Join(w.procRoot, filename, "comm")); err == nil {
|
||||
name = "[" + strings.TrimSpace(string(commBuf)) + "]"
|
||||
} else {
|
||||
name = "(unknown)"
|
||||
}
|
||||
}
|
||||
return 0, nil
|
||||
return
|
||||
}
|
||||
|
||||
// Walk walks the supplied directory (expecting it to look like /proc)
|
||||
@@ -107,29 +192,29 @@ func (w *walker) Walk(f func(Process, Process)) error {
|
||||
continue
|
||||
}
|
||||
|
||||
openFilesLimit, err := readLimits(path.Join(w.procRoot, filename, "limits"))
|
||||
if err != nil {
|
||||
continue
|
||||
var openFilesLimit uint64
|
||||
if v, err := limitsCache.Get([]byte(filename)); err == nil {
|
||||
openFilesLimit = binary.LittleEndian.Uint64(v)
|
||||
} else {
|
||||
openFilesLimit, err = readLimits(path.Join(w.procRoot, filename, "limits"))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, openFilesLimit)
|
||||
limitsCache.Set([]byte(filename), buf, limitsCacheTimeout)
|
||||
}
|
||||
|
||||
cmdline, name := "", ""
|
||||
if cmdlineBuf, err := cachedReadFile(path.Join(w.procRoot, filename, "cmdline")); err == nil {
|
||||
// like proc, treat name as the first element of command line
|
||||
i := bytes.IndexByte(cmdlineBuf, '\000')
|
||||
if i == -1 {
|
||||
i = len(cmdlineBuf)
|
||||
}
|
||||
name = string(cmdlineBuf[:i])
|
||||
cmdlineBuf = bytes.Replace(cmdlineBuf, []byte{'\000'}, []byte{' '}, -1)
|
||||
cmdline = string(cmdlineBuf)
|
||||
}
|
||||
if name == "" {
|
||||
if commBuf, err := cachedReadFile(path.Join(w.procRoot, filename, "comm")); err == nil {
|
||||
name = "[" + strings.TrimSpace(string(commBuf)) + "]"
|
||||
} else {
|
||||
name = "(unknown)"
|
||||
}
|
||||
if v, err := cmdlineCache.Get([]byte(filename)); err == nil {
|
||||
separatorPos := strings.Index(string(v), "\x00")
|
||||
cmdline = string(v[:separatorPos])
|
||||
name = string(v[separatorPos+1:])
|
||||
} else {
|
||||
cmdline, name = w.readCmdline(filename)
|
||||
cmdlineCache.Set([]byte(filename), []byte(fmt.Sprintf("%s\x00%s", cmdline, name)), cmdlineCacheTimeout)
|
||||
}
|
||||
|
||||
f(Process{
|
||||
PID: pid,
|
||||
PPID: ppid,
|
||||
|
||||
@@ -23,7 +23,7 @@ var mockFS = fs.Dir("",
|
||||
},
|
||||
fs.File{
|
||||
FName: "limits",
|
||||
FContents: `Max open files 32768 65536 files`,
|
||||
FContents: "Limit Soft-Limit Hard-Limit Units\nMax open files 32768 65536 files",
|
||||
},
|
||||
fs.Dir("fd", fs.File{FName: "0"}, fs.File{FName: "1"}, fs.File{FName: "2"}),
|
||||
),
|
||||
|
||||
Reference in New Issue
Block a user