Merge pull request #1225 from weaveworks/1149-more-meaningful-metrics-max-mmmmmmm

Set the memory and open files maximums based on the host
This commit is contained in:
Paul Bellamy
2016-04-07 10:36:30 +01:00
8 changed files with 64 additions and 20 deletions

View File

@@ -22,9 +22,6 @@ export function getClipPathDefinition(clipId, size, height,
}
//
// Open files, 100k should be enough for anyone?
const openFilesScale = d3.scale.log().domain([1, 100000]).range([0, 1]);
//
// loadScale(1) == 0.5; E.g. a nicely balanced system :).
const loadScale = d3.scale.log().domain([0.01, 100]).range([0, 1]);
@@ -39,10 +36,7 @@ export function getMetricValue(metric, size) {
let valuePercentage = value === 0 ? 0 : value / m.max;
let max = m.max;
if (m.id === 'open_files_count') {
valuePercentage = openFilesScale(value);
max = null;
} else if (_.includes(['load1', 'load5', 'load15'], m.id)) {
if (_.includes(['load1', 'load5', 'load15'], m.id)) {
valuePercentage = loadScale(value);
max = null;
}

View File

@@ -277,7 +277,7 @@ func (c *container) ports(localAddrs []net.IP) report.StringSet {
// memoryUsageMetric folds a sequence of docker stats samples into a single
// memory-usage metric; each sample's value is the container's memory usage
// and (post-change) its max is the container's configured memory limit.
func (c *container) memoryUsageMetric(stats []docker.Stats) report.Metric {
result := report.MakeMetric()
for _, s := range stats {
// NOTE(review): the next two lines are the before/after versions from the
// rendered diff; only the .WithMax(...) variant is the current code.
result = result.Add(s.Read, float64(s.MemoryStats.Usage))
result = result.Add(s.Read, float64(s.MemoryStats.Usage)).WithMax(float64(s.MemoryStats.Limit))
}
return result
}

View File

@@ -66,6 +66,7 @@ func TestContainer(t *testing.T) {
stats := &client.Stats{}
stats.Read = now
stats.MemoryStats.Usage = 12345
stats.MemoryStats.Limit = 45678
encoder := codec.NewEncoder(writer, &codec.JsonHandle{})
if err = encoder.Encode(&stats); err != nil {
t.Error(err)
@@ -93,7 +94,7 @@ func TestContainer(t *testing.T) {
docker.AttachContainer, docker.ExecContainer,
).WithMetrics(report.Metrics{
"docker_cpu_total_usage": report.MakeMetric(),
"docker_memory_usage": report.MakeMetric().Add(now, 12345),
"docker_memory_usage": report.MakeMetric().Add(now, 12345).WithMax(45678),
}).WithParents(report.EmptySets.
Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID("baz"))),
)

View File

@@ -47,7 +47,11 @@ var mockFS = fs.Dir("",
),
fs.File{
FName: "stat",
FContents: "1 na R 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0",
FContents: "1 na R 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0",
},
fs.File{
FName: "limits",
FContents: "",
},
),
),

View File

@@ -84,8 +84,8 @@ func (r *Reporter) processTopology() (report.Topology, error) {
node = node.WithMetric(CPUUsage, report.MakeMetric().Add(now, cpuUsage).WithMax(maxCPU))
}
node = node.WithMetric(MemoryUsage, report.MakeMetric().Add(now, float64(p.RSSBytes)))
node = node.WithMetric(OpenFilesCount, report.MakeMetric().Add(now, float64(p.OpenFilesCount)))
node = node.WithMetric(MemoryUsage, report.MakeMetric().Add(now, float64(p.RSSBytes)).WithMax(float64(p.RSSBytesLimit)))
node = node.WithMetric(OpenFilesCount, report.MakeMetric().Add(now, float64(p.OpenFilesCount)).WithMax(float64(p.OpenFilesLimit)))
t.AddNode(nodeID, node)
})

View File

@@ -10,7 +10,9 @@ type Process struct {
Threads int
Jiffies uint64
RSSBytes uint64
RSSBytesLimit uint64
OpenFilesCount int
OpenFilesLimit uint64
}
// Walker is something that walks the /proc directory

View File

@@ -23,7 +23,7 @@ func NewWalker(procRoot string) Walker {
return &walker{procRoot: procRoot}
}
func readStats(path string) (ppid, threads int, jiffies, rss uint64, err error) {
func readStats(path string) (ppid, threads int, jiffies, rss, rssLimit uint64, err error) {
var (
buf []byte
userJiffies, sysJiffies, rssPages uint64
@@ -33,7 +33,8 @@ func readStats(path string) (ppid, threads int, jiffies, rss uint64, err error)
return
}
splits := strings.Fields(string(buf))
if len(splits) < 24 {
fmt.Printf("Got stats: %q, %d fields\n", splits, len(splits))
if len(splits) < 25 {
err = fmt.Errorf("Invalid /proc/PID/stat")
return
}
@@ -59,9 +60,28 @@ func readStats(path string) (ppid, threads int, jiffies, rss uint64, err error)
return
}
rss = rssPages * uint64(os.Getpagesize())
rssLimit, err = strconv.ParseUint(splits[24], 10, 64)
return
}
// readLimits parses a /proc/PID/limits file and returns the soft limit on
// open file descriptors. It returns 0 (and a nil error) when the file has
// no "Max open files" entry, e.g. an empty file.
func readLimits(path string) (openFilesLimit uint64, err error) {
	buf, err := cachedReadFile(path)
	if err != nil {
		return 0, err
	}
	for _, line := range strings.Split(string(buf), "\n") {
		if strings.HasPrefix(line, "Max open files") {
			// Line format: "Max open files  <soft>  <hard>  files";
			// splits[3] is the soft limit.
			splits := strings.Fields(line)
			if len(splits) < 6 {
				return 0, fmt.Errorf("Invalid /proc/PID/limits")
			}
			// ParseUint (matching readStats' rssLimit parsing) rejects
			// negative values and cannot overflow, unlike Atoi+cast, and
			// avoids shadowing the named return values with :=.
			return strconv.ParseUint(splits[3], 10, 64)
		}
	}
	return 0, nil
}
// Walk walks the supplied directory (expecting it to look like /proc)
// and marshalls the files into instances of Process, which it then
// passes one-by-one to the supplied function. Walk is only made public
@@ -78,7 +98,7 @@ func (w *walker) Walk(f func(Process, Process)) error {
continue
}
ppid, threads, jiffies, rss, err := readStats(path.Join(w.procRoot, filename, "stat"))
ppid, threads, jiffies, rss, rssLimit, err := readStats(path.Join(w.procRoot, filename, "stat"))
if err != nil {
continue
}
@@ -88,6 +108,11 @@ func (w *walker) Walk(f func(Process, Process)) error {
continue
}
openFilesLimit, err := readLimits(path.Join(w.procRoot, filename, "limits"))
if err != nil {
continue
}
cmdline, name := "", "(unknown)"
if cmdlineBuf, err := cachedReadFile(path.Join(w.procRoot, filename, "cmdline")); err == nil {
// like proc, treat name as the first element of command line
@@ -108,7 +133,9 @@ func (w *walker) Walk(f func(Process, Process)) error {
Threads: threads,
Jiffies: jiffies,
RSSBytes: rss,
RSSBytesLimit: rssLimit,
OpenFilesCount: len(openFiles),
OpenFilesLimit: openFilesLimit,
}, Process{})
}

View File

@@ -19,7 +19,11 @@ var mockFS = fs.Dir("",
},
fs.File{
FName: "stat",
FContents: "3 na R 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0",
FContents: "3 na R 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 2 2048",
},
fs.File{
FName: "limits",
FContents: `Max open files 32768 65536 files`,
},
fs.Dir("fd", fs.File{FName: "0"}, fs.File{FName: "1"}, fs.File{FName: "2"}),
),
@@ -30,7 +34,11 @@ var mockFS = fs.Dir("",
},
fs.File{
FName: "stat",
FContents: "2 na R 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0",
FContents: "2 na R 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0",
},
fs.File{
FName: "limits",
FContents: ``,
},
fs.Dir("fd", fs.File{FName: "1"}, fs.File{FName: "2"}),
),
@@ -41,7 +49,11 @@ var mockFS = fs.Dir("",
},
fs.File{
FName: "stat",
FContents: "4 na R 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0",
FContents: "4 na R 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0",
},
fs.File{
FName: "limits",
FContents: ``,
},
fs.Dir("fd", fs.File{FName: "0"}),
),
@@ -53,7 +65,11 @@ var mockFS = fs.Dir("",
},
fs.File{
FName: "stat",
FContents: "1 na R 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0",
FContents: "1 na R 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0",
},
fs.File{
FName: "limits",
FContents: ``,
},
fs.Dir("fd"),
),
@@ -65,7 +81,7 @@ func TestWalker(t *testing.T) {
defer fs_hook.Restore()
want := map[int]process.Process{
3: {PID: 3, PPID: 2, Name: "curl", Cmdline: "curl google.com", Threads: 1, OpenFilesCount: 3},
3: {PID: 3, PPID: 2, Name: "curl", Cmdline: "curl google.com", Threads: 1, RSSBytes: 8192, RSSBytesLimit: 2048, OpenFilesCount: 3, OpenFilesLimit: 32768},
2: {PID: 2, PPID: 1, Name: "bash", Cmdline: "bash", Threads: 1, OpenFilesCount: 2},
4: {PID: 4, PPID: 3, Name: "apache", Cmdline: "apache", Threads: 1, OpenFilesCount: 1},
1: {PID: 1, PPID: 0, Name: "init", Cmdline: "init", Threads: 1, OpenFilesCount: 0},