Merge pull request #841 from weaveworks/840-no-metric-aggregation

Rename metric keys so they don't conflict and get wrongly merged between node types
This commit is contained in:
Tom Wilkie
2016-01-21 09:46:19 -08:00
10 changed files with 92 additions and 82 deletions

View File

@@ -41,16 +41,16 @@ const (
NetworkTxErrors = "network_tx_errors"
NetworkTxBytes = "network_tx_bytes"
MemoryMaxUsage = "memory_max_usage"
MemoryUsage = "memory_usage"
MemoryFailcnt = "memory_failcnt"
MemoryLimit = "memory_limit"
MemoryMaxUsage = "docker_memory_max_usage"
MemoryUsage = "docker_memory_usage"
MemoryFailcnt = "docker_memory_failcnt"
MemoryLimit = "docker_memory_limit"
CPUPercpuUsage = "cpu_per_cpu_usage"
CPUUsageInUsermode = "cpu_usage_in_usermode"
CPUTotalUsage = "cpu_total_usage"
CPUUsageInKernelmode = "cpu_usage_in_kernelmode"
CPUSystemCPUUsage = "cpu_system_cpu_usage"
CPUPercpuUsage = "docker_cpu_per_cpu_usage"
CPUUsageInUsermode = "docker_cpu_usage_in_usermode"
CPUTotalUsage = "docker_cpu_total_usage"
CPUUsageInKernelmode = "docker_cpu_usage_in_kernelmode"
CPUSystemCPUUsage = "docker_cpu_system_cpu_usage"
StateRunning = "running"
StateStopped = "stopped"

View File

@@ -79,7 +79,7 @@ func TestContainer(t *testing.T) {
"docker_image_id": "baz",
"docker_label_foo1": "bar1",
"docker_label_foo2": "bar2",
"memory_usage": "12345",
"docker_memory_usage": "12345",
}).WithSets(report.Sets{
"docker_container_ports": report.MakeStringSet("1.2.3.4:80->80/tcp", "81/tcp"),
"docker_container_ips": report.MakeStringSet("1.2.3.4"),
@@ -90,8 +90,8 @@ func TestContainer(t *testing.T) {
).WithLatest(
"docker_container_state", now, "running",
).WithMetrics(report.Metrics{
"cpu_total_usage": report.MakeMetric(),
"memory_usage": report.MakeMetric().Add(now, 12345),
"docker_cpu_total_usage": report.MakeMetric(),
"docker_memory_usage": report.MakeMetric().Add(now, 12345),
}).WithParents(report.Sets{
report.ContainerImage: report.MakeStringSet(report.MakeContainerImageNodeID("baz")),
})

View File

@@ -19,8 +19,8 @@ const (
Load1 = "load1"
Load5 = "load5"
Load15 = "load15"
CPUUsage = "cpu_usage_percent"
MemUsage = "mem_usage_bytes"
CPUUsage = "host_cpu_usage_percent"
MemoryUsage = "host_mem_usage_bytes"
)
// Exposed for testing.
@@ -76,8 +76,8 @@ func (r *Reporter) Report() (report.Report, error) {
metrics := GetLoad(now)
cpuUsage, max := GetCPUUsagePercent()
metrics[CPUUsage] = report.MakeMetric().Add(now, cpuUsage).WithMax(max)
memUsage, max := GetMemoryUsageBytes()
metrics[MemUsage] = report.MakeMetric().Add(now, memUsage).WithMax(max)
memoryUsage, max := GetMemoryUsageBytes()
metrics[MemoryUsage] = report.MakeMetric().Add(now, memoryUsage).WithMax(max)
rep.Host.AddNode(report.MakeHostNodeID(r.hostID), report.MakeNodeWith(map[string]string{
Timestamp: mtime.Now().UTC().Format(time.RFC3339Nano),

View File

@@ -22,11 +22,11 @@ func TestReporter(t *testing.T) {
hostname = "hostname"
timestamp = time.Now()
load = report.Metrics{
host.Load1: report.MakeMetric().Add(timestamp, 1.0),
host.Load5: report.MakeMetric().Add(timestamp, 5.0),
host.Load15: report.MakeMetric().Add(timestamp, 15.0),
host.CPUUsage: report.MakeMetric().Add(timestamp, 30.0).WithMax(100.0),
host.MemUsage: report.MakeMetric().Add(timestamp, 60.0).WithMax(100.0),
host.Load1: report.MakeMetric().Add(timestamp, 1.0),
host.Load5: report.MakeMetric().Add(timestamp, 5.0),
host.Load15: report.MakeMetric().Add(timestamp, 15.0),
host.CPUUsage: report.MakeMetric().Add(timestamp, 30.0).WithMax(100.0),
host.MemoryUsage: report.MakeMetric().Add(timestamp, 60.0).WithMax(100.0),
}
uptime = "278h55m43s"
kernel = "release version"

View File

@@ -14,8 +14,8 @@ const (
PPID = "ppid"
Cmdline = "cmdline"
Threads = "threads"
CPUUsage = "cpu_usage_percent"
MemoryUsage = "memory_usage_bytes"
CPUUsage = "process_cpu_usage_percent"
MemoryUsage = "process_memory_usage_bytes"
)
// Reporter generates Reports containing the Process topology.

View File

@@ -27,7 +27,7 @@ var (
)
hostNodeMetrics = renderMetrics(
MetricRow{ID: host.CPUUsage, Label: "CPU", Format: percentFormat},
MetricRow{ID: host.MemUsage, Label: "Memory", Format: filesizeFormat},
MetricRow{ID: host.MemoryUsage, Label: "Memory", Format: filesizeFormat},
MetricRow{ID: host.Load1, Label: "Load (1m)", Format: defaultFormat, Group: "load"},
MetricRow{ID: host.Load5, Label: "Load (5m)", Format: defaultFormat, Group: "load"},
MetricRow{ID: host.Load15, Label: "Load (15m)", Format: defaultFormat, Group: "load"},

View File

@@ -29,15 +29,15 @@ func TestNodeMetrics(t *testing.T) {
Format: "percent",
Group: "",
Value: 0.01,
Metric: &fixture.CPUMetric,
Metric: &fixture.ClientProcess1CPUMetric,
},
{
ID: process.MemoryUsage,
Label: "Memory",
Format: "filesize",
Group: "",
Value: 0.01,
Metric: &fixture.MemoryMetric,
Value: 0.02,
Metric: &fixture.ClientProcess1MemoryMetric,
},
},
},
@@ -50,16 +50,16 @@ func TestNodeMetrics(t *testing.T) {
Label: "CPU",
Format: "percent",
Group: "",
Value: 0.01,
Metric: &fixture.CPUMetric,
Value: 0.03,
Metric: &fixture.ClientContainerCPUMetric,
},
{
ID: docker.MemoryUsage,
Label: "Memory",
Format: "filesize",
Group: "",
Value: 0.01,
Metric: &fixture.MemoryMetric,
Value: 0.04,
Metric: &fixture.ClientContainerMemoryMetric,
},
},
},
@@ -72,37 +72,37 @@ func TestNodeMetrics(t *testing.T) {
Label: "CPU",
Format: "percent",
Group: "",
Value: 0.01,
Metric: &fixture.CPUMetric,
Value: 0.07,
Metric: &fixture.ClientHostCPUMetric,
},
{
ID: host.MemUsage,
ID: host.MemoryUsage,
Label: "Memory",
Format: "filesize",
Group: "",
Value: 0.01,
Metric: &fixture.MemoryMetric,
Value: 0.08,
Metric: &fixture.ClientHostMemoryMetric,
},
{
ID: host.Load1,
Label: "Load (1m)",
Group: "load",
Value: 0.01,
Metric: &fixture.LoadMetric,
Value: 0.09,
Metric: &fixture.ClientHostLoad1Metric,
},
{
ID: host.Load5,
Label: "Load (5m)",
Group: "load",
Value: 0.01,
Metric: &fixture.LoadMetric,
Value: 0.10,
Metric: &fixture.ClientHostLoad5Metric,
},
{
ID: host.Load15,
Label: "Load (15m)",
Group: "load",
Value: 0.01,
Metric: &fixture.LoadMetric,
Value: 0.11,
Metric: &fixture.ClientHostLoad15Metric,
},
},
},

View File

@@ -90,7 +90,7 @@ var (
topologyID string
NodeSummaryGroup
}{
{report.Host, NodeSummaryGroup{TopologyID: "hosts", Label: "Hosts", Columns: []string{host.CPUUsage, host.MemUsage}}},
{report.Host, NodeSummaryGroup{TopologyID: "hosts", Label: "Hosts", Columns: []string{host.CPUUsage, host.MemoryUsage}}},
{report.Pod, NodeSummaryGroup{TopologyID: "pods", Label: "Pods", Columns: []string{}}},
{report.ContainerImage, NodeSummaryGroup{TopologyID: "containers-by-image", Label: "Container Images", Columns: []string{}}},
{report.Container, NodeSummaryGroup{TopologyID: "containers", Label: "Containers", Columns: []string{docker.CPUTotalUsage, docker.MemoryUsage}}},

View File

@@ -51,36 +51,36 @@ func TestMakeDetailedHostNode(t *testing.T) {
ID: host.CPUUsage,
Format: "percent",
Label: "CPU",
Value: 0.01,
Metric: &fixture.CPUMetric,
Value: 0.07,
Metric: &fixture.ClientHostCPUMetric,
},
{
ID: host.MemUsage,
ID: host.MemoryUsage,
Format: "filesize",
Label: "Memory",
Value: 0.01,
Metric: &fixture.MemoryMetric,
Value: 0.08,
Metric: &fixture.ClientHostMemoryMetric,
},
{
ID: host.Load1,
Group: "load",
Label: "Load (1m)",
Value: 0.01,
Metric: &fixture.LoadMetric,
Value: 0.09,
Metric: &fixture.ClientHostLoad1Metric,
},
{
ID: host.Load5,
Group: "load",
Label: "Load (5m)",
Value: 0.01,
Metric: &fixture.LoadMetric,
Value: 0.10,
Metric: &fixture.ClientHostLoad5Metric,
},
{
ID: host.Load15,
Label: "Load (15m)",
Group: "load",
Value: 0.01,
Metric: &fixture.LoadMetric,
Value: 0.11,
Metric: &fixture.ClientHostLoad15Metric,
},
},
},
@@ -139,15 +139,15 @@ func TestMakeDetailedContainerNode(t *testing.T) {
ID: docker.CPUTotalUsage,
Format: "percent",
Label: "CPU",
Value: 0.01,
Metric: &fixture.CPUMetric,
Value: 0.05,
Metric: &fixture.ServerContainerCPUMetric,
},
{
ID: docker.MemoryUsage,
Format: "filesize",
Label: "Memory",
Value: 0.01,
Metric: &fixture.MemoryMetric,
Value: 0.06,
Metric: &fixture.ServerContainerMemoryMetric,
},
},
},

View File

@@ -99,16 +99,26 @@ var (
ServiceID = "ping/pongservice"
ServiceNodeID = report.MakeServiceNodeID(KubernetesNamespace, "pongservice")
LoadMetric = report.MakeMetric().Add(Now, 0.01).WithFirst(Now.Add(-15 * time.Second))
LoadMetrics = report.Metrics{
host.Load1: LoadMetric,
host.Load5: LoadMetric,
host.Load15: LoadMetric,
}
ClientProcess1CPUMetric = report.MakeMetric().Add(Now, 0.01).WithFirst(Now.Add(-1 * time.Second))
ClientProcess1MemoryMetric = report.MakeMetric().Add(Now, 0.02).WithFirst(Now.Add(-2 * time.Second))
CPUMetric = report.MakeMetric().Add(Now, 0.01).WithFirst(Now.Add(-15 * time.Second))
ClientContainerCPUMetric = report.MakeMetric().Add(Now, 0.03).WithFirst(Now.Add(-3 * time.Second))
ClientContainerMemoryMetric = report.MakeMetric().Add(Now, 0.04).WithFirst(Now.Add(-4 * time.Second))
MemoryMetric = report.MakeMetric().Add(Now, 0.01).WithFirst(Now.Add(-15 * time.Second))
ServerContainerCPUMetric = report.MakeMetric().Add(Now, 0.05).WithFirst(Now.Add(-5 * time.Second))
ServerContainerMemoryMetric = report.MakeMetric().Add(Now, 0.06).WithFirst(Now.Add(-6 * time.Second))
ClientHostCPUMetric = report.MakeMetric().Add(Now, 0.07).WithFirst(Now.Add(-7 * time.Second))
ClientHostMemoryMetric = report.MakeMetric().Add(Now, 0.08).WithFirst(Now.Add(-8 * time.Second))
ClientHostLoad1Metric = report.MakeMetric().Add(Now, 0.09).WithFirst(Now.Add(-9 * time.Second))
ClientHostLoad5Metric = report.MakeMetric().Add(Now, 0.10).WithFirst(Now.Add(-10 * time.Second))
ClientHostLoad15Metric = report.MakeMetric().Add(Now, 0.11).WithFirst(Now.Add(-11 * time.Second))
ServerHostCPUMetric = report.MakeMetric().Add(Now, 0.12).WithFirst(Now.Add(-12 * time.Second))
ServerHostMemoryMetric = report.MakeMetric().Add(Now, 0.13).WithFirst(Now.Add(-13 * time.Second))
ServerHostLoad1Metric = report.MakeMetric().Add(Now, 0.14).WithFirst(Now.Add(-14 * time.Second))
ServerHostLoad5Metric = report.MakeMetric().Add(Now, 0.15).WithFirst(Now.Add(-15 * time.Second))
ServerHostLoad15Metric = report.MakeMetric().Add(Now, 0.16).WithFirst(Now.Add(-16 * time.Second))
Report = report.Report{
Endpoint: report.Topology{
@@ -210,8 +220,8 @@ var (
"container": report.MakeStringSet(ClientContainerNodeID),
"container_image": report.MakeStringSet(ClientContainerImageNodeID),
}).WithMetrics(report.Metrics{
process.CPUUsage: CPUMetric,
process.MemoryUsage: MemoryMetric,
process.CPUUsage: ClientProcess1CPUMetric,
process.MemoryUsage: ClientProcess1MemoryMetric,
}),
ClientProcess2NodeID: report.MakeNodeWith(map[string]string{
process.PID: Client2PID,
@@ -257,8 +267,8 @@ var (
"container_image": report.MakeStringSet(ClientContainerImageNodeID),
"pod": report.MakeStringSet(ClientPodID),
}).WithMetrics(report.Metrics{
docker.CPUTotalUsage: CPUMetric,
docker.MemoryUsage: MemoryMetric,
docker.CPUTotalUsage: ClientContainerCPUMetric,
docker.MemoryUsage: ClientContainerMemoryMetric,
}),
ServerContainerNodeID: report.MakeNodeWith(map[string]string{
docker.ContainerID: ServerContainerID,
@@ -277,8 +287,8 @@ var (
"container_image": report.MakeStringSet(ServerContainerImageNodeID),
"pod": report.MakeStringSet(ServerPodID),
}).WithMetrics(report.Metrics{
docker.CPUTotalUsage: CPUMetric,
docker.MemoryUsage: MemoryMetric,
docker.CPUTotalUsage: ServerContainerCPUMetric,
docker.MemoryUsage: ServerContainerMemoryMetric,
}),
},
},
@@ -342,11 +352,11 @@ var (
}).WithID(ClientHostNodeID).WithTopology(report.Host).WithSets(report.Sets{
host.LocalNetworks: report.MakeStringSet("10.10.10.0/24"),
}).WithMetrics(report.Metrics{
host.CPUUsage: CPUMetric,
host.MemUsage: MemoryMetric,
host.Load1: LoadMetric,
host.Load5: LoadMetric,
host.Load15: LoadMetric,
host.CPUUsage: ClientHostCPUMetric,
host.MemoryUsage: ClientHostMemoryMetric,
host.Load1: ClientHostLoad1Metric,
host.Load5: ClientHostLoad5Metric,
host.Load15: ClientHostLoad15Metric,
}),
ServerHostNodeID: report.MakeNodeWith(map[string]string{
"host_name": ServerHostName,
@@ -355,11 +365,11 @@ var (
}).WithID(ServerHostNodeID).WithTopology(report.Host).WithSets(report.Sets{
host.LocalNetworks: report.MakeStringSet("10.10.10.0/24"),
}).WithMetrics(report.Metrics{
host.CPUUsage: CPUMetric,
host.MemUsage: MemoryMetric,
host.Load1: LoadMetric,
host.Load5: LoadMetric,
host.Load15: LoadMetric,
host.CPUUsage: ServerHostCPUMetric,
host.MemoryUsage: ServerHostMemoryMetric,
host.Load1: ServerHostLoad1Metric,
host.Load5: ServerHostLoad5Metric,
host.Load15: ServerHostLoad15Metric,
}),
},
},