From 56122dd0cc6e2fb6a5b1dd146c141cadeef7eb8a Mon Sep 17 00:00:00 2001 From: Paul Bellamy Date: Mon, 7 Dec 2015 13:27:08 +0000 Subject: [PATCH] Details panel backend redesign Megasquish: [app] remove unused edge endpoint [WIP] refactoring node details api endpoint [WIP] plumbing the children through the rendering process adding IDList.Remove and StringSet.Remove [WIP] working on adding parents to detailed node renderings WIP UI components with mock backend data for new details grouping children by type UI components for node details health and info metric formatters for details panel Column headers and links for details table [WIP] started on rendering node metadata and metrics in the detail view DetailedNode.LabelMajor -> DetailedNode.Label rendering decent labels for parents of detailed nodes render metrics onto the top-level detailed node removing dead code Links to relatives metrics have a Format not Unit Show more/less actions for tables and relatives adjusted metric formatter TopologyTagger should tag k8s topology nodes make renderablenode ids more consistent, e.g. container:abcd1234 working on rendering correct summaries for each node adding report.Node.Rank, so that merging is independent of order rendering children and parents correctly output child renderableNode ids, so we can link to them add group field to metrics, so they can be grouped Refactored details health items to prepare for grouping add metrics to processNodeSummaries hide summary section if there is no data for it fixing up tests moving detailed node rendering into a separate package Node ID/Topology are fields not metadata - This way I think we don't have to care about Metadata being non-commutative. - ID and topology are still non-commutative, as I'm not sure how to sanely merge them, but it's possible we don't care. 
host memory usage is a filesize, not a percent working on fixing some tests adding children to hosts detail panel - Had to redo how parents are calculated, so that children wouldn't interfere with it - have to have the host at the end because it is non-commutative only render links for linkable children (i.e. not unconnected processes) resolving TODOs fixing up lint errors make nil a valid value for render.Children so tests are cleaner working on backend tests make client handle missing metrics property Stop rendering container image nodes with process summaries/parents fix parent link to container images Calculate parents as a set on report.Node (except k8s) refactoring detailed.NodeSummary stuff removing RenderableNode.Summary*, we already track it on report.Node working on tests add Columns field to NodeSummaryGroup fixing up render/topologies_test fix children links to container images get children of hosts rendering right working on host renderer tests Change container report.Node.ID to a1b2c3; The id should be globally unique, so we don't need the host id. This lets the kubernetes probe return a container node with the pod id, which will get merged into the real containers with other reports. The catch is that the kubernetes api doesn't tell us which hostname the container is running on, so we can't populate the old-style node ids. change terminology of system pods and services Fix kubernetes services with no selector Fixes handling of kubernetes service, which has no pods fix parent links for pods/services refactor detailed metadata to include sets and latest data fixing up host rendering tests fleshing out tests for node metadata and metrics don't render container pseudo-nodes as processes Update test for id format change. 
--- app/api_topology.go | 5 +- app/api_topology_test.go | 9 +- integration/410_container_control_test.sh | 2 +- probe/docker/container.go | 6 +- probe/docker/container_test.go | 2 + probe/docker/controls.go | 2 +- probe/docker/controls_test.go | 4 +- probe/docker/reporter.go | 8 +- probe/docker/reporter_test.go | 8 +- probe/docker/tagger.go | 7 +- probe/docker/tagger_test.go | 9 +- probe/host/tagger.go | 15 +- probe/host/tagger_test.go | 2 + probe/kubernetes/pod.go | 9 + probe/kubernetes/reporter.go | 21 +- probe/kubernetes/reporter_test.go | 28 +- probe/overlay/weave.go | 2 +- probe/overlay/weave_test.go | 2 +- probe/probe_internal_test.go | 4 +- probe/topology_tagger.go | 12 +- prog/probe.go | 2 +- render/detailed/metadata.go | 135 +++++ render/detailed/metadata_test.go | 53 ++ render/detailed/metrics.go | 176 +++++++ render/detailed/metrics_test.go | 121 +++++ render/detailed/node.go | 208 ++++++++ render/detailed/node_test.go | 185 +++++++ render/detailed/summary.go | 140 ++++++ render/detailed_node.go | 572 ---------------------- render/detailed_node_test.go | 249 ---------- render/expected/expected.go | 281 +++++------ render/filters.go | 11 + render/filters_test.go | 20 +- render/id.go | 36 +- render/mapping.go | 131 +++-- render/mapping_internal_test.go | 2 +- render/renderable_node.go | 39 +- render/renderable_node_test.go | 8 +- render/selectors.go | 7 +- render/short_lived_connections_test.go | 18 +- render/topologies.go | 50 +- render/topologies_test.go | 8 +- report/id.go | 24 +- report/id_list.go | 5 + report/merge_test.go | 25 +- report/metrics.go | 8 +- report/node_set.go | 69 +++ report/node_set_test.go | 152 ++++++ report/topology.go | 60 ++- report/topology_test.go | 21 + test/fixture/report_fixture.go | 105 +++- 51 files changed, 1894 insertions(+), 1184 deletions(-) create mode 100644 render/detailed/metadata.go create mode 100644 render/detailed/metadata_test.go create mode 100644 render/detailed/metrics.go create mode 100644 
render/detailed/metrics_test.go create mode 100644 render/detailed/node.go create mode 100644 render/detailed/node_test.go create mode 100644 render/detailed/summary.go delete mode 100644 render/detailed_node.go delete mode 100644 render/detailed_node_test.go create mode 100644 report/node_set.go create mode 100644 report/node_set_test.go diff --git a/app/api_topology.go b/app/api_topology.go index 1e682294d..f0769976f 100644 --- a/app/api_topology.go +++ b/app/api_topology.go @@ -7,6 +7,7 @@ import ( "github.com/gorilla/websocket" "github.com/weaveworks/scope/render" + "github.com/weaveworks/scope/render/detailed" ) const ( @@ -21,7 +22,7 @@ type APITopology struct { // APINode is returned by the /api/topology/{name}/{id} handler. type APINode struct { - Node render.DetailedNode `json:"node"` + Node detailed.Node `json:"node"` } // Full topology. @@ -59,7 +60,7 @@ func handleNode(nodeID string) func(Reporter, render.Renderer, http.ResponseWrit http.NotFound(w, r) return } - respondWith(w, http.StatusOK, APINode{Node: render.MakeDetailedNode(rpt, node)}) + respondWith(w, http.StatusOK, APINode{Node: detailed.MakeNode(rpt, node)}) } } diff --git a/app/api_topology_test.go b/app/api_topology_test.go index 600d01f07..0282710fa 100644 --- a/app/api_topology_test.go +++ b/app/api_topology_test.go @@ -82,8 +82,7 @@ func TestAPITopologyApplications(t *testing.T) { t.Fatal(err) } equals(t, expected.ServerProcessID, node.Node.ID) - equals(t, "apache", node.Node.LabelMajor) - equals(t, fmt.Sprintf("%s (server:%s)", fixture.ServerHostID, fixture.ServerPID), node.Node.LabelMinor) + equals(t, "apache", node.Node.Label) equals(t, false, node.Node.Pseudo) // Let's not unit-test the specific content of the detail tables } @@ -96,8 +95,7 @@ func TestAPITopologyApplications(t *testing.T) { t.Fatal(err) } equals(t, fixture.Client1Name, node.Node.ID) - equals(t, fixture.Client1Name, node.Node.LabelMajor) - equals(t, "2 processes", node.Node.LabelMinor) + equals(t, fixture.Client1Name, 
node.Node.Label) equals(t, false, node.Node.Pseudo) // Let's not unit-test the specific content of the detail tables } @@ -125,8 +123,7 @@ func TestAPITopologyHosts(t *testing.T) { t.Fatal(err) } equals(t, expected.ServerHostRenderedID, node.Node.ID) - equals(t, "server", node.Node.LabelMajor) - equals(t, "hostname.com", node.Node.LabelMinor) + equals(t, "server", node.Node.Label) equals(t, false, node.Node.Pseudo) // Let's not unit-test the specific content of the detail tables } diff --git a/integration/410_container_control_test.sh b/integration/410_container_control_test.sh index b96e434b9..fa28a33cd 100755 --- a/integration/410_container_control_test.sh +++ b/integration/410_container_control_test.sh @@ -14,7 +14,7 @@ wait_for_containers $HOST1 60 alpine assert "docker_on $HOST1 inspect --format='{{.State.Running}}' alpine" "true" PROBEID=$(docker_on $HOST1 logs weavescope 2>&1 | grep "probe starting" | sed -n 's/^.*ID \([0-9a-f]*\)$/\1/p') HOSTID=$(echo $HOST1 | cut -d"." -f1) -assert_raises "curl -f -X POST 'http://$HOST1:4040/api/control/$PROBEID/$HOSTID;$CID/docker_stop_container'" +assert_raises "curl -f -X POST 'http://$HOST1:4040/api/control/$PROBEID/$CID;/docker_stop_container'" sleep 5 assert "docker_on $HOST1 inspect --format='{{.State.Running}}' alpine" "false" diff --git a/probe/docker/container.go b/probe/docker/container.go index f848c2f46..16e8ea3fa 100644 --- a/probe/docker/container.go +++ b/probe/docker/container.go @@ -331,7 +331,11 @@ func (c *container) GetNode(hostID string, localAddrs []net.IP) report.Node { ContainerIPsWithScopes: report.MakeStringSet(ipsWithScopes...), }).WithLatest( ContainerState, mtime.Now(), state, - ).WithMetrics(c.metrics()) + ).WithMetrics( + c.metrics(), + ).WithParents(report.Sets{ + "container_image": report.MakeStringSet(report.MakeContainerImageNodeID(hostID, c.container.Image)), + }) if c.container.State.Paused { result = result.WithControls(UnpauseContainer) diff --git a/probe/docker/container_test.go 
b/probe/docker/container_test.go index c83dbe8a6..609fbb05e 100644 --- a/probe/docker/container_test.go +++ b/probe/docker/container_test.go @@ -92,6 +92,8 @@ func TestContainer(t *testing.T) { ).WithMetrics(report.Metrics{ "cpu_total_usage": report.MakeMetric(), "memory_usage": report.MakeMetric().Add(now, 12345), + }).WithParents(report.Sets{ + "container_image": report.MakeStringSet(report.MakeContainerImageNodeID("scope", "baz")), }) test.Poll(t, 100*time.Millisecond, want, func() interface{} { node := c.GetNode("scope", []net.IP{}) diff --git a/probe/docker/controls.go b/probe/docker/controls.go index 2057069e4..b0871e77f 100644 --- a/probe/docker/controls.go +++ b/probe/docker/controls.go @@ -142,7 +142,7 @@ func (r *registry) execContainer(containerID string, req xfer.Request) xfer.Resp func captureContainerID(f func(string, xfer.Request) xfer.Response) func(xfer.Request) xfer.Response { return func(req xfer.Request) xfer.Response { - _, containerID, ok := report.ParseContainerNodeID(req.NodeID) + containerID, ok := report.ParseContainerNodeID(req.NodeID) if !ok { return xfer.ResponseErrorf("Invalid ID: %s", req.NodeID) } diff --git a/probe/docker/controls_test.go b/probe/docker/controls_test.go index 97ab285f6..f6b355704 100644 --- a/probe/docker/controls_test.go +++ b/probe/docker/controls_test.go @@ -30,7 +30,7 @@ func TestControls(t *testing.T) { } { result := controls.HandleControlRequest(xfer.Request{ Control: tc.command, - NodeID: report.MakeContainerNodeID("", "a1b2c3d4e5"), + NodeID: report.MakeContainerNodeID("a1b2c3d4e5"), }) if !reflect.DeepEqual(result, xfer.Response{ Error: tc.result, @@ -72,7 +72,7 @@ func TestPipes(t *testing.T) { } { result := controls.HandleControlRequest(xfer.Request{ Control: tc, - NodeID: report.MakeContainerNodeID("", "ping"), + NodeID: report.MakeContainerNodeID("ping"), }) want := xfer.Response{ Pipe: "pipeid", diff --git a/probe/docker/reporter.go b/probe/docker/reporter.go index 0de846be9..bef4d1160 100644 --- 
a/probe/docker/reporter.go +++ b/probe/docker/reporter.go @@ -48,7 +48,7 @@ func (r *Reporter) ContainerUpdated(c Container) { // Publish a 'short cut' report container just this container rpt := report.MakeReport() rpt.Shortcut = true - rpt.Container.AddNode(report.MakeContainerNodeID(r.hostID, c.ID()), c.GetNode(r.hostID, localAddrs)) + rpt.Container.AddNode(report.MakeContainerNodeID(c.ID()), c.GetNode(r.hostID, localAddrs)) r.probe.Publish(rpt) } @@ -104,7 +104,7 @@ func (r *Reporter) containerTopology(localAddrs []net.IP) report.Topology { }) r.registry.WalkContainers(func(c Container) { - nodeID := report.MakeContainerNodeID(r.hostID, c.ID()) + nodeID := report.MakeContainerNodeID(c.ID()) result.AddNode(nodeID, c.GetNode(r.hostID, localAddrs)) }) @@ -117,6 +117,8 @@ func (r *Reporter) containerImageTopology() report.Topology { r.registry.WalkImages(func(image *docker_client.APIImages) { nmd := report.MakeNodeWith(map[string]string{ ImageID: image.ID, + }).WithParents(report.Sets{ + "host": report.MakeStringSet(report.MakeHostNodeID(r.hostID)), }) AddLabels(nmd, image.Labels) @@ -124,7 +126,7 @@ func (r *Reporter) containerImageTopology() report.Topology { nmd.Metadata[ImageName] = image.RepoTags[0] } - nodeID := report.MakeContainerNodeID(r.hostID, image.ID) + nodeID := report.MakeContainerImageNodeID(r.hostID, image.ID) result.AddNode(nodeID, nmd) }) diff --git a/probe/docker/reporter_test.go b/probe/docker/reporter_test.go index 6af889887..c19eee696 100644 --- a/probe/docker/reporter_test.go +++ b/probe/docker/reporter_test.go @@ -55,7 +55,7 @@ func TestReporter(t *testing.T) { want := report.MakeReport() want.Container = report.Topology{ Nodes: report.Nodes{ - report.MakeContainerNodeID("", "ping"): report.MakeNodeWith(map[string]string{ + report.MakeContainerNodeID("ping"): report.MakeNodeWith(map[string]string{ docker.ContainerID: "ping", docker.ContainerName: "pong", docker.ImageID: "baz", @@ -101,15 +101,17 @@ func TestReporter(t *testing.T) { } 
want.ContainerImage = report.Topology{ Nodes: report.Nodes{ - report.MakeContainerNodeID("", "baz"): report.MakeNodeWith(map[string]string{ + report.MakeContainerImageNodeID("host1", "baz"): report.MakeNodeWith(map[string]string{ docker.ImageID: "baz", docker.ImageName: "bang", + }).WithParents(report.Sets{ + "host": report.MakeStringSet(report.MakeHostNodeID("host1")), }), }, Controls: report.Controls{}, } - reporter := docker.NewReporter(mockRegistryInstance, "", nil) + reporter := docker.NewReporter(mockRegistryInstance, "host1", nil) have, _ := reporter.Report() if !reflect.DeepEqual(want, have) { t.Errorf("%s", test.Diff(want, have)) diff --git a/probe/docker/tagger.go b/probe/docker/tagger.go index 4582d70c1..1c7d3564e 100644 --- a/probe/docker/tagger.go +++ b/probe/docker/tagger.go @@ -23,13 +23,15 @@ var ( // nodes that have a PID. type Tagger struct { registry Registry + hostID string procWalker process.Walker } // NewTagger returns a usable Tagger. -func NewTagger(registry Registry, procWalker process.Walker) *Tagger { +func NewTagger(registry Registry, hostID string, procWalker process.Walker) *Tagger { return &Tagger{ registry: registry, + hostID: hostID, procWalker: procWalker, } } @@ -84,6 +86,9 @@ func (t *Tagger) tag(tree process.Tree, topology *report.Topology) { topology.AddNode(nodeID, report.MakeNodeWith(map[string]string{ ContainerID: c.ID(), + }).WithParents(report.Sets{ + "container": report.MakeStringSet(report.MakeContainerNodeID(c.ID())), + "container_image": report.MakeStringSet(report.MakeContainerImageNodeID(t.hostID, c.Image())), })) } } diff --git a/probe/docker/tagger_test.go b/probe/docker/tagger_test.go index 1415d4e1b..f58e7df05 100644 --- a/probe/docker/tagger_test.go +++ b/probe/docker/tagger_test.go @@ -38,7 +38,12 @@ func TestTagger(t *testing.T) { var ( pid1NodeID = report.MakeProcessNodeID("somehost.com", "2") pid2NodeID = report.MakeProcessNodeID("somehost.com", "3") - wantNode = 
report.MakeNodeWith(map[string]string{docker.ContainerID: "ping"}) + wantNode = report.MakeNodeWith(map[string]string{ + docker.ContainerID: "ping", + }).WithParents(report.Sets{ + "container": report.MakeStringSet(report.MakeContainerNodeID("ping")), + "container_image": report.MakeStringSet(report.MakeContainerImageNodeID("somehost.com", "baz")), + }) ) input := report.MakeReport() @@ -49,7 +54,7 @@ func TestTagger(t *testing.T) { want.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"}).Merge(wantNode)) want.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"}).Merge(wantNode)) - tagger := docker.NewTagger(mockRegistryInstance, nil) + tagger := docker.NewTagger(mockRegistryInstance, "somehost.com", nil) have, err := tagger.Tag(input) if err != nil { t.Errorf("%v", err) diff --git a/probe/host/tagger.go b/probe/host/tagger.go index 6b5237896..af5feb9ec 100644 --- a/probe/host/tagger.go +++ b/probe/host/tagger.go @@ -26,16 +26,21 @@ func (Tagger) Name() string { return "Host" } // Tag implements Tagger. 
func (t Tagger) Tag(r report.Report) (report.Report, error) { - metadata := map[string]string{ - report.HostNodeID: t.hostNodeID, - report.ProbeID: t.probeID, - } + var ( + metadata = map[string]string{ + report.HostNodeID: t.hostNodeID, + report.ProbeID: t.probeID, + } + parents = report.Sets{ + "host": report.MakeStringSet(t.hostNodeID), + } + ) // Explicity don't tag Endpoints and Addresses - These topologies include pseudo nodes, // and as such do their own host tagging for _, topology := range []report.Topology{r.Process, r.Container, r.ContainerImage, r.Host, r.Overlay} { for id, node := range topology.Nodes { - topology.AddNode(id, node.WithMetadata(metadata)) + topology.AddNode(id, node.WithMetadata(metadata).WithParents(parents)) } } return r, nil diff --git a/probe/host/tagger_test.go b/probe/host/tagger_test.go index 0a736f4ce..453486105 100644 --- a/probe/host/tagger_test.go +++ b/probe/host/tagger_test.go @@ -22,6 +22,8 @@ func TestTagger(t *testing.T) { want := nodeMetadata.Merge(report.MakeNodeWith(map[string]string{ report.HostNodeID: report.MakeHostNodeID(hostID), report.ProbeID: probeID, + }).WithParents(report.Sets{ + "host": report.MakeStringSet(report.MakeHostNodeID(hostID)), })) rpt, _ := host.NewTagger(hostID, probeID).Tag(r) have := rpt.Process.Nodes[endpointNodeID].Copy() diff --git a/probe/kubernetes/pod.go b/probe/kubernetes/pod.go index 6cf86a205..0f1dd5f9c 100644 --- a/probe/kubernetes/pod.go +++ b/probe/kubernetes/pod.go @@ -84,5 +84,14 @@ func (p *pod) GetNode() report.Node { if len(p.serviceIDs) > 0 { n.Metadata[ServiceIDs] = strings.Join(p.serviceIDs, " ") } + for _, serviceID := range p.serviceIDs { + segments := strings.SplitN(serviceID, "/", 2) + if len(segments) != 2 { + continue + } + n = n.WithParents(report.Sets{ + "service": report.MakeStringSet(report.MakeServiceNodeID(p.Namespace(), segments[1])), + }) + } return n } diff --git a/probe/kubernetes/reporter.go b/probe/kubernetes/reporter.go index 816c0f005..5673bf5ff 100644 
--- a/probe/kubernetes/reporter.go +++ b/probe/kubernetes/reporter.go @@ -26,12 +26,13 @@ func (r *Reporter) Report() (report.Report, error) { if err != nil { return result, err } - podTopology, err := r.podTopology(services) + podTopology, containerTopology, err := r.podTopology(services) if err != nil { return result, err } result.Service = result.Service.Merge(serviceTopology) result.Pod = result.Pod.Merge(podTopology) + result.Container = result.Container.Merge(containerTopology) return result, nil } @@ -49,8 +50,8 @@ func (r *Reporter) serviceTopology() (report.Topology, []Service, error) { return result, services, err } -func (r *Reporter) podTopology(services []Service) (report.Topology, error) { - result := report.MakeTopology() +func (r *Reporter) podTopology(services []Service) (report.Topology, report.Topology, error) { + pods, containers := report.MakeTopology(), report.MakeTopology() err := r.client.WalkPods(func(p Pod) error { for _, service := range services { if service.Selector().Matches(p.Labels()) { @@ -58,8 +59,18 @@ func (r *Reporter) podTopology(services []Service) (report.Topology, error) { } } nodeID := report.MakePodNodeID(p.Namespace(), p.Name()) - result = result.AddNode(nodeID, p.GetNode()) + pods = pods.AddNode(nodeID, p.GetNode()) + + container := report.MakeNodeWith(map[string]string{ + PodID: p.ID(), + Namespace: p.Namespace(), + }).WithParents(report.Sets{ + "pod": report.MakeStringSet(nodeID), + }) + for _, containerID := range p.ContainerIDs() { + containers.AddNode(report.MakeContainerNodeID(containerID), container) + } return nil }) - return result, err + return pods, containers, err } diff --git a/probe/kubernetes/reporter_test.go b/probe/kubernetes/reporter_test.go index 36b61f59f..49a5a2507 100644 --- a/probe/kubernetes/reporter_test.go +++ b/probe/kubernetes/reporter_test.go @@ -111,6 +111,7 @@ func TestReporter(t *testing.T) { want := report.MakeReport() pod1ID := report.MakePodNodeID("ping", "pong-a") pod2ID := 
report.MakePodNodeID("ping", "pong-b") + serviceID := report.MakeServiceNodeID("ping", "pongservice") want.Pod = report.MakeTopology().AddNode(pod1ID, report.MakeNodeWith(map[string]string{ kubernetes.PodID: "ping/pong-a", kubernetes.PodName: "pong-a", @@ -118,6 +119,8 @@ func TestReporter(t *testing.T) { kubernetes.PodCreated: pod1.Created(), kubernetes.PodContainerIDs: "container1 container2", kubernetes.ServiceIDs: "ping/pongservice", + }).WithParents(report.Sets{ + "service": report.MakeStringSet(serviceID), })).AddNode(pod2ID, report.MakeNodeWith(map[string]string{ kubernetes.PodID: "ping/pong-b", kubernetes.PodName: "pong-b", @@ -125,13 +128,36 @@ func TestReporter(t *testing.T) { kubernetes.PodCreated: pod1.Created(), kubernetes.PodContainerIDs: "container3 container4", kubernetes.ServiceIDs: "ping/pongservice", + }).WithParents(report.Sets{ + "service": report.MakeStringSet(serviceID), })) - want.Service = report.MakeTopology().AddNode(report.MakeServiceNodeID("ping", "pongservice"), report.MakeNodeWith(map[string]string{ + want.Service = report.MakeTopology().AddNode(serviceID, report.MakeNodeWith(map[string]string{ kubernetes.ServiceID: "ping/pongservice", kubernetes.ServiceName: "pongservice", kubernetes.Namespace: "ping", kubernetes.ServiceCreated: pod1.Created(), })) + want.Container = report.MakeTopology().AddNode(report.MakeContainerNodeID("container1"), report.MakeNodeWith(map[string]string{ + kubernetes.PodID: "ping/pong-a", + kubernetes.Namespace: "ping", + }).WithParents(report.Sets{ + "pod": report.MakeStringSet(pod1ID), + })).AddNode(report.MakeContainerNodeID("container2"), report.MakeNodeWith(map[string]string{ + kubernetes.PodID: "ping/pong-a", + kubernetes.Namespace: "ping", + }).WithParents(report.Sets{ + "pod": report.MakeStringSet(pod1ID), + })).AddNode(report.MakeContainerNodeID("container3"), report.MakeNodeWith(map[string]string{ + kubernetes.PodID: "ping/pong-b", + kubernetes.Namespace: "ping", + }).WithParents(report.Sets{ + "pod": 
report.MakeStringSet(pod2ID), + })).AddNode(report.MakeContainerNodeID("container4"), report.MakeNodeWith(map[string]string{ + kubernetes.PodID: "ping/pong-b", + kubernetes.Namespace: "ping", + }).WithParents(report.Sets{ + "pod": report.MakeStringSet(pod2ID), + })) reporter := kubernetes.NewReporter(mockClientInstance) have, _ := reporter.Report() diff --git a/probe/overlay/weave.go b/probe/overlay/weave.go index 67ccdb505..f72c83762 100644 --- a/probe/overlay/weave.go +++ b/probe/overlay/weave.go @@ -195,7 +195,7 @@ func (w *Weave) Tag(r report.Report) (report.Report, error) { if entry.Tombstone > 0 { continue } - nodeID := report.MakeContainerNodeID(w.hostID, entry.ContainerID) + nodeID := report.MakeContainerNodeID(entry.ContainerID) node, ok := r.Container.Nodes[nodeID] if !ok { continue diff --git a/probe/overlay/weave_test.go b/probe/overlay/weave_test.go index 9d551f793..9449a5dbe 100644 --- a/probe/overlay/weave_test.go +++ b/probe/overlay/weave_test.go @@ -46,7 +46,7 @@ func TestWeaveTaggerOverlayTopology(t *testing.T) { } { - nodeID := report.MakeContainerNodeID(mockHostID, mockContainerID) + nodeID := report.MakeContainerNodeID(mockContainerID) want := report.Report{ Container: report.MakeTopology().AddNode(nodeID, report.MakeNodeWith(map[string]string{ docker.ContainerID: mockContainerID, diff --git a/probe/probe_internal_test.go b/probe/probe_internal_test.go index 3370447e5..c50559d1e 100644 --- a/probe/probe_internal_test.go +++ b/probe/probe_internal_test.go @@ -33,8 +33,8 @@ func TestApply(t *testing.T) { from report.Topology via string }{ - {endpointNode.Merge(report.MakeNodeWith(map[string]string{"topology": "endpoint"})), r.Endpoint, endpointNodeID}, - {addressNode.Merge(report.MakeNodeWith(map[string]string{"topology": "address"})), r.Address, addressNodeID}, + {endpointNode.Merge(report.MakeNode().WithID("c").WithTopology("endpoint")), r.Endpoint, endpointNodeID}, + {addressNode.Merge(report.MakeNode().WithID("d").WithTopology("address")), 
r.Address, addressNodeID}, } { if want, have := tuple.want, tuple.from.Nodes[tuple.via]; !reflect.DeepEqual(want, have) { t.Errorf("want %+v, have %+v", want, have) diff --git a/probe/topology_tagger.go b/probe/topology_tagger.go index 1c8f975f9..d117a6f26 100644 --- a/probe/topology_tagger.go +++ b/probe/topology_tagger.go @@ -4,9 +4,6 @@ import ( "github.com/weaveworks/scope/report" ) -// Topology is the Node key for the origin topology. -const Topology = "topology" - type topologyTagger struct{} // NewTopologyTagger tags each node with the topology that it comes from. It's @@ -19,18 +16,19 @@ func (topologyTagger) Name() string { return "Topology" } // Tag implements Tagger func (topologyTagger) Tag(r report.Report) (report.Report, error) { - for val, topology := range map[string]*report.Topology{ + for name, t := range map[string]*report.Topology{ "endpoint": &(r.Endpoint), "address": &(r.Address), "process": &(r.Process), "container": &(r.Container), "container_image": &(r.ContainerImage), + "pod": &(r.Pod), + "service": &(r.Service), "host": &(r.Host), "overlay": &(r.Overlay), } { - metadata := map[string]string{Topology: val} - for id, node := range topology.Nodes { - topology.AddNode(id, node.WithMetadata(metadata)) + for id, node := range t.Nodes { + t.AddNode(id, node.WithID(id).WithTopology(name)) } } return r, nil diff --git a/prog/probe.go b/prog/probe.go index f8aa9ed1b..3742e80cf 100644 --- a/prog/probe.go +++ b/prog/probe.go @@ -131,7 +131,7 @@ func probeMain() { } if registry, err := docker.NewRegistry(*dockerInterval, clients); err == nil { defer registry.Stop() - p.AddTagger(docker.NewTagger(registry, processCache)) + p.AddTagger(docker.NewTagger(registry, hostID, processCache)) p.AddReporter(docker.NewReporter(registry, hostID, p)) } else { log.Printf("Docker: failed to start registry: %v", err) diff --git a/render/detailed/metadata.go b/render/detailed/metadata.go new file mode 100644 index 000000000..e0dc35940 --- /dev/null +++ 
b/render/detailed/metadata.go @@ -0,0 +1,135 @@ +package detailed + +import ( + "fmt" + "sort" + "strings" + + "github.com/weaveworks/scope/probe/docker" + "github.com/weaveworks/scope/probe/host" + "github.com/weaveworks/scope/probe/kubernetes" + "github.com/weaveworks/scope/probe/overlay" + "github.com/weaveworks/scope/probe/process" + "github.com/weaveworks/scope/report" +) + +var ( + processNodeMetadata = renderMetadata( + meta(process.PID, "PID"), + meta(process.PPID, "Parent PID"), + meta(process.Cmdline, "Command"), + meta(process.Threads, "# Threads"), + ) + containerNodeMetadata = renderMetadata( + meta(docker.ContainerID, "ID"), + meta(docker.ImageID, "Image ID"), + ltst(docker.ContainerState, "State"), + sets(docker.ContainerIPs, "IPs"), + sets(docker.ContainerPorts, "Ports"), + meta(docker.ContainerCreated, "Created"), + meta(docker.ContainerCommand, "Command"), + meta(overlay.WeaveMACAddress, "Weave MAC"), + meta(overlay.WeaveDNSHostname, "Weave DNS Hostname"), + getDockerLabelRows, + ) + containerImageNodeMetadata = renderMetadata( + meta(docker.ImageID, "Image ID"), + getDockerLabelRows, + ) + podNodeMetadata = renderMetadata( + meta(kubernetes.PodID, "ID"), + meta(kubernetes.Namespace, "Namespace"), + meta(kubernetes.PodCreated, "Created"), + ) + hostNodeMetadata = renderMetadata( + meta(host.HostName, "Hostname"), + meta(host.OS, "Operating system"), + meta(host.KernelVersion, "Kernel version"), + meta(host.Uptime, "Uptime"), + sets(host.LocalNetworks, "Local Networks"), + ) +) + +// MetadataRow is a row for the metadata table. +type MetadataRow struct { + ID string `json:"id"` + Label string `json:"label"` + Value string `json:"value"` +} + +// Copy returns a value copy of a metadata row. 
+func (m MetadataRow) Copy() MetadataRow { + return MetadataRow{ + ID: m.ID, + Label: m.Label, + Value: m.Value, + } +} + +// NodeMetadata produces a table (to be consumed directly by the UI) based on +// an origin ID, which is (optimistically) a node ID in one of our topologies. +func NodeMetadata(n report.Node) []MetadataRow { + renderers := map[string]func(report.Node) []MetadataRow{ + "process": processNodeMetadata, + "container": containerNodeMetadata, + "container_image": containerImageNodeMetadata, + "pod": podNodeMetadata, + "host": hostNodeMetadata, + } + if renderer, ok := renderers[n.Topology]; ok { + return renderer(n) + } + return nil +} + +func renderMetadata(templates ...func(report.Node) []MetadataRow) func(report.Node) []MetadataRow { + return func(nmd report.Node) []MetadataRow { + rows := []MetadataRow{} + for _, template := range templates { + rows = append(rows, template(nmd)...) + } + return rows + } +} + +func meta(id, label string) func(report.Node) []MetadataRow { + return func(n report.Node) []MetadataRow { + if val, ok := n.Metadata[id]; ok { + return []MetadataRow{{ID: id, Label: label, Value: val}} + } + return nil + } +} + +func sets(id, label string) func(report.Node) []MetadataRow { + return func(n report.Node) []MetadataRow { + if val, ok := n.Sets[id]; ok && len(val) > 0 { + return []MetadataRow{{ID: id, Label: label, Value: strings.Join(val, ", ")}} + } + return nil + } +} + +func ltst(id, label string) func(report.Node) []MetadataRow { + return func(n report.Node) []MetadataRow { + if val, ok := n.Latest.Lookup(id); ok { + return []MetadataRow{{ID: id, Label: label, Value: val}} + } + return nil + } +} + +func getDockerLabelRows(nmd report.Node) []MetadataRow { + rows := []MetadataRow{} + // Add labels in alphabetical order + labels := docker.ExtractLabels(nmd) + labelKeys := make([]string, 0, len(labels)) + for k := range labels { + labelKeys = append(labelKeys, k) + } + sort.Strings(labelKeys) + for _, labelKey := range 
labelKeys { + rows = append(rows, MetadataRow{ID: "label_" + labelKey, Label: fmt.Sprintf("Label %q", labelKey), Value: labels[labelKey]}) + } + return rows +} diff --git a/render/detailed/metadata_test.go b/render/detailed/metadata_test.go new file mode 100644 index 000000000..e4f86bebc --- /dev/null +++ b/render/detailed/metadata_test.go @@ -0,0 +1,53 @@ +package detailed_test + +import ( + "reflect" + "testing" + + "github.com/weaveworks/scope/probe/docker" + "github.com/weaveworks/scope/render/detailed" + "github.com/weaveworks/scope/report" + "github.com/weaveworks/scope/test" + "github.com/weaveworks/scope/test/fixture" +) + +func TestNodeMetadata(t *testing.T) { + inputs := []struct { + name string + node report.Node + want []detailed.MetadataRow + }{ + { + name: "container", + node: report.MakeNodeWith(map[string]string{ + docker.ContainerID: fixture.ClientContainerID, + docker.LabelPrefix + "label1": "label1value", + }).WithTopology("container").WithSets(report.Sets{ + docker.ContainerIPs: report.MakeStringSet("10.10.10.0/24", "10.10.10.1/24"), + }).WithLatest(docker.ContainerState, fixture.Now, docker.StateRunning), + want: []detailed.MetadataRow{ + {ID: docker.ContainerID, Label: "ID", Value: fixture.ClientContainerID}, + {ID: docker.ContainerState, Label: "State", Value: "running"}, + {ID: docker.ContainerIPs, Label: "IPs", Value: "10.10.10.0/24, 10.10.10.1/24"}, + { + ID: "label_label1", + Label: "Label \"label1\"", + Value: "label1value", + }, + }, + }, + { + name: "unknown topology", + node: report.MakeNodeWith(map[string]string{ + docker.ContainerID: fixture.ClientContainerID, + }).WithTopology("foobar").WithID(fixture.ClientContainerNodeID), + want: nil, + }, + } + for _, input := range inputs { + have := detailed.NodeMetadata(input.node) + if !reflect.DeepEqual(input.want, have) { + t.Errorf("%s: %s", input.name, test.Diff(input.want, have)) + } + } +} diff --git a/render/detailed/metrics.go b/render/detailed/metrics.go new file mode 100644 index 
000000000..c38cef3f3 --- /dev/null +++ b/render/detailed/metrics.go @@ -0,0 +1,176 @@ +package detailed + +import ( + "encoding/json" + "math" + + "github.com/weaveworks/scope/probe/docker" + "github.com/weaveworks/scope/probe/host" + "github.com/weaveworks/scope/probe/process" + "github.com/weaveworks/scope/report" +) + +const ( + defaultFormat = "" + filesizeFormat = "filesize" + percentFormat = "percent" +) + +// MetricRow is a tuple of data used to render a metric as a sparkline and +// accoutrements. +type MetricRow struct { + ID string + Label string + Format string + Group string + Value float64 + Metric *report.Metric +} + +// Copy returns a value copy of the MetricRow +func (m MetricRow) Copy() MetricRow { + metric := m.Metric.Copy() + return MetricRow{ + ID: m.ID, + Label: m.Label, + Format: m.Format, + Group: m.Group, + Value: m.Value, + Metric: &metric, + } +} + +// MarshalJSON marshals this MetricRow to json. It takes the basic Metric +// rendering, then adds some row-specific fields. 
+func (m MetricRow) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + ID string `json:"id"` + Label string `json:"label"` + Format string `json:"format,omitempty"` + Group string `json:"group,omitempty"` + Value float64 `json:"value"` + report.WireMetrics + }{ + ID: m.ID, + Label: m.Label, + Format: m.Format, + Group: m.Group, + Value: m.Value, + WireMetrics: m.Metric.ToIntermediate(), + }) +} + +func metricRow(id, label string, metric report.Metric, format, group string) MetricRow { + var last float64 + if s := metric.LastSample(); s != nil { + last = s.Value + } + return MetricRow{ + ID: id, + Label: label, + Format: format, + Group: group, + Value: toFixed(last, 2), + Metric: &metric, + } +} + +// toFixed truncates decimals of float64 down to specified precision +func toFixed(num float64, precision int) float64 { + output := math.Pow(10, float64(precision)) + return float64(int64(num*output)) / output +} + +// NodeMetrics produces a table (to be consumed directly by the UI) based on +// an origin ID, which is (optimistically) a node ID in one of our topologies. 
+func NodeMetrics(n report.Node) []MetricRow { + renderers := map[string]func(report.Node) []MetricRow{ + "process": processNodeMetrics, + "container": containerNodeMetrics, + "host": hostNodeMetrics, + } + if renderer, ok := renderers[n.Topology]; ok { + return renderer(n) + } + return nil +} + +func processNodeMetrics(nmd report.Node) []MetricRow { + rows := []MetricRow{} + for _, tuple := range []struct { + ID, Label, fmt string + }{ + {process.CPUUsage, "CPU Usage", percentFormat}, + {process.MemoryUsage, "Memory Usage", filesizeFormat}, + } { + if val, ok := nmd.Metrics[tuple.ID]; ok { + rows = append(rows, metricRow( + tuple.ID, + tuple.Label, + val, + tuple.fmt, + "", + )) + } + } + return rows +} + +func containerNodeMetrics(nmd report.Node) []MetricRow { + rows := []MetricRow{} + if val, ok := nmd.Metrics[docker.CPUTotalUsage]; ok { + rows = append(rows, metricRow( + docker.CPUTotalUsage, + "CPU Usage", + val, + percentFormat, + "", + )) + } + if val, ok := nmd.Metrics[docker.MemoryUsage]; ok { + rows = append(rows, metricRow( + docker.MemoryUsage, + "Memory Usage", + val, + filesizeFormat, + "", + )) + } + return rows +} + +func hostNodeMetrics(nmd report.Node) []MetricRow { + // Ensure that all metrics have the same max + maxLoad := 0.0 + for _, id := range []string{host.Load1, host.Load5, host.Load15} { + if metric, ok := nmd.Metrics[id]; ok { + if metric.Len() == 0 { + continue + } + if metric.Max > maxLoad { + maxLoad = metric.Max + } + } + } + + rows := []MetricRow{} + for _, tuple := range []struct{ ID, Label, fmt string }{ + {host.CPUUsage, "CPU Usage", percentFormat}, + {host.MemUsage, "Memory Usage", filesizeFormat}, + } { + if val, ok := nmd.Metrics[tuple.ID]; ok { + rows = append(rows, metricRow(tuple.ID, tuple.Label, val, tuple.fmt, "")) + } + } + for _, tuple := range []struct{ ID, Label string }{ + {host.Load1, "Load (1m)"}, + {host.Load5, "Load (5m)"}, + {host.Load15, "Load (15m)"}, + } { + if val, ok := nmd.Metrics[tuple.ID]; ok { + 
val.Max = maxLoad + rows = append(rows, metricRow(tuple.ID, tuple.Label, val, defaultFormat, "load")) + } + } + return rows +} diff --git a/render/detailed/metrics_test.go b/render/detailed/metrics_test.go new file mode 100644 index 000000000..f61c8c5f4 --- /dev/null +++ b/render/detailed/metrics_test.go @@ -0,0 +1,121 @@ +package detailed_test + +import ( + "reflect" + "testing" + + "github.com/weaveworks/scope/probe/docker" + "github.com/weaveworks/scope/probe/host" + "github.com/weaveworks/scope/probe/process" + "github.com/weaveworks/scope/render/detailed" + "github.com/weaveworks/scope/report" + "github.com/weaveworks/scope/test" + "github.com/weaveworks/scope/test/fixture" +) + +func TestNodeMetrics(t *testing.T) { + inputs := []struct { + name string + node report.Node + want []detailed.MetricRow + }{ + { + name: "process", + node: fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID], + want: []detailed.MetricRow{ + { + ID: process.CPUUsage, + Label: "CPU Usage", + Format: "percent", + Group: "", + Value: 0.01, + Metric: &fixture.CPUMetric, + }, + { + ID: process.MemoryUsage, + Label: "Memory Usage", + Format: "filesize", + Group: "", + Value: 0.01, + Metric: &fixture.MemoryMetric, + }, + }, + }, + { + name: "container", + node: fixture.Report.Container.Nodes[fixture.ClientContainerNodeID], + want: []detailed.MetricRow{ + { + ID: docker.CPUTotalUsage, + Label: "CPU Usage", + Format: "percent", + Group: "", + Value: 0.01, + Metric: &fixture.CPUMetric, + }, + { + ID: docker.MemoryUsage, + Label: "Memory Usage", + Format: "filesize", + Group: "", + Value: 0.01, + Metric: &fixture.MemoryMetric, + }, + }, + }, + { + name: "host", + node: fixture.Report.Host.Nodes[fixture.ClientHostNodeID], + want: []detailed.MetricRow{ + { + ID: host.CPUUsage, + Label: "CPU Usage", + Format: "percent", + Group: "", + Value: 0.01, + Metric: &fixture.CPUMetric, + }, + { + ID: host.MemUsage, + Label: "Memory Usage", + Format: "filesize", + Group: "", + Value: 0.01, + Metric: 
&fixture.MemoryMetric, + }, + { + ID: host.Load1, + Label: "Load (1m)", + Group: "load", + Value: 0.01, + Metric: &fixture.LoadMetric, + }, + { + ID: host.Load5, + Label: "Load (5m)", + Group: "load", + Value: 0.01, + Metric: &fixture.LoadMetric, + }, + { + ID: host.Load15, + Label: "Load (15m)", + Group: "load", + Value: 0.01, + Metric: &fixture.LoadMetric, + }, + }, + }, + { + name: "unknown topology", + node: report.MakeNode().WithTopology("foobar").WithID(fixture.ClientContainerNodeID), + want: nil, + }, + } + for _, input := range inputs { + have := detailed.NodeMetrics(input.node) + if !reflect.DeepEqual(input.want, have) { + t.Errorf("%s: %s", input.name, test.Diff(input.want, have)) + } + } +} diff --git a/render/detailed/node.go b/render/detailed/node.go new file mode 100644 index 000000000..29b5e7c8b --- /dev/null +++ b/render/detailed/node.go @@ -0,0 +1,208 @@ +package detailed + +import ( + "sort" + + "github.com/weaveworks/scope/probe/docker" + "github.com/weaveworks/scope/probe/host" + "github.com/weaveworks/scope/probe/kubernetes" + "github.com/weaveworks/scope/probe/process" + "github.com/weaveworks/scope/render" + "github.com/weaveworks/scope/report" +) + +// Node is the data type that's yielded to the JavaScript layer when +// we want deep information about an individual node. +type Node struct { + ID string `json:"id"` + Label string `json:"label"` + Rank string `json:"rank,omitempty"` + Pseudo bool `json:"pseudo,omitempty"` + Controls []ControlInstance `json:"controls"` + Metadata []MetadataRow `json:"metadata,omitempty"` + Metrics []MetricRow `json:"metrics,omitempty"` + Children []NodeSummaryGroup `json:"children,omitempty"` + Parents []Parent `json:"parents,omitempty"` +} + +// Parent is the information needed to build a link to the parent of a Node. 
+type Parent struct { + ID string `json:"id"` + Label string `json:"label"` + TopologyID string `json:"topologyId"` +} + +// ControlInstance contains a control description, and all the info +// needed to execute it. +type ControlInstance struct { + ProbeID string `json:"probeId"` + NodeID string `json:"nodeId"` + report.Control +} + +// MakeNode transforms a renderable node to a detailed node. It uses +// aggregate metadata, plus the set of origin node IDs, to produce tables. +func MakeNode(r report.Report, n render.RenderableNode) Node { + return Node{ + ID: n.ID, + Label: n.LabelMajor, + Rank: n.Rank, + Pseudo: n.Pseudo, + Controls: controls(r, n), + Metadata: NodeMetadata(n.Node), + Metrics: NodeMetrics(n.Node), + Children: children(n), + Parents: parents(r, n), + } +} + +func controlsFor(topology report.Topology, nodeID string) []ControlInstance { + result := []ControlInstance{} + node, ok := topology.Nodes[nodeID] + if !ok { + return result + } + + for _, id := range node.Controls.Controls { + if control, ok := topology.Controls[id]; ok { + result = append(result, ControlInstance{ + ProbeID: node.Metadata[report.ProbeID], + NodeID: nodeID, + Control: control, + }) + } + } + return result +} + +func controls(r report.Report, n render.RenderableNode) []ControlInstance { + if _, ok := r.Process.Nodes[n.ControlNode]; ok { + return controlsFor(r.Process, n.ControlNode) + } else if _, ok := r.Container.Nodes[n.ControlNode]; ok { + return controlsFor(r.Container, n.ControlNode) + } else if _, ok := r.ContainerImage.Nodes[n.ControlNode]; ok { + return controlsFor(r.ContainerImage, n.ControlNode) + } else if _, ok := r.Host.Nodes[n.ControlNode]; ok { + return controlsFor(r.Host, n.ControlNode) + } + return []ControlInstance{} +} + +var ( + nodeSummaryGroupSpecs = []struct { + topologyID string + NodeSummaryGroup + }{ + {"host", NodeSummaryGroup{TopologyID: "hosts", Label: "Hosts", Columns: []string{host.CPUUsage, host.MemUsage}}}, + {"pod", NodeSummaryGroup{TopologyID: 
"pods", Label: "Pods", Columns: []string{}}}, + {"container_image", NodeSummaryGroup{TopologyID: "containers-by-image", Label: "Container Images", Columns: []string{}}}, + {"container", NodeSummaryGroup{TopologyID: "containers", Label: "Containers", Columns: []string{docker.CPUTotalUsage, docker.MemoryUsage}}}, + {"process", NodeSummaryGroup{TopologyID: "applications", Label: "Applications", Columns: []string{process.PID, process.CPUUsage, process.MemoryUsage}}}, + } +) + +func children(n render.RenderableNode) []NodeSummaryGroup { + summaries := map[string][]NodeSummary{} + for _, child := range n.Children { + if child.ID == n.ID { + continue + } + + if summary, ok := MakeNodeSummary(child); ok { + summaries[child.Topology] = append(summaries[child.Topology], summary) + } + } + + nodeSummaryGroups := []NodeSummaryGroup{} + for _, spec := range nodeSummaryGroupSpecs { + if len(summaries[spec.topologyID]) > 0 { + sort.Sort(nodeSummariesByID(summaries[spec.TopologyID])) + group := spec.NodeSummaryGroup.Copy() + group.Nodes = summaries[spec.topologyID] + nodeSummaryGroups = append(nodeSummaryGroups, group) + } + } + return nodeSummaryGroups +} + +// parents is a total a hack to find the parents of a node (which is +// ill-defined). +func parents(r report.Report, n render.RenderableNode) (result []Parent) { + defer func() { + for i, parent := range result { + if parent.ID == n.ID { + result = append(result[:i], result[i+1:]...) 
+ } + } + }() + + topologies := map[string]struct { + report.Topology + render func(report.Node) Parent + }{ + "container": {r.Container, containerParent}, + "pod": {r.Pod, podParent}, + "service": {r.Service, serviceParent}, + "container_image": {r.ContainerImage, containerImageParent}, + "host": {r.Host, hostParent}, + } + topologyIDs := []string{} + for topologyID := range topologies { + topologyIDs = append(topologyIDs, topologyID) + } + sort.Strings(topologyIDs) + for _, topologyID := range topologyIDs { + t := topologies[topologyID] + for _, id := range n.Node.Parents[topologyID] { + parent, ok := t.Nodes[id] + if !ok { + continue + } + + result = append(result, t.render(parent)) + } + } + return result +} + +func containerParent(n report.Node) Parent { + label, _ := render.GetRenderableContainerName(n) + return Parent{ + ID: render.MakeContainerID(n.Metadata[docker.ContainerID]), + Label: label, + TopologyID: "containers", + } +} + +func podParent(n report.Node) Parent { + return Parent{ + ID: render.MakePodID(n.Metadata[kubernetes.PodID]), + Label: n.Metadata[kubernetes.PodName], + TopologyID: "pods", + } +} + +func serviceParent(n report.Node) Parent { + return Parent{ + ID: render.MakeServiceID(n.Metadata[kubernetes.ServiceID]), + Label: n.Metadata[kubernetes.ServiceName], + TopologyID: "pods-by-service", + } +} + +func containerImageParent(n report.Node) Parent { + imageName := n.Metadata[docker.ImageName] + return Parent{ + ID: render.MakeContainerImageID(render.ImageNameWithoutVersion(imageName)), + Label: imageName, + TopologyID: "containers-by-image", + } +} + +func hostParent(n report.Node) Parent { + return Parent{ + ID: render.MakeHostID(n.Metadata[host.HostName]), + Label: n.Metadata[host.HostName], + TopologyID: "hosts", + } +} diff --git a/render/detailed/node_test.go b/render/detailed/node_test.go new file mode 100644 index 000000000..04051fdc9 --- /dev/null +++ b/render/detailed/node_test.go @@ -0,0 +1,185 @@ +package detailed_test + +import 
( + "fmt" + "reflect" + "testing" + + "github.com/weaveworks/scope/probe/docker" + "github.com/weaveworks/scope/probe/host" + "github.com/weaveworks/scope/probe/process" + "github.com/weaveworks/scope/render" + "github.com/weaveworks/scope/render/detailed" + "github.com/weaveworks/scope/test" + "github.com/weaveworks/scope/test/fixture" +) + +func TestMakeDetailedHostNode(t *testing.T) { + renderableNode := render.HostRenderer.Render(fixture.Report)[render.MakeHostID(fixture.ClientHostID)] + have := detailed.MakeNode(fixture.Report, renderableNode) + + containerImageNodeSummary, _ := detailed.MakeNodeSummary(fixture.Report.ContainerImage.Nodes[fixture.ClientContainerImageNodeID]) + containerNodeSummary, _ := detailed.MakeNodeSummary(fixture.Report.Container.Nodes[fixture.ClientContainerNodeID]) + process1NodeSummary, _ := detailed.MakeNodeSummary(fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID]) + process1NodeSummary.Linkable = true + process2NodeSummary, _ := detailed.MakeNodeSummary(fixture.Report.Process.Nodes[fixture.ClientProcess2NodeID]) + process2NodeSummary.Linkable = true + want := detailed.Node{ + ID: render.MakeHostID(fixture.ClientHostID), + Label: "client", + Rank: "hostname.com", + Pseudo: false, + Controls: []detailed.ControlInstance{}, + Metadata: []detailed.MetadataRow{ + { + ID: "host_name", + Label: "Hostname", + Value: "client.hostname.com", + }, + { + ID: "os", + Label: "Operating system", + Value: "Linux", + }, + { + ID: "local_networks", + Label: "Local Networks", + Value: "10.10.10.0/24", + }, + }, + Metrics: []detailed.MetricRow{ + { + ID: host.CPUUsage, + Format: "percent", + Label: "CPU Usage", + Value: 0.01, + Metric: &fixture.CPUMetric, + }, + { + ID: host.MemUsage, + Format: "filesize", + Label: "Memory Usage", + Value: 0.01, + Metric: &fixture.MemoryMetric, + }, + { + ID: host.Load1, + Group: "load", + Label: "Load (1m)", + Value: 0.01, + Metric: &fixture.LoadMetric, + }, + { + ID: host.Load5, + Group: "load", + Label: "Load 
(5m)", + Value: 0.01, + Metric: &fixture.LoadMetric, + }, + { + ID: host.Load15, + Label: "Load (15m)", + Group: "load", + Value: 0.01, + Metric: &fixture.LoadMetric, + }, + }, + Children: []detailed.NodeSummaryGroup{ + { + Label: "Container Images", + TopologyID: "containers-by-image", + Columns: []string{}, + Nodes: []detailed.NodeSummary{containerImageNodeSummary}, + }, + { + Label: "Containers", + TopologyID: "containers", + Columns: []string{docker.CPUTotalUsage, docker.MemoryUsage}, + Nodes: []detailed.NodeSummary{containerNodeSummary}, + }, + { + Label: "Applications", + TopologyID: "applications", + Columns: []string{process.PID, process.CPUUsage, process.MemoryUsage}, + Nodes: []detailed.NodeSummary{process1NodeSummary, process2NodeSummary}, + }, + }, + } + if !reflect.DeepEqual(want, have) { + t.Errorf("%s", test.Diff(want, have)) + } +} + +func TestMakeDetailedContainerNode(t *testing.T) { + id := render.MakeContainerID(fixture.ServerContainerID) + renderableNode, ok := render.ContainerRenderer.Render(fixture.Report)[id] + if !ok { + t.Fatalf("Node not found: %s", id) + } + have := detailed.MakeNode(fixture.Report, renderableNode) + want := detailed.Node{ + ID: id, + Label: "server", + Rank: "imageid456", + Pseudo: false, + Controls: []detailed.ControlInstance{}, + Metadata: []detailed.MetadataRow{ + {ID: "docker_container_id", Label: "ID", Value: fixture.ServerContainerID}, + {ID: "docker_image_id", Label: "Image ID", Value: fixture.ServerContainerImageID}, + {ID: "docker_container_state", Label: "State", Value: "running"}, + {ID: "label_" + render.AmazonECSContainerNameLabel, Label: fmt.Sprintf(`Label %q`, render.AmazonECSContainerNameLabel), Value: `server`}, + {ID: "label_foo1", Label: `Label "foo1"`, Value: `bar1`}, + {ID: "label_foo2", Label: `Label "foo2"`, Value: `bar2`}, + {ID: "label_io.kubernetes.pod.name", Label: `Label "io.kubernetes.pod.name"`, Value: "ping/pong-b"}, + }, + Metrics: []detailed.MetricRow{ + { + ID: docker.CPUTotalUsage, + 
Format: "percent", + Label: "CPU Usage", + Value: 0.01, + Metric: &fixture.CPUMetric, + }, + { + ID: docker.MemoryUsage, + Format: "filesize", + Label: "Memory Usage", + Value: 0.01, + Metric: &fixture.MemoryMetric, + }, + }, + Children: []detailed.NodeSummaryGroup{ + { + Label: "Applications", + TopologyID: "applications", + Columns: []string{process.PID, process.CPUUsage, process.MemoryUsage}, + Nodes: []detailed.NodeSummary{ + { + ID: fmt.Sprintf("process:%s:%s", "server.hostname.com", fixture.ServerPID), + Label: "apache", + Linkable: true, + Metadata: []detailed.MetadataRow{ + {ID: process.PID, Label: "PID", Value: fixture.ServerPID}, + }, + Metrics: []detailed.MetricRow{}, + }, + }, + }, + }, + Parents: []detailed.Parent{ + { + ID: render.MakeContainerImageID(fixture.ServerContainerImageName), + Label: fixture.ServerContainerImageName, + TopologyID: "containers-by-image", + }, + { + ID: render.MakeHostID(fixture.ServerHostName), + Label: fixture.ServerHostName, + TopologyID: "hosts", + }, + }, + } + if !reflect.DeepEqual(want, have) { + t.Errorf("%s", test.Diff(want, have)) + } +} diff --git a/render/detailed/summary.go b/render/detailed/summary.go new file mode 100644 index 000000000..b1bc7df8a --- /dev/null +++ b/render/detailed/summary.go @@ -0,0 +1,140 @@ +package detailed + +import ( + "fmt" + + "github.com/weaveworks/scope/probe/docker" + "github.com/weaveworks/scope/probe/host" + "github.com/weaveworks/scope/probe/kubernetes" + "github.com/weaveworks/scope/probe/process" + "github.com/weaveworks/scope/render" + "github.com/weaveworks/scope/report" +) + +// NodeSummaryGroup is a topology-typed group of children for a Node. 
+type NodeSummaryGroup struct { + Label string `json:"label"` + Nodes []NodeSummary `json:"nodes"` + TopologyID string `json:"topologyId"` + Columns []string `json:"columns"` +} + +// Copy returns a value copy of the NodeSummaryGroup +func (g NodeSummaryGroup) Copy() NodeSummaryGroup { + result := NodeSummaryGroup{ + TopologyID: g.TopologyID, + Label: g.Label, + Columns: g.Columns, + } + for _, node := range g.Nodes { + result.Nodes = append(result.Nodes, node.Copy()) + } + return result +} + +// NodeSummary is summary information about a child for a Node. +type NodeSummary struct { + ID string `json:"id"` + Label string `json:"label"` + Linkable bool `json:"linkable"` // Whether this node can be linked-to + Metadata []MetadataRow `json:"metadata,omitempty"` + Metrics []MetricRow `json:"metrics,omitempty"` +} + +// MakeNodeSummary summarizes a node, if possible. +func MakeNodeSummary(n report.Node) (NodeSummary, bool) { + renderers := map[string]func(report.Node) NodeSummary{ + "process": processNodeSummary, + "container": containerNodeSummary, + "container_image": containerImageNodeSummary, + "pod": podNodeSummary, + "host": hostNodeSummary, + } + if renderer, ok := renderers[n.Topology]; ok { + return renderer(n), true + } + return NodeSummary{}, false +} + +// Copy returns a value copy of the NodeSummary +func (n NodeSummary) Copy() NodeSummary { + result := NodeSummary{ + ID: n.ID, + Label: n.Label, + Linkable: n.Linkable, + } + for _, row := range n.Metadata { + result.Metadata = append(result.Metadata, row.Copy()) + } + for _, row := range n.Metrics { + result.Metrics = append(result.Metrics, row.Copy()) + } + return result +} + +func processNodeSummary(nmd report.Node) NodeSummary { + var ( + id string + label, nameFound = nmd.Metadata[process.Name] + ) + if pid, ok := nmd.Metadata[process.PID]; ok { + if !nameFound { + label = fmt.Sprintf("(%s)", pid) + } + id = render.MakeProcessID(report.ExtractHostID(nmd), pid) + } + _, isConnected := 
nmd.Metadata[render.IsConnected] + return NodeSummary{ + ID: id, + Label: label, + Linkable: isConnected, + Metadata: processNodeMetadata(nmd), + Metrics: processNodeMetrics(nmd), + } +} + +func containerNodeSummary(nmd report.Node) NodeSummary { + label, _ := render.GetRenderableContainerName(nmd) + return NodeSummary{ + ID: render.MakeContainerID(nmd.Metadata[docker.ContainerID]), + Label: label, + Linkable: true, + Metadata: containerNodeMetadata(nmd), + Metrics: containerNodeMetrics(nmd), + } +} + +func containerImageNodeSummary(nmd report.Node) NodeSummary { + imageName := nmd.Metadata[docker.ImageName] + return NodeSummary{ + ID: render.MakeContainerImageID(render.ImageNameWithoutVersion(imageName)), + Label: imageName, + Linkable: true, + Metadata: containerImageNodeMetadata(nmd), + } +} + +func podNodeSummary(nmd report.Node) NodeSummary { + return NodeSummary{ + ID: render.MakePodID(nmd.Metadata[kubernetes.PodID]), + Label: nmd.Metadata[kubernetes.PodName], + Linkable: true, + Metadata: podNodeMetadata(nmd), + } +} + +func hostNodeSummary(nmd report.Node) NodeSummary { + return NodeSummary{ + ID: render.MakeHostID(nmd.Metadata[host.HostName]), + Label: nmd.Metadata[host.HostName], + Linkable: true, + Metadata: hostNodeMetadata(nmd), + Metrics: hostNodeMetrics(nmd), + } +} + +type nodeSummariesByID []NodeSummary + +func (s nodeSummariesByID) Len() int { return len(s) } +func (s nodeSummariesByID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodeSummariesByID) Less(i, j int) bool { return s[i].ID < s[j].ID } diff --git a/render/detailed_node.go b/render/detailed_node.go deleted file mode 100644 index e8516562b..000000000 --- a/render/detailed_node.go +++ /dev/null @@ -1,572 +0,0 @@ -package render - -import ( - "fmt" - "sort" - "strconv" - - "github.com/weaveworks/scope/probe/docker" - "github.com/weaveworks/scope/probe/host" - "github.com/weaveworks/scope/probe/overlay" - "github.com/weaveworks/scope/probe/process" - 
"github.com/weaveworks/scope/report" -) - -const ( - containerImageRank = 4 - containerRank = 3 - processRank = 2 - hostRank = 1 - connectionsRank = 0 // keep connections at the bottom until they are expandable in the UI -) - -// DetailedNode is the data type that's yielded to the JavaScript layer when -// we want deep information about an individual node. -type DetailedNode struct { - ID string `json:"id"` - LabelMajor string `json:"label_major"` - LabelMinor string `json:"label_minor,omitempty"` - Rank string `json:"rank,omitempty"` - Pseudo bool `json:"pseudo,omitempty"` - Tables []Table `json:"tables"` - Controls []ControlInstance `json:"controls"` -} - -// Table is a dataset associated with a node. It will be displayed in the -// detail panel when a user clicks on a node. -type Table struct { - Title string `json:"title"` // e.g. Bandwidth - Numeric bool `json:"numeric"` // should the major column be right-aligned? - Rank int `json:"-"` // used to sort tables; not emitted. - Rows []Row `json:"rows"` -} - -// Row is a single entry in a Table dataset. -type Row struct { - Key string `json:"key"` // e.g. Ingress - ValueMajor string `json:"value_major"` // e.g. 25 - ValueMinor string `json:"value_minor,omitempty"` // e.g. KB/s - Expandable bool `json:"expandable,omitempty"` // Whether it can be expanded (hidden by default) - ValueType string `json:"value_type,omitempty"` // e.g. sparkline - Metric *report.Metric `json:"metric,omitempty"` // e.g. sparkline data samples -} - -// ControlInstance contains a control description, and all the info -// needed to execute it. 
-type ControlInstance struct { - ProbeID string `json:"probeId"` - NodeID string `json:"nodeId"` - report.Control -} - -type sortableRows []Row - -func (r sortableRows) Len() int { return len(r) } -func (r sortableRows) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r sortableRows) Less(i, j int) bool { - switch { - case r[i].Key != r[j].Key: - return r[i].Key < r[j].Key - - case r[i].ValueMajor != r[j].ValueMajor: - return r[i].ValueMajor < r[j].ValueMajor - - default: - return r[i].ValueMinor < r[j].ValueMinor - } -} - -type sortableTables []Table - -func (t sortableTables) Len() int { return len(t) } -func (t sortableTables) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t sortableTables) Less(i, j int) bool { return t[i].Rank > t[j].Rank } - -// MakeDetailedNode transforms a renderable node to a detailed node. It uses -// aggregate metadata, plus the set of origin node IDs, to produce tables. -func MakeDetailedNode(r report.Report, n RenderableNode) DetailedNode { - tables := sortableTables{} - - // Figure out if multiple hosts/containers are referenced by the renderableNode - multiContainer, multiHost := getRenderingContext(r, n) - - // RenderableNode may be the result of merge operation(s), and so may have - // multiple origins. The ultimate goal here is to generate tables to view - // in the UI, so we skip the intermediate representations, but we could - // add them later. - connections := []Row{} - for _, id := range n.Origins { - if table, ok := OriginTable(r, id, multiHost, multiContainer); ok { - tables = append(tables, table) - } else if _, ok := r.Endpoint.Nodes[id]; ok { - connections = append(connections, connectionDetailsRows(r.Endpoint, id)...) - } else if _, ok := r.Address.Nodes[id]; ok { - connections = append(connections, connectionDetailsRows(r.Address, id)...) 
- } - } - - if table, ok := connectionsTable(connections, r, n); ok { - tables = append(tables, table) - } - - // Sort tables by rank - sort.Sort(tables) - - return DetailedNode{ - ID: n.ID, - LabelMajor: n.LabelMajor, - LabelMinor: n.LabelMinor, - Rank: n.Rank, - Pseudo: n.Pseudo, - Tables: tables, - Controls: controls(r, n), - } -} - -func getRenderingContext(r report.Report, n RenderableNode) (multiContainer, multiHost bool) { - var ( - originHosts = map[string]struct{}{} - originContainers = map[string]struct{}{} - ) - for _, id := range n.Origins { - for _, topology := range r.Topologies() { - if nmd, ok := topology.Nodes[id]; ok { - originHosts[report.ExtractHostID(nmd)] = struct{}{} - if id, ok := nmd.Metadata[docker.ContainerID]; ok { - originContainers[id] = struct{}{} - } - } - // Return early if possible - multiHost = len(originHosts) > 1 - multiContainer = len(originContainers) > 1 - if multiHost && multiContainer { - return - } - } - } - return -} - -func connectionsTable(connections []Row, r report.Report, n RenderableNode) (Table, bool) { - sec := r.Window.Seconds() - rate := func(u *uint64) (float64, bool) { - if u == nil { - return 0.0, false - } - if sec <= 0 { - return 0.0, true - } - return float64(*u) / sec, true - } - shortenByteRate := func(rate float64) (major, minor string) { - switch { - case rate > 1024*1024: - return fmt.Sprintf("%.2f", rate/1024/1024), "MBps" - case rate > 1024: - return fmt.Sprintf("%.1f", rate/1024), "KBps" - default: - return fmt.Sprintf("%.0f", rate), "Bps" - } - } - - rows := []Row{} - if n.EdgeMetadata.MaxConnCountTCP != nil { - rows = append(rows, Row{Key: "TCP connections", ValueMajor: strconv.FormatUint(*n.EdgeMetadata.MaxConnCountTCP, 10)}) - } - if rate, ok := rate(n.EdgeMetadata.EgressPacketCount); ok { - rows = append(rows, Row{Key: "Egress packet rate", ValueMajor: fmt.Sprintf("%.0f", rate), ValueMinor: "packets/sec"}) - } - if rate, ok := rate(n.EdgeMetadata.IngressPacketCount); ok { - rows = append(rows, 
Row{Key: "Ingress packet rate", ValueMajor: fmt.Sprintf("%.0f", rate), ValueMinor: "packets/sec"}) - } - if rate, ok := rate(n.EdgeMetadata.EgressByteCount); ok { - s, unit := shortenByteRate(rate) - rows = append(rows, Row{Key: "Egress byte rate", ValueMajor: s, ValueMinor: unit}) - } - if rate, ok := rate(n.EdgeMetadata.IngressByteCount); ok { - s, unit := shortenByteRate(rate) - rows = append(rows, Row{Key: "Ingress byte rate", ValueMajor: s, ValueMinor: unit}) - } - if len(connections) > 0 { - sort.Sort(sortableRows(connections)) - rows = append(rows, Row{Key: "Client", ValueMajor: "Server", Expandable: true}) - rows = append(rows, connections...) - } - if len(rows) > 0 { - return Table{ - Title: "Connections", - Numeric: false, - Rank: connectionsRank, - Rows: rows, - }, true - } - return Table{}, false -} - -func controlsFor(topology report.Topology, nodeID string) []ControlInstance { - result := []ControlInstance{} - node, ok := topology.Nodes[nodeID] - if !ok { - return result - } - - for _, id := range node.Controls.Controls { - if control, ok := topology.Controls[id]; ok { - result = append(result, ControlInstance{ - ProbeID: node.Metadata[report.ProbeID], - NodeID: nodeID, - Control: control, - }) - } - } - return result -} - -func controls(r report.Report, n RenderableNode) []ControlInstance { - if _, ok := r.Process.Nodes[n.ControlNode]; ok { - return controlsFor(r.Process, n.ControlNode) - } else if _, ok := r.Container.Nodes[n.ControlNode]; ok { - return controlsFor(r.Container, n.ControlNode) - } else if _, ok := r.ContainerImage.Nodes[n.ControlNode]; ok { - return controlsFor(r.ContainerImage, n.ControlNode) - } else if _, ok := r.Host.Nodes[n.ControlNode]; ok { - return controlsFor(r.Host, n.ControlNode) - } - return []ControlInstance{} -} - -// OriginTable produces a table (to be consumed directly by the UI) based on -// an origin ID, which is (optimistically) a node ID in one of our topologies. 
-func OriginTable(r report.Report, originID string, addHostTags bool, addContainerTags bool) (Table, bool) { - result, show := Table{}, false - if nmd, ok := r.Process.Nodes[originID]; ok { - result, show = processOriginTable(nmd, addHostTags, addContainerTags) - } - if nmd, ok := r.Container.Nodes[originID]; ok { - result, show = containerOriginTable(nmd, addHostTags) - } - if nmd, ok := r.ContainerImage.Nodes[originID]; ok { - result, show = containerImageOriginTable(nmd) - } - if nmd, ok := r.Host.Nodes[originID]; ok { - result, show = hostOriginTable(nmd) - } - return result, show -} - -func connectionDetailsRows(topology report.Topology, originID string) []Row { - rows := []Row{} - labeler := func(nodeID string, sets report.Sets) (string, bool) { - if _, addr, port, ok := report.ParseEndpointNodeID(nodeID); ok { - if names, ok := sets["name"]; ok { - return fmt.Sprintf("%s:%s", names[0], port), true - } - return fmt.Sprintf("%s:%s", addr, port), true - } - if _, addr, ok := report.ParseAddressNodeID(nodeID); ok { - return addr, true - } - return "", false - } - local, ok := labeler(originID, topology.Nodes[originID].Sets) - if !ok { - return rows - } - // Firstly, collection outgoing connections from this node. - for _, serverNodeID := range topology.Nodes[originID].Adjacency { - remote, ok := labeler(serverNodeID, topology.Nodes[serverNodeID].Sets) - if !ok { - continue - } - rows = append(rows, Row{ - Key: local, - ValueMajor: remote, - Expandable: true, - }) - } - // Next, scan the topology for incoming connections to this node. 
- for clientNodeID, clientNode := range topology.Nodes { - if clientNodeID == originID { - continue - } - serverNodeIDs := clientNode.Adjacency - if !serverNodeIDs.Contains(originID) { - continue - } - remote, ok := labeler(clientNodeID, clientNode.Sets) - if !ok { - continue - } - rows = append(rows, Row{ - Key: remote, - ValueMajor: local, - ValueMinor: "", - Expandable: true, - }) - } - return rows -} - -func processOriginTable(nmd report.Node, addHostTag bool, addContainerTag bool) (Table, bool) { - rows := []Row{} - for _, tuple := range []struct{ key, human string }{ - {process.PPID, "Parent PID"}, - {process.Cmdline, "Command"}, - {process.Threads, "# Threads"}, - } { - if val, ok := nmd.Metadata[tuple.key]; ok { - rows = append(rows, Row{Key: tuple.human, ValueMajor: val, ValueMinor: ""}) - } - } - - if containerID, ok := nmd.Metadata[docker.ContainerID]; ok && addContainerTag { - rows = append([]Row{{Key: "Container ID", ValueMajor: containerID}}, rows...) - } - - if addHostTag { - rows = append([]Row{{Key: "Host", ValueMajor: report.ExtractHostID(nmd)}}, rows...) 
- } - - for _, tuple := range []struct { - key, human string - fmt formatter - }{ - {process.CPUUsage, "CPU Usage", formatPercent}, - {process.MemoryUsage, "Memory Usage", formatMemory}, - } { - if val, ok := nmd.Metrics[tuple.key]; ok { - rows = append(rows, sparklineRow(tuple.human, val, tuple.fmt)) - } - } - - var ( - title = "Process" - name, commFound = nmd.Metadata[process.Name] - pid, pidFound = nmd.Metadata[process.PID] - ) - if commFound { - title += ` "` + name + `"` - } - if pidFound { - title += " (" + pid + ")" - } - return Table{ - Title: title, - Numeric: false, - Rows: rows, - Rank: processRank, - }, len(rows) > 0 || commFound || pidFound -} - -type formatter func(report.Metric) (report.Metric, string) - -func sparklineRow(human string, metric report.Metric, format formatter) Row { - if format == nil { - format = formatDefault - } - metric, lastStr := format(metric) - return Row{Key: human, ValueMajor: lastStr, Metric: &metric, ValueType: "sparkline"} -} - -func formatDefault(m report.Metric) (report.Metric, string) { - if s := m.LastSample(); s != nil { - return m, fmt.Sprintf("%0.2f", s.Value) - } - return m, "" -} - -func memoryScale(n float64) (string, float64) { - brackets := []struct { - human string - shift uint - }{ - {"bytes", 0}, - {"KB", 10}, - {"MB", 20}, - {"GB", 30}, - {"TB", 40}, - {"PB", 50}, - } - for _, bracket := range brackets { - unit := (1 << bracket.shift) - if n < float64(unit<<10) { - return bracket.human, float64(unit) - } - } - return "PB", float64(1 << 50) -} - -func formatMemory(m report.Metric) (report.Metric, string) { - s := m.LastSample() - if s == nil { - return m, "" - } - human, divisor := memoryScale(s.Value) - return m.Div(divisor), fmt.Sprintf("%0.2f %s", s.Value/divisor, human) -} - -func formatPercent(m report.Metric) (report.Metric, string) { - if s := m.LastSample(); s != nil { - return m, fmt.Sprintf("%0.2f%%", s.Value) - } - return m, "" -} - -func containerOriginTable(nmd report.Node, addHostTag bool) 
(Table, bool) { - rows := []Row{} - for _, tuple := range []struct{ key, human string }{ - {docker.ContainerState, "State"}, - } { - if val, ok := nmd.Latest.Lookup(tuple.key); ok && val != "" { - rows = append(rows, Row{Key: tuple.human, ValueMajor: val, ValueMinor: ""}) - } - } - - for _, tuple := range []struct{ key, human string }{ - {docker.ContainerID, "ID"}, - {docker.ImageID, "Image ID"}, - {docker.ContainerPorts, "Ports"}, - {docker.ContainerCreated, "Created"}, - {docker.ContainerCommand, "Command"}, - {overlay.WeaveMACAddress, "Weave MAC"}, - {overlay.WeaveDNSHostname, "Weave DNS Hostname"}, - } { - if val, ok := nmd.Metadata[tuple.key]; ok && val != "" { - rows = append(rows, Row{Key: tuple.human, ValueMajor: val, ValueMinor: ""}) - } - } - - for _, ip := range docker.ExtractContainerIPs(nmd) { - rows = append(rows, Row{Key: "IP Address", ValueMajor: ip, ValueMinor: ""}) - } - rows = append(rows, getDockerLabelRows(nmd)...) - - if addHostTag { - rows = append([]Row{{Key: "Host", ValueMajor: report.ExtractHostID(nmd)}}, rows...) - } - - if val, ok := nmd.Metrics[docker.MemoryUsage]; ok { - rows = append(rows, sparklineRow("Memory Usage", val, formatMemory)) - } - if val, ok := nmd.Metrics[docker.CPUTotalUsage]; ok { - rows = append(rows, sparklineRow("CPU Usage", val, formatPercent)) - } - - var ( - title = "Container" - name, nameFound = GetRenderableContainerName(nmd) - ) - if nameFound { - title += ` "` + name + `"` - } - - return Table{ - Title: title, - Numeric: false, - Rows: rows, - Rank: containerRank, - }, len(rows) > 0 || nameFound -} - -func containerImageOriginTable(nmd report.Node) (Table, bool) { - rows := []Row{} - for _, tuple := range []struct{ key, human string }{ - {docker.ImageID, "Image ID"}, - } { - if val, ok := nmd.Metadata[tuple.key]; ok { - rows = append(rows, Row{Key: tuple.human, ValueMajor: val, ValueMinor: ""}) - } - } - rows = append(rows, getDockerLabelRows(nmd)...) 
- title := "Container Image" - var ( - nameFound bool - name string - ) - if name, nameFound = nmd.Metadata[docker.ImageName]; nameFound { - title += ` "` + name + `"` - } - return Table{ - Title: title, - Numeric: false, - Rows: rows, - Rank: containerImageRank, - }, len(rows) > 0 || nameFound -} - -func getDockerLabelRows(nmd report.Node) []Row { - rows := []Row{} - // Add labels in alphabetical order - labels := docker.ExtractLabels(nmd) - labelKeys := make([]string, 0, len(labels)) - for k := range labels { - labelKeys = append(labelKeys, k) - } - sort.Strings(labelKeys) - for _, labelKey := range labelKeys { - rows = append(rows, Row{Key: fmt.Sprintf("Label %q", labelKey), ValueMajor: labels[labelKey]}) - } - return rows -} - -func hostOriginTable(nmd report.Node) (Table, bool) { - // Ensure that all metrics have the same max - maxLoad := 0.0 - for _, key := range []string{host.Load1, host.Load5, host.Load15} { - if metric, ok := nmd.Metrics[key]; ok { - if metric.Len() == 0 { - continue - } - if metric.Max > maxLoad { - maxLoad = metric.Max - } - } - } - - rows := []Row{} - for _, tuple := range []struct{ key, human string }{ - {host.Load1, "Load (1m)"}, - {host.Load5, "Load (5m)"}, - {host.Load15, "Load (15m)"}, - } { - if val, ok := nmd.Metrics[tuple.key]; ok { - val.Max = maxLoad - rows = append(rows, sparklineRow(tuple.human, val, nil)) - } - } - for _, tuple := range []struct { - key, human string - fmt formatter - }{ - {host.CPUUsage, "CPU Usage", formatPercent}, - {host.MemUsage, "Memory Usage", formatMemory}, - } { - if val, ok := nmd.Metrics[tuple.key]; ok { - rows = append(rows, sparklineRow(tuple.human, val, tuple.fmt)) - } - } - for _, tuple := range []struct{ key, human string }{ - {host.OS, "Operating system"}, - {host.KernelVersion, "Kernel version"}, - {host.Uptime, "Uptime"}, - } { - if val, ok := nmd.Metadata[tuple.key]; ok { - rows = append(rows, Row{Key: tuple.human, ValueMajor: val, ValueMinor: ""}) - } - } - - title := "Host" - var ( - 
name string - foundName bool - ) - if name, foundName = nmd.Metadata[host.HostName]; foundName { - title += ` "` + name + `"` - } - return Table{ - Title: title, - Numeric: false, - Rows: rows, - Rank: hostRank, - }, len(rows) > 0 || foundName -} diff --git a/render/detailed_node_test.go b/render/detailed_node_test.go deleted file mode 100644 index fa31f411e..000000000 --- a/render/detailed_node_test.go +++ /dev/null @@ -1,249 +0,0 @@ -package render_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/weaveworks/scope/render" - "github.com/weaveworks/scope/test" - "github.com/weaveworks/scope/test/fixture" -) - -func TestOriginTable(t *testing.T) { - if _, ok := render.OriginTable(fixture.Report, "not-found", false, false); ok { - t.Errorf("unknown origin ID gave unexpected success") - } - for originID, want := range map[string]render.Table{ - fixture.ServerProcessNodeID: { - Title: fmt.Sprintf(`Process "apache" (%s)`, fixture.ServerPID), - Numeric: false, - Rank: 2, - Rows: []render.Row{}, - }, - fixture.ServerHostNodeID: { - Title: fmt.Sprintf("Host %q", fixture.ServerHostName), - Numeric: false, - Rank: 1, - Rows: []render.Row{ - {Key: "Load (1m)", ValueMajor: "0.01", Metric: &fixture.LoadMetric, ValueType: "sparkline"}, - {Key: "Load (5m)", ValueMajor: "0.01", Metric: &fixture.LoadMetric, ValueType: "sparkline"}, - {Key: "Load (15m)", ValueMajor: "0.01", Metric: &fixture.LoadMetric, ValueType: "sparkline"}, - {Key: "Operating system", ValueMajor: "Linux"}, - }, - }, - } { - have, ok := render.OriginTable(fixture.Report, originID, false, false) - if !ok { - t.Errorf("%q: not OK", originID) - continue - } - if !reflect.DeepEqual(want, have) { - t.Errorf("%q: %s", originID, test.Diff(want, have)) - } - } - - // Test host/container tags - for originID, want := range map[string]render.Table{ - fixture.ServerProcessNodeID: { - Title: fmt.Sprintf(`Process "apache" (%s)`, fixture.ServerPID), - Numeric: false, - Rank: 2, - Rows: []render.Row{ - {Key: 
"Host", ValueMajor: fixture.ServerHostID}, - {Key: "Container ID", ValueMajor: fixture.ServerContainerID}, - }, - }, - fixture.ServerContainerNodeID: { - Title: `Container "server"`, - Numeric: false, - Rank: 3, - Rows: []render.Row{ - {Key: "Host", ValueMajor: fixture.ServerHostID}, - {Key: "State", ValueMajor: "running"}, - {Key: "ID", ValueMajor: fixture.ServerContainerID}, - {Key: "Image ID", ValueMajor: fixture.ServerContainerImageID}, - {Key: fmt.Sprintf(`Label %q`, render.AmazonECSContainerNameLabel), ValueMajor: `server`}, - {Key: `Label "foo1"`, ValueMajor: `bar1`}, - {Key: `Label "foo2"`, ValueMajor: `bar2`}, - {Key: `Label "io.kubernetes.pod.name"`, ValueMajor: "ping/pong-b"}, - }, - }, - } { - have, ok := render.OriginTable(fixture.Report, originID, true, true) - if !ok { - t.Errorf("%q: not OK", originID) - continue - } - if !reflect.DeepEqual(want, have) { - t.Errorf("%q: %s", originID, test.Diff(want, have)) - } - } -} - -func TestMakeDetailedHostNode(t *testing.T) { - renderableNode := render.HostRenderer.Render(fixture.Report)[render.MakeHostID(fixture.ClientHostID)] - have := render.MakeDetailedNode(fixture.Report, renderableNode) - want := render.DetailedNode{ - ID: render.MakeHostID(fixture.ClientHostID), - LabelMajor: "client", - LabelMinor: "hostname.com", - Rank: "hostname.com", - Pseudo: false, - Controls: []render.ControlInstance{}, - Tables: []render.Table{ - { - Title: fmt.Sprintf("Host %q", fixture.ClientHostName), - Numeric: false, - Rank: 1, - Rows: []render.Row{ - { - Key: "Load (1m)", - ValueMajor: "0.01", - Metric: &fixture.LoadMetric, - ValueType: "sparkline", - }, - { - Key: "Load (5m)", - ValueMajor: "0.01", - Metric: &fixture.LoadMetric, - ValueType: "sparkline", - }, - { - Key: "Load (15m)", - ValueMajor: "0.01", - Metric: &fixture.LoadMetric, - ValueType: "sparkline", - }, - { - Key: "Operating system", - ValueMajor: "Linux", - }, - }, - }, - { - Title: "Connections", - Numeric: false, - Rank: 0, - Rows: []render.Row{ - { - 
Key: "TCP connections", - ValueMajor: "3", - }, - { - Key: "Client", - ValueMajor: "Server", - Expandable: true, - }, - { - Key: "10.10.10.20", - ValueMajor: "192.168.1.1", - Expandable: true, - }, - }, - }, - }, - } - if !reflect.DeepEqual(want, have) { - t.Errorf("%s", test.Diff(want, have)) - } -} - -func TestMakeDetailedContainerNode(t *testing.T) { - renderableNode := render.ContainerRenderer.Render(fixture.Report)[fixture.ServerContainerID] - have := render.MakeDetailedNode(fixture.Report, renderableNode) - want := render.DetailedNode{ - ID: fixture.ServerContainerID, - LabelMajor: "server", - LabelMinor: fixture.ServerHostName, - Rank: "imageid456", - Pseudo: false, - Controls: []render.ControlInstance{}, - Tables: []render.Table{ - { - Title: `Container Image "image/server"`, - Numeric: false, - Rank: 4, - Rows: []render.Row{ - {Key: "Image ID", ValueMajor: fixture.ServerContainerImageID}, - {Key: `Label "foo1"`, ValueMajor: `bar1`}, - {Key: `Label "foo2"`, ValueMajor: `bar2`}, - }, - }, - { - Title: `Container "server"`, - Numeric: false, - Rank: 3, - Rows: []render.Row{ - {Key: "State", ValueMajor: "running"}, - {Key: "ID", ValueMajor: fixture.ServerContainerID}, - {Key: "Image ID", ValueMajor: fixture.ServerContainerImageID}, - {Key: fmt.Sprintf(`Label %q`, render.AmazonECSContainerNameLabel), ValueMajor: `server`}, - {Key: `Label "foo1"`, ValueMajor: `bar1`}, - {Key: `Label "foo2"`, ValueMajor: `bar2`}, - {Key: `Label "io.kubernetes.pod.name"`, ValueMajor: "ping/pong-b"}, - }, - }, - { - Title: fmt.Sprintf(`Process "apache" (%s)`, fixture.ServerPID), - Numeric: false, - Rank: 2, - Rows: []render.Row{}, - }, - { - Title: fmt.Sprintf("Host %q", fixture.ServerHostName), - Numeric: false, - Rank: 1, - Rows: []render.Row{ - {Key: "Load (1m)", ValueMajor: "0.01", Metric: &fixture.LoadMetric, ValueType: "sparkline"}, - {Key: "Load (5m)", ValueMajor: "0.01", Metric: &fixture.LoadMetric, ValueType: "sparkline"}, - {Key: "Load (15m)", ValueMajor: "0.01", Metric: 
&fixture.LoadMetric, ValueType: "sparkline"}, - {Key: "Operating system", ValueMajor: "Linux"}, - }, - }, - { - Title: "Connections", - Numeric: false, - Rank: 0, - Rows: []render.Row{ - {Key: "Ingress packet rate", ValueMajor: "105", ValueMinor: "packets/sec"}, - {Key: "Ingress byte rate", ValueMajor: "1.0", ValueMinor: "KBps"}, - {Key: "Client", ValueMajor: "Server", Expandable: true}, - { - Key: fmt.Sprintf("%s:%s", fixture.UnknownClient1IP, fixture.UnknownClient1Port), - ValueMajor: fmt.Sprintf("%s:%s", fixture.ServerIP, fixture.ServerPort), - Expandable: true, - }, - { - Key: fmt.Sprintf("%s:%s", fixture.UnknownClient2IP, fixture.UnknownClient2Port), - ValueMajor: fmt.Sprintf("%s:%s", fixture.ServerIP, fixture.ServerPort), - Expandable: true, - }, - { - Key: fmt.Sprintf("%s:%s", fixture.UnknownClient3IP, fixture.UnknownClient3Port), - ValueMajor: fmt.Sprintf("%s:%s", fixture.ServerIP, fixture.ServerPort), - Expandable: true, - }, - { - Key: fmt.Sprintf("%s:%s", fixture.ClientIP, fixture.ClientPort54001), - ValueMajor: fmt.Sprintf("%s:%s", fixture.ServerIP, fixture.ServerPort), - Expandable: true, - }, - { - Key: fmt.Sprintf("%s:%s", fixture.ClientIP, fixture.ClientPort54002), - ValueMajor: fmt.Sprintf("%s:%s", fixture.ServerIP, fixture.ServerPort), - Expandable: true, - }, - { - Key: fmt.Sprintf("%s:%s", fixture.RandomClientIP, fixture.RandomClientPort), - ValueMajor: fmt.Sprintf("%s:%s", fixture.ServerIP, fixture.ServerPort), - Expandable: true, - }, - }, - }, - }, - } - if !reflect.DeepEqual(want, have) { - t.Errorf("%s", test.Diff(want, have)) - } -} diff --git a/render/expected/expected.go b/render/expected/expected.go index a863d657d..b4946d7a9 100644 --- a/render/expected/expected.go +++ b/render/expected/expected.go @@ -23,10 +23,6 @@ var ( EgressPacketCount: newu64(70), EgressByteCount: newu64(700), }, - Origins: report.MakeIDList( - fixture.UnknownClient1NodeID, - fixture.UnknownClient2NodeID, - ), } } unknownPseudoNode2 = func(adjacent string) 
render.RenderableNode { @@ -39,9 +35,6 @@ var ( EgressPacketCount: newu64(50), EgressByteCount: newu64(500), }, - Origins: report.MakeIDList( - fixture.UnknownClient3NodeID, - ), } } theInternetNode = func(adjacent string) render.RenderableNode { @@ -54,10 +47,6 @@ var ( EgressPacketCount: newu64(60), EgressByteCount: newu64(600), }, - Origins: report.MakeIDList( - fixture.RandomClientNodeID, - fixture.GoogleEndpointNodeID, - ), } } ClientProcess1ID = render.MakeProcessID(fixture.ClientHostID, fixture.Client1PID) @@ -72,12 +61,7 @@ var ( LabelMinor: fmt.Sprintf("%s (%s)", fixture.ClientHostID, fixture.Client1PID), Rank: fixture.Client1Name, Pseudo: false, - Origins: report.MakeIDList( - fixture.Client54001NodeID, - fixture.ClientProcess1NodeID, - fixture.ClientHostNodeID, - ), - Node: report.MakeNode().WithAdjacent(ServerProcessID), + Node: report.MakeNode().WithAdjacent(ServerProcessID), EdgeMetadata: report.EdgeMetadata{ EgressPacketCount: newu64(10), EgressByteCount: newu64(100), @@ -89,12 +73,7 @@ var ( LabelMinor: fmt.Sprintf("%s (%s)", fixture.ClientHostID, fixture.Client2PID), Rank: fixture.Client2Name, Pseudo: false, - Origins: report.MakeIDList( - fixture.Client54002NodeID, - fixture.ClientProcess2NodeID, - fixture.ClientHostNodeID, - ), - Node: report.MakeNode().WithAdjacent(ServerProcessID), + Node: report.MakeNode().WithAdjacent(ServerProcessID), EdgeMetadata: report.EdgeMetadata{ EgressPacketCount: newu64(20), EgressByteCount: newu64(200), @@ -106,28 +85,18 @@ var ( LabelMinor: fmt.Sprintf("%s (%s)", fixture.ServerHostID, fixture.ServerPID), Rank: fixture.ServerName, Pseudo: false, - Origins: report.MakeIDList( - fixture.Server80NodeID, - fixture.ServerProcessNodeID, - fixture.ServerHostNodeID, - ), - Node: report.MakeNode(), + Node: report.MakeNode(), EdgeMetadata: report.EdgeMetadata{ IngressPacketCount: newu64(210), IngressByteCount: newu64(2100), }, }, nonContainerProcessID: { - ID: nonContainerProcessID, - LabelMajor: fixture.NonContainerName, - 
LabelMinor: fmt.Sprintf("%s (%s)", fixture.ServerHostID, fixture.NonContainerPID), - Rank: fixture.NonContainerName, - Pseudo: false, - Origins: report.MakeIDList( - fixture.NonContainerProcessNodeID, - fixture.ServerHostNodeID, - fixture.NonContainerNodeID, - ), + ID: nonContainerProcessID, + LabelMajor: fixture.NonContainerName, + LabelMinor: fmt.Sprintf("%s (%s)", fixture.ServerHostID, fixture.NonContainerPID), + Rank: fixture.NonContainerName, + Pseudo: false, Node: report.MakeNode().WithAdjacent(render.TheInternetID), EdgeMetadata: report.EdgeMetadata{}, }, @@ -136,6 +105,10 @@ var ( render.TheInternetID: theInternetNode(ServerProcessID), }).Prune() + ServerProcessRenderedID = render.MakeProcessID(fixture.ServerHostID, fixture.ServerPID) + ClientProcess1RenderedID = render.MakeProcessID(fixture.ClientHostID, fixture.Client1PID) + ClientProcess2RenderedID = render.MakeProcessID(fixture.ClientHostID, fixture.Client2PID) + RenderedProcessNames = (render.RenderableNodes{ fixture.Client1Name: { ID: fixture.Client1Name, @@ -143,12 +116,9 @@ var ( LabelMinor: "2 processes", Rank: fixture.Client1Name, Pseudo: false, - Origins: report.MakeIDList( - fixture.Client54001NodeID, - fixture.Client54002NodeID, - fixture.ClientProcess1NodeID, - fixture.ClientProcess2NodeID, - fixture.ClientHostNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID], + fixture.Report.Process.Nodes[fixture.ClientProcess2NodeID], ), Node: report.MakeNode().WithAdjacent(fixture.ServerName), EdgeMetadata: report.EdgeMetadata{ @@ -162,10 +132,8 @@ var ( LabelMinor: "1 process", Rank: fixture.ServerName, Pseudo: false, - Origins: report.MakeIDList( - fixture.Server80NodeID, - fixture.ServerProcessNodeID, - fixture.ServerHostNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ServerProcessNodeID], ), Node: report.MakeNode(), EdgeMetadata: report.EdgeMetadata{ @@ -179,10 +147,8 @@ var ( LabelMinor: "1 process", Rank: 
fixture.NonContainerName, Pseudo: false, - Origins: report.MakeIDList( - fixture.NonContainerProcessNodeID, - fixture.ServerHostNodeID, - fixture.NonContainerNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.NonContainerProcessNodeID], ), Node: report.MakeNode().WithAdjacent(render.TheInternetID), EdgeMetadata: report.EdgeMetadata{}, @@ -192,41 +158,35 @@ var ( render.TheInternetID: theInternetNode(fixture.ServerName), }).Prune() + ServerContainerRenderedID = render.MakeContainerID(fixture.ServerContainerID) + ClientContainerRenderedID = render.MakeContainerID(fixture.ClientContainerID) + RenderedContainers = (render.RenderableNodes{ - fixture.ClientContainerID: { - ID: fixture.ClientContainerID, + ClientContainerRenderedID: { + ID: ClientContainerRenderedID, LabelMajor: "client", LabelMinor: fixture.ClientHostName, Rank: fixture.ClientContainerImageName, Pseudo: false, - Origins: report.MakeIDList( - fixture.ClientContainerImageNodeID, - fixture.ClientContainerNodeID, - fixture.Client54001NodeID, - fixture.Client54002NodeID, - fixture.ClientProcess1NodeID, - fixture.ClientProcess2NodeID, - fixture.ClientHostNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID], + fixture.Report.Process.Nodes[fixture.ClientProcess2NodeID], ), - Node: report.MakeNode().WithAdjacent(fixture.ServerContainerID), + Node: report.MakeNode().WithAdjacent(ServerContainerRenderedID), EdgeMetadata: report.EdgeMetadata{ EgressPacketCount: newu64(30), EgressByteCount: newu64(300), }, ControlNode: fixture.ClientContainerNodeID, }, - fixture.ServerContainerID: { - ID: fixture.ServerContainerID, + ServerContainerRenderedID: { + ID: ServerContainerRenderedID, LabelMajor: "server", LabelMinor: fixture.ServerHostName, Rank: fixture.ServerContainerImageName, Pseudo: false, - Origins: report.MakeIDList( - fixture.ServerContainerImageNodeID, - fixture.ServerContainerNodeID, - fixture.Server80NodeID, - 
fixture.ServerProcessNodeID, - fixture.ServerHostNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ServerProcessNodeID], ), Node: report.MakeNode(), EdgeMetadata: report.EdgeMetadata{ @@ -241,51 +201,46 @@ var ( LabelMinor: fixture.ServerHostName, Rank: "", Pseudo: true, - Origins: report.MakeIDList( - fixture.NonContainerProcessNodeID, - fixture.ServerHostNodeID, - fixture.NonContainerNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.NonContainerProcessNodeID], ), Node: report.MakeNode().WithAdjacent(render.TheInternetID), EdgeMetadata: report.EdgeMetadata{}, }, - render.TheInternetID: theInternetNode(fixture.ServerContainerID), + render.TheInternetID: theInternetNode(ServerContainerRenderedID), }).Prune() + ClientContainerImageRenderedName = render.MakeContainerImageID(fixture.ClientContainerImageName) + ServerContainerImageRenderedName = render.MakeContainerImageID(fixture.ServerContainerImageName) + RenderedContainerImages = (render.RenderableNodes{ - fixture.ClientContainerImageName: { - ID: fixture.ClientContainerImageName, + ClientContainerImageRenderedName: { + ID: ClientContainerImageRenderedName, LabelMajor: fixture.ClientContainerImageName, LabelMinor: "1 container", Rank: fixture.ClientContainerImageName, Pseudo: false, - Origins: report.MakeIDList( - fixture.ClientContainerImageNodeID, - fixture.ClientContainerNodeID, - fixture.Client54001NodeID, - fixture.Client54002NodeID, - fixture.ClientProcess1NodeID, - fixture.ClientProcess2NodeID, - fixture.ClientHostNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID], + fixture.Report.Process.Nodes[fixture.ClientProcess2NodeID], + fixture.Report.Container.Nodes[fixture.ClientContainerNodeID], ), - Node: report.MakeNode().WithAdjacent(fixture.ServerContainerImageName), + Node: report.MakeNode().WithAdjacent(ServerContainerImageRenderedName), EdgeMetadata: report.EdgeMetadata{ EgressPacketCount: newu64(30), 
EgressByteCount: newu64(300), }, }, - fixture.ServerContainerImageName: { - ID: fixture.ServerContainerImageName, + ServerContainerImageRenderedName: { + ID: ServerContainerImageRenderedName, LabelMajor: fixture.ServerContainerImageName, LabelMinor: "1 container", Rank: fixture.ServerContainerImageName, Pseudo: false, - Origins: report.MakeIDList( - fixture.ServerContainerImageNodeID, - fixture.ServerContainerNodeID, - fixture.Server80NodeID, - fixture.ServerProcessNodeID, - fixture.ServerHostNodeID), + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ServerProcessNodeID], + fixture.Report.Container.Nodes[fixture.ServerContainerNodeID], + ), Node: report.MakeNode(), EdgeMetadata: report.EdgeMetadata{ IngressPacketCount: newu64(210), @@ -298,15 +253,13 @@ var ( LabelMinor: fixture.ServerHostName, Rank: "", Pseudo: true, - Origins: report.MakeIDList( - fixture.NonContainerNodeID, - fixture.NonContainerProcessNodeID, - fixture.ServerHostNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.NonContainerProcessNodeID], ), Node: report.MakeNode().WithAdjacent(render.TheInternetID), EdgeMetadata: report.EdgeMetadata{}, }, - render.TheInternetID: theInternetNode(fixture.ServerContainerImageName), + render.TheInternetID: theInternetNode(ServerContainerImageRenderedName), }).Prune() ServerHostRenderedID = render.MakeHostID(fixture.ServerHostID) @@ -321,13 +274,15 @@ var ( LabelMinor: "hostname.com", // after first . 
Rank: "hostname.com", Pseudo: false, - Origins: report.MakeIDList( - fixture.ServerHostNodeID, - fixture.ServerAddressNodeID, + Children: report.MakeNodeSet( + fixture.Report.Container.Nodes[fixture.ServerContainerNodeID], + fixture.Report.Container.Nodes[fixture.ServerProcessNodeID], ), Node: report.MakeNode(), EdgeMetadata: report.EdgeMetadata{ - MaxConnCountTCP: newu64(3), + IngressPacketCount: newu64(210), + IngressByteCount: newu64(2100), + MaxConnCountTCP: newu64(3), }, }, ClientHostRenderedID: { @@ -336,13 +291,16 @@ var ( LabelMinor: "hostname.com", // after first . Rank: "hostname.com", Pseudo: false, - Origins: report.MakeIDList( - fixture.ClientHostNodeID, - fixture.ClientAddressNodeID, + Children: report.MakeNodeSet( + fixture.Report.Container.Nodes[fixture.ClientContainerNodeID], + fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID], + fixture.Report.Process.Nodes[fixture.ClientProcess2NodeID], ), Node: report.MakeNode().WithAdjacent(ServerHostRenderedID), EdgeMetadata: report.EdgeMetadata{ - MaxConnCountTCP: newu64(3), + EgressPacketCount: newu64(30), + EgressByteCount: newu64(300), + MaxConnCountTCP: newu64(3), }, }, pseudoHostID1: { @@ -351,7 +309,10 @@ var ( Pseudo: true, Node: report.MakeNode().WithAdjacent(ServerHostRenderedID), EdgeMetadata: report.EdgeMetadata{}, - Origins: report.MakeIDList(fixture.UnknownAddress1NodeID, fixture.UnknownAddress2NodeID), + Children: report.MakeNodeSet( + fixture.Report.Container.Nodes[fixture.ServerContainerNodeID], + fixture.Report.Process.Nodes[fixture.ServerProcessNodeID], + ), }, pseudoHostID2: { ID: pseudoHostID2, @@ -359,7 +320,6 @@ var ( Pseudo: true, Node: report.MakeNode().WithAdjacent(ServerHostRenderedID), EdgeMetadata: report.EdgeMetadata{}, - Origins: report.MakeIDList(fixture.UnknownAddress3NodeID), }, render.TheInternetID: { ID: render.TheInternetID, @@ -367,46 +327,43 @@ var ( Pseudo: true, Node: report.MakeNode().WithAdjacent(ServerHostRenderedID), EdgeMetadata: report.EdgeMetadata{}, - 
Origins: report.MakeIDList(fixture.RandomAddressNodeID), }, }).Prune() + ClientPodRenderedID = render.MakePodID("ping/pong-a") + ServerPodRenderedID = render.MakePodID("ping/pong-b") + RenderedPods = (render.RenderableNodes{ - "ping/pong-a": { - ID: "ping/pong-a", + ClientPodRenderedID: { + ID: ClientPodRenderedID, LabelMajor: "pong-a", LabelMinor: "1 container", Rank: "ping/pong-a", Pseudo: false, - Origins: report.MakeIDList( - fixture.Client54001NodeID, - fixture.Client54002NodeID, - fixture.ClientProcess1NodeID, - fixture.ClientProcess2NodeID, - fixture.ClientHostNodeID, - fixture.ClientContainerNodeID, - fixture.ClientContainerImageNodeID, - fixture.ClientPodNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID], + fixture.Report.Process.Nodes[fixture.ClientProcess2NodeID], + fixture.Report.Container.Nodes[fixture.ClientContainerNodeID], + fixture.Report.ContainerImage.Nodes[fixture.ClientContainerImageNodeID], + fixture.Report.Pod.Nodes[fixture.ClientPodNodeID], ), - Node: report.MakeNode().WithAdjacent("ping/pong-b"), + Node: report.MakeNode().WithAdjacent(ServerPodRenderedID), EdgeMetadata: report.EdgeMetadata{ EgressPacketCount: newu64(30), EgressByteCount: newu64(300), }, }, - "ping/pong-b": { - ID: "ping/pong-b", + ServerPodRenderedID: { + ID: ServerPodRenderedID, LabelMajor: "pong-b", LabelMinor: "1 container", Rank: "ping/pong-b", Pseudo: false, - Origins: report.MakeIDList( - fixture.Server80NodeID, - fixture.ServerPodNodeID, - fixture.ServerProcessNodeID, - fixture.ServerContainerNodeID, - fixture.ServerHostNodeID, - fixture.ServerContainerImageNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ServerProcessNodeID], + fixture.Report.Container.Nodes[fixture.ServerContainerNodeID], + fixture.Report.ContainerImage.Nodes[fixture.ServerContainerImageNodeID], + fixture.Report.Pod.Nodes[fixture.ServerPodNodeID], ), Node: report.MakeNode(), EdgeMetadata: report.EdgeMetadata{ @@ -420,10 
+377,8 @@ var ( LabelMinor: fixture.ServerHostName, Rank: "", Pseudo: true, - Origins: report.MakeIDList( - fixture.ServerHostNodeID, - fixture.NonContainerProcessNodeID, - fixture.NonContainerNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.NonContainerProcessNodeID], ), Node: report.MakeNode().WithAdjacent(render.TheInternetID), EdgeMetadata: report.EdgeMetadata{}, @@ -432,43 +387,35 @@ var ( ID: render.TheInternetID, LabelMajor: render.TheInternetMajor, Pseudo: true, - Node: report.MakeNode().WithAdjacent("ping/pong-b"), + Node: report.MakeNode().WithAdjacent(ServerPodRenderedID), EdgeMetadata: report.EdgeMetadata{ EgressPacketCount: newu64(60), EgressByteCount: newu64(600), }, - Origins: report.MakeIDList( - fixture.RandomClientNodeID, - fixture.GoogleEndpointNodeID, - ), }, }).Prune() + ServiceRenderedID = render.MakeServiceID("ping/pongservice") + RenderedPodServices = (render.RenderableNodes{ - "ping/pongservice": { - ID: fixture.ServiceID, + ServiceRenderedID: { + ID: ServiceRenderedID, LabelMajor: "pongservice", LabelMinor: "2 pods", Rank: fixture.ServiceID, Pseudo: false, - Origins: report.MakeIDList( - fixture.Client54001NodeID, - fixture.Client54002NodeID, - fixture.ClientProcess1NodeID, - fixture.ClientProcess2NodeID, - fixture.ClientHostNodeID, - fixture.ClientContainerNodeID, - fixture.ClientContainerImageNodeID, - fixture.ClientPodNodeID, - fixture.Server80NodeID, - fixture.ServerPodNodeID, - fixture.ServiceNodeID, - fixture.ServerProcessNodeID, - fixture.ServerContainerNodeID, - fixture.ServerHostNodeID, - fixture.ServerContainerImageNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.ClientProcess1NodeID], + fixture.Report.Process.Nodes[fixture.ClientProcess2NodeID], + fixture.Report.Container.Nodes[fixture.ClientContainerNodeID], + fixture.Report.ContainerImage.Nodes[fixture.ClientContainerImageNodeID], + fixture.Report.Pod.Nodes[fixture.ClientPodNodeID], + 
fixture.Report.Process.Nodes[fixture.ServerProcessNodeID], + fixture.Report.Container.Nodes[fixture.ServerContainerNodeID], + fixture.Report.ContainerImage.Nodes[fixture.ServerContainerImageNodeID], + fixture.Report.Pod.Nodes[fixture.ServerPodNodeID], ), - Node: report.MakeNode().WithAdjacent(fixture.ServiceID), // ?? Shouldn't be adjacent to itself? + Node: report.MakeNode().WithAdjacent(ServiceRenderedID), EdgeMetadata: report.EdgeMetadata{ EgressPacketCount: newu64(30), EgressByteCount: newu64(300), @@ -482,10 +429,8 @@ var ( LabelMinor: fixture.ServerHostName, Rank: "", Pseudo: true, - Origins: report.MakeIDList( - fixture.ServerHostNodeID, - fixture.NonContainerProcessNodeID, - fixture.NonContainerNodeID, + Children: report.MakeNodeSet( + fixture.Report.Process.Nodes[fixture.NonContainerProcessNodeID], ), Node: report.MakeNode().WithAdjacent(render.TheInternetID), EdgeMetadata: report.EdgeMetadata{}, @@ -494,15 +439,11 @@ var ( ID: render.TheInternetID, LabelMajor: render.TheInternetMajor, Pseudo: true, - Node: report.MakeNode().WithAdjacent(fixture.ServiceID), + Node: report.MakeNode().WithAdjacent(ServiceRenderedID), EdgeMetadata: report.EdgeMetadata{ EgressPacketCount: newu64(60), EgressByteCount: newu64(600), }, - Origins: report.MakeIDList( - fixture.RandomClientNodeID, - fixture.GoogleEndpointNodeID, - ), }, }).Prune() ) diff --git a/render/filters.go b/render/filters.go index 58f04045b..4db23be33 100644 --- a/render/filters.go +++ b/render/filters.go @@ -119,6 +119,17 @@ func (f Filter) Stats(rpt report.Report) Stats { // to indicate a node has an edge pointing to it or from it const IsConnected = "is_connected" +// FilterPseudo produces a renderer that removes pseudo nodes from the given +// renderer +func FilterPseudo(r Renderer) Renderer { + return Filter{ + Renderer: r, + FilterFunc: func(node RenderableNode) bool { + return !node.Pseudo + }, + } +} + // FilterUnconnected produces a renderer that filters unconnected nodes // from the given renderer 
func FilterUnconnected(r Renderer) Renderer { diff --git a/render/filters_test.go b/render/filters_test.go index 325e29d25..6722462cf 100644 --- a/render/filters_test.go +++ b/render/filters_test.go @@ -48,7 +48,7 @@ func TestFilterRender2(t *testing.T) { } } -func TestFilterUnconnectedPesudoNodes(t *testing.T) { +func TestFilterUnconnectedPseudoNodes(t *testing.T) { // Test pseudo nodes that are made unconnected by filtering // are also removed. { @@ -123,3 +123,21 @@ func TestFilterUnconnectedSelf(t *testing.T) { } } } + +func TestFilterPseudo(t *testing.T) { + // Test pseudonodes are removed + { + nodes := render.RenderableNodes{ + "foo": {ID: "foo", Node: report.MakeNode()}, + "bar": {ID: "bar", Pseudo: true, Node: report.MakeNode()}, + } + renderer := render.FilterPseudo(mockRenderer{RenderableNodes: nodes}) + want := render.RenderableNodes{ + "foo": {ID: "foo", Node: report.MakeNode()}, + } + have := renderer.Render(report.MakeReport()).Prune() + if !reflect.DeepEqual(want, have) { + t.Error(test.Diff(want, have)) + } + } +} diff --git a/render/id.go b/render/id.go index 5bbf4c06a..952c3d369 100644 --- a/render/id.go +++ b/render/id.go @@ -1,32 +1,56 @@ package render import ( - "fmt" "strings" ) +// makeID is the generic ID maker +func makeID(prefix string, parts ...string) string { + return strings.Join(append([]string{prefix}, parts...), ":") +} + // MakeEndpointID makes an endpoint node ID for rendered nodes. func MakeEndpointID(hostID, addr, port string) string { - return fmt.Sprintf("endpoint:%s:%s:%s", hostID, addr, port) + return makeID("endpoint", hostID, addr, port) } // MakeProcessID makes a process node ID for rendered nodes. func MakeProcessID(hostID, pid string) string { - return fmt.Sprintf("process:%s:%s", hostID, pid) + return makeID("process", hostID, pid) } // MakeAddressID makes an address node ID for rendered nodes. 
func MakeAddressID(hostID, addr string) string { - return fmt.Sprintf("address:%s:%s", hostID, addr) + return makeID("address", hostID, addr) +} + +// MakeContainerID makes a container node ID for rendered nodes. +func MakeContainerID(containerID string) string { + return makeID("container", containerID) +} + +// MakeContainerImageID makes a container image node ID for rendered nodes. +func MakeContainerImageID(imageID string) string { + return makeID("container_image", imageID) +} + +// MakePodID makes a pod node ID for rendered nodes. +func MakePodID(podID string) string { + return makeID("pod", podID) +} + +// MakeServiceID makes a service node ID for rendered nodes. +func MakeServiceID(serviceID string) string { + return makeID("service", serviceID) } // MakeHostID makes a host node ID for rendered nodes. func MakeHostID(hostID string) string { - return fmt.Sprintf("host:%s", hostID) + return makeID("host", hostID) } // MakePseudoNodeID produces a pseudo node ID from its composite parts, // for use in rendered nodes. func MakePseudoNodeID(parts ...string) string { - return strings.Join(append([]string{"pseudo"}, parts...), ":") + return makeID("pseudo", parts...) } diff --git a/render/mapping.go b/render/mapping.go index 74b3feb81..a1b675007 100644 --- a/render/mapping.go +++ b/render/mapping.go @@ -123,12 +123,13 @@ func MapProcessIdentity(m RenderableNode, _ report.Networks) RenderableNodes { // renderable node. As it is only ever run on container topology nodes, we // expect that certain keys are present. 
func MapContainerIdentity(m RenderableNode, _ report.Networks) RenderableNodes { - id, ok := m.Metadata[docker.ContainerID] + containerID, ok := m.Metadata[docker.ContainerID] if !ok { return RenderableNodes{} } var ( + id = MakeContainerID(containerID) major, _ = GetRenderableContainerName(m.Node) minor = report.ExtractHostID(m.Node) rank = m.Metadata[docker.ImageID] @@ -136,10 +137,6 @@ func MapContainerIdentity(m RenderableNode, _ report.Networks) RenderableNodes { node := NewRenderableNodeWith(id, major, minor, rank, m) node.ControlNode = m.ID - if imageID, ok := m.Metadata[docker.ImageID]; ok { - hostID, _, _ := report.ParseContainerNodeID(m.ID) - node.Origins = node.Origins.Add(report.MakeContainerNodeID(hostID, imageID)) - } return RenderableNodes{id: node} } @@ -171,14 +168,15 @@ func GetRenderableContainerName(nmd report.Node) (string, bool) { // image renderable node. As it is only ever run on container image topology // nodes, we expect that certain keys are present. func MapContainerImageIdentity(m RenderableNode, _ report.Networks) RenderableNodes { - id, ok := m.Metadata[docker.ImageID] + imageID, ok := m.Metadata[docker.ImageID] if !ok { return RenderableNodes{} } var ( + id = MakeContainerImageID(imageID) major = m.Metadata[docker.ImageName] - rank = m.Metadata[docker.ImageID] + rank = imageID ) return RenderableNodes{id: NewRenderableNodeWith(id, major, "", rank, m)} @@ -188,12 +186,13 @@ func MapContainerImageIdentity(m RenderableNode, _ report.Networks) RenderableNo // only ever run on pod topology nodes, we expect that certain keys // are present. 
func MapPodIdentity(m RenderableNode, _ report.Networks) RenderableNodes { - id, ok := m.Metadata[kubernetes.PodID] + podID, ok := m.Metadata[kubernetes.PodID] if !ok { return RenderableNodes{} } var ( + id = MakePodID(podID) major = m.Metadata[kubernetes.PodName] rank = m.Metadata[kubernetes.PodID] ) @@ -205,12 +204,13 @@ func MapPodIdentity(m RenderableNode, _ report.Networks) RenderableNodes { // only ever run on service topology nodes, we expect that certain keys // are present. func MapServiceIdentity(m RenderableNode, _ report.Networks) RenderableNodes { - id, ok := m.Metadata[kubernetes.ServiceID] + serviceID, ok := m.Metadata[kubernetes.ServiceID] if !ok { return RenderableNodes{} } var ( + id = MakeServiceID(serviceID) major = m.Metadata[kubernetes.ServiceName] rank = m.Metadata[kubernetes.ServiceID] ) @@ -308,6 +308,7 @@ func MapEndpoint2IP(m RenderableNode, local report.Networks) RenderableNodes { // So we need to emit two nodes, for two different cases. id := report.MakeScopedEndpointNodeID(scope, addr, "") idWithPort := report.MakeScopedEndpointNodeID(scope, addr, port) + m = m.WithParents(nil) return RenderableNodes{ id: NewRenderableNodeWith(id, "", "", "", m), idWithPort: NewRenderableNodeWith(idWithPort, "", "", "", m), @@ -340,7 +341,7 @@ func MapContainer2IP(m RenderableNode, _ report.Networks) RenderableNodes { if mapping := portMappingMatch.FindStringSubmatch(portMapping); mapping != nil { ip, port := mapping[1], mapping[2] id := report.MakeScopedEndpointNodeID("", ip, port) - node := NewRenderableNodeWith(id, "", "", "", m) + node := NewRenderableNodeWith(id, "", "", "", m.WithParents(nil)) node.Counters[containersKey] = 1 result[id] = node } @@ -367,12 +368,14 @@ func MapIP2Container(n RenderableNode, _ report.Networks) RenderableNodes { // If this node is not a container, exclude it. // This excludes all the nodes we've dragged in from endpoint // that we failed to join to a container. 
- id, ok := n.Node.Metadata[docker.ContainerID] + containerID, ok := n.Node.Metadata[docker.ContainerID] if !ok { return RenderableNodes{} } - return RenderableNodes{id: NewDerivedNode(id, n)} + id := MakeContainerID(containerID) + + return RenderableNodes{id: NewDerivedNode(id, n.WithParents(nil))} } // MapEndpoint2Process maps endpoint RenderableNodes to process @@ -397,7 +400,7 @@ func MapEndpoint2Process(n RenderableNode, _ report.Networks) RenderableNodes { } id := MakeProcessID(report.ExtractHostID(n.Node), pid) - return RenderableNodes{id: NewDerivedNode(id, n)} + return RenderableNodes{id: NewDerivedNode(id, n.WithParents(nil))} } // MapProcess2Container maps process RenderableNodes to container @@ -426,16 +429,23 @@ func MapProcess2Container(n RenderableNode, _ report.Networks) RenderableNodes { // into an per-host "Uncontained" node. If for whatever reason // this node doesn't have a host id in their nodemetadata, it'll // all get grouped into a single uncontained node. - id, ok := n.Node.Metadata[docker.ContainerID] - if !ok { - hostID := report.ExtractHostID(n.Node) + var ( + id string + node RenderableNode + hostID = report.ExtractHostID(n.Node) + ) + n = n.WithParents(nil) + if containerID, ok := n.Node.Metadata[docker.ContainerID]; ok { + id = MakeContainerID(containerID) + node = NewDerivedNode(id, n) + } else { id = MakePseudoNodeID(UncontainedID, hostID) - node := newDerivedPseudoNode(id, UncontainedMajor, n) + node = newDerivedPseudoNode(id, UncontainedMajor, n) node.LabelMinor = hostID - return RenderableNodes{id: node} } - return RenderableNodes{id: NewDerivedNode(id, n)} + node.Children = node.Children.Add(n.Node) + return RenderableNodes{id: node} } // MapProcess2Name maps process RenderableNodes to RenderableNodes @@ -458,6 +468,9 @@ func MapProcess2Name(n RenderableNode, _ report.Networks) RenderableNodes { node.LabelMajor = name node.Rank = name node.Node.Counters[processesKey] = 1 + node.Node.Topology = "process_name" + node.Node.ID = 
name + node.Children = node.Children.Add(n.Node) return RenderableNodes{name: node} } @@ -497,14 +510,21 @@ func MapContainer2ContainerImage(n RenderableNode, _ report.Networks) Renderable // Otherwise, if some some reason the container doesn't have a image_id // (maybe slightly out of sync reports), just drop it - id, ok := n.Node.Metadata[docker.ImageID] + imageID, ok := n.Node.Metadata[docker.ImageID] if !ok { return RenderableNodes{} } // Add container id key to the counters, which will later be counted to produce the minor label - result := NewDerivedNode(id, n) + id := MakeContainerImageID(imageID) + result := NewDerivedNode(id, n.WithParents(nil)) result.Node.Counters[containersKey] = 1 + + // Add the container as a child of the new image node + result.Children = result.Children.Add(n.Node) + + result.Node.Topology = "container_image" + result.Node.ID = report.MakeContainerImageNodeID(report.ExtractHostID(n.Node), imageID) return RenderableNodes{id: result} } @@ -532,15 +552,19 @@ func MapPod2Service(n RenderableNode, _ report.Networks) RenderableNodes { } result := RenderableNodes{} - for _, id := range strings.Fields(ids) { - n := NewDerivedNode(id, n) + for _, serviceID := range strings.Fields(ids) { + id := MakeServiceID(serviceID) + n := NewDerivedNode(id, n.WithParents(nil)) n.Node.Counters[podsKey] = 1 + n.Children = n.Children.Add(n.Node) result[id] = n } return result } -func imageNameWithoutVersion(name string) string { +// ImageNameWithoutVersion splits the image name apart, returning the name +// without the version, if possible +func ImageNameWithoutVersion(name string) string { parts := strings.SplitN(name, "/", 3) if len(parts) == 3 { name = fmt.Sprintf("%s/%s", parts[1], parts[2]) @@ -565,13 +589,38 @@ func MapContainerImage2Name(n RenderableNode, _ report.Networks) RenderableNodes return RenderableNodes{} } - name = imageNameWithoutVersion(name) + name = ImageNameWithoutVersion(name) + id := MakeContainerImageID(name) - node := 
NewDerivedNode(name, n) + node := NewDerivedNode(id, n) node.LabelMajor = name node.Rank = name node.Node = n.Node.Copy() // Propagate NMD for container counting. - return RenderableNodes{name: node} + return RenderableNodes{id: node} +} + +// MapX2Host maps any RenderableNodes to host +// RenderableNodes. +// +// If this function is given a node without a hostname +// (including other pseudo nodes), it will drop the node. +// +// Otherwise, this function will produce a node with the correct ID +// format for a host, but without any Major or Minor labels. +// It does not have enough info to do that, and the resulting graph +// must be merged with a container graph to get that info. +func MapX2Host(n RenderableNode, _ report.Networks) RenderableNodes { + // Propagate all pseudo nodes + if n.Pseudo { + return RenderableNodes{n.ID: n} + } + if _, ok := n.Node.Metadata[report.HostNodeID]; !ok { + return RenderableNodes{} + } + id := MakeHostID(report.ExtractHostID(n.Node)) + result := NewDerivedNode(id, n.WithParents(nil)) + result.Children = result.Children.Add(n.Node) + return RenderableNodes{id: result} } // MapContainer2Pod maps container RenderableNodes to pod @@ -593,23 +642,27 @@ func MapContainer2Pod(n RenderableNode, _ report.Networks) RenderableNodes { // Otherwise, if some some reason the container doesn't have a pod_id (maybe // slightly out of sync reports, or its not in a pod), just drop it - id, ok := n.Node.Metadata["docker_label_io.kubernetes.pod.name"] + podID, ok := n.Node.Metadata[kubernetes.PodID] if !ok { return RenderableNodes{} } + id := MakePodID(podID) // Add container- key to NMD, which will later be counted to produce the // minor label - result := NewRenderableNodeWith(id, "", "", id, n) + result := NewRenderableNodeWith(id, "", "", podID, n.WithParents(nil)) result.Node.Counters[containersKey] = 1 // Due to a bug in kubernetes, addon pods on the master node are not returned // from the API. 
This is a workaround until // https://github.com/kubernetes/kubernetes/issues/14738 is fixed. - if s := strings.SplitN(id, "/", 2); len(s) == 2 { + if s := strings.SplitN(podID, "/", 2); len(s) == 2 { result.LabelMajor = s[1] result.Node.Metadata[kubernetes.Namespace] = s[0] result.Node.Metadata[kubernetes.PodName] = s[1] } + + result.Children = result.Children.Add(n.Node) + return RenderableNodes{id: result} } @@ -633,6 +686,12 @@ func MapContainer2Hostname(n RenderableNode, _ report.Networks) RenderableNodes // Add container id key to the counters, which will later be counted to produce the minor label result.Node.Counters[containersKey] = 1 + + result.Node.Topology = "container_hostname" + result.Node.ID = id + + result.Children = result.Children.Add(n.Node) + return RenderableNodes{id: result} } @@ -669,18 +728,6 @@ func MapCountPods(n RenderableNode, _ report.Networks) RenderableNodes { return RenderableNodes{n.ID: n} } -// MapAddress2Host maps address RenderableNodes to host RenderableNodes. -// -// Otherthan pseudo nodes, we can assume all nodes have a HostID -func MapAddress2Host(n RenderableNode, _ report.Networks) RenderableNodes { - if n.Pseudo { - return RenderableNodes{n.ID: n} - } - - id := MakeHostID(report.ExtractHostID(n.Node)) - return RenderableNodes{id: NewDerivedNode(id, n)} -} - // trySplitAddr is basically ParseArbitraryNodeID, since its callsites // (pseudo funcs) just have opaque node IDs and don't know what topology they // come from. 
Without changing how pseudo funcs work, we can't make it much diff --git a/render/mapping_internal_test.go b/render/mapping_internal_test.go index 75d2bfc8d..c04e2778d 100644 --- a/render/mapping_internal_test.go +++ b/render/mapping_internal_test.go @@ -12,7 +12,7 @@ func TestDockerImageName(t *testing.T) { {"docker-registry.domain.name:5000/repo/image1:ver", "repo/image1"}, {"foo", "foo"}, } { - name := imageNameWithoutVersion(input.in) + name := ImageNameWithoutVersion(input.in) if name != input.name { t.Fatalf("%s: %s != %s", input.in, name, input.name) } diff --git a/render/renderable_node.go b/render/renderable_node.go index 8519f3687..368e0353b 100644 --- a/render/renderable_node.go +++ b/render/renderable_node.go @@ -8,13 +8,13 @@ import ( // an element of a topology. It should contain information that's relevant // to rendering a node when there are many nodes visible at once. type RenderableNode struct { - ID string `json:"id"` // - LabelMajor string `json:"label_major"` // e.g. "process", human-readable - LabelMinor string `json:"label_minor,omitempty"` // e.g. "hostname", human-readable, optional - Rank string `json:"rank"` // to help the layout engine - Pseudo bool `json:"pseudo,omitempty"` // sort-of a placeholder node, for rendering purposes - Origins report.IDList `json:"origins,omitempty"` // Core node IDs that contributed information - ControlNode string `json:"-"` // ID of node from which to show the controls in the UI + ID string `json:"id"` // + LabelMajor string `json:"label_major"` // e.g. "process", human-readable + LabelMinor string `json:"label_minor,omitempty"` // e.g. 
"hostname", human-readable, optional + Rank string `json:"rank"` // to help the layout engine + Pseudo bool `json:"pseudo,omitempty"` // sort-of a placeholder node, for rendering purposes + Children report.NodeSet `json:"children,omitempty"` // Nodes which have been grouped into this one + ControlNode string `json:"-"` // ID of node from which to show the controls in the UI report.EdgeMetadata `json:"metadata"` // Numeric sums report.Node @@ -28,23 +28,22 @@ func NewRenderableNode(id string) RenderableNode { LabelMinor: "", Rank: "", Pseudo: false, - Origins: report.MakeIDList(), EdgeMetadata: report.EdgeMetadata{}, Node: report.MakeNode(), } } // NewRenderableNodeWith makes a new RenderableNode with some fields filled in -func NewRenderableNodeWith(id, major, minor, rank string, rn RenderableNode) RenderableNode { +func NewRenderableNodeWith(id, major, minor, rank string, node RenderableNode) RenderableNode { return RenderableNode{ ID: id, LabelMajor: major, LabelMinor: minor, Rank: rank, Pseudo: false, - Origins: rn.Origins.Copy(), - EdgeMetadata: rn.EdgeMetadata.Copy(), - Node: rn.Node.Copy(), + Children: node.Children.Copy(), + EdgeMetadata: node.EdgeMetadata.Copy(), + Node: node.Node.Copy(), } } @@ -56,7 +55,7 @@ func NewDerivedNode(id string, node RenderableNode) RenderableNode { LabelMinor: "", Rank: "", Pseudo: node.Pseudo, - Origins: node.Origins.Copy(), + Children: node.Children.Copy(), EdgeMetadata: node.EdgeMetadata.Copy(), Node: node.Node.Copy(), ControlNode: "", // Do not propagate ControlNode when making a derived node! 
@@ -70,7 +69,7 @@ func newDerivedPseudoNode(id, major string, node RenderableNode) RenderableNode LabelMinor: "", Rank: "", Pseudo: true, - Origins: node.Origins.Copy(), + Children: node.Children.Copy(), EdgeMetadata: node.EdgeMetadata.Copy(), Node: node.Node.Copy(), } @@ -83,6 +82,13 @@ func (rn RenderableNode) WithNode(n report.Node) RenderableNode { return result } +// WithParents creates a new RenderableNode based on rn, where n has the given parents set +func (rn RenderableNode) WithParents(p report.Sets) RenderableNode { + result := rn.Copy() + result.Node.Parents = p + return result +} + // Merge merges rn with other and returns a new RenderableNode func (rn RenderableNode) Merge(other RenderableNode) RenderableNode { result := rn.Copy() @@ -107,7 +113,7 @@ func (rn RenderableNode) Merge(other RenderableNode) RenderableNode { panic(result.ID) } - result.Origins = rn.Origins.Merge(other.Origins) + result.Children = rn.Children.Merge(other.Children) result.EdgeMetadata = rn.EdgeMetadata.Merge(other.EdgeMetadata) result.Node = rn.Node.Merge(other.Node) @@ -122,7 +128,7 @@ func (rn RenderableNode) Copy() RenderableNode { LabelMinor: rn.LabelMinor, Rank: rn.Rank, Pseudo: rn.Pseudo, - Origins: rn.Origins.Copy(), + Children: rn.Children.Copy(), EdgeMetadata: rn.EdgeMetadata.Copy(), Node: rn.Node.Copy(), ControlNode: rn.ControlNode, @@ -135,6 +141,7 @@ func (rn RenderableNode) Copy() RenderableNode { func (rn RenderableNode) Prune() RenderableNode { cp := rn.Copy() cp.Node = report.MakeNode().WithAdjacent(cp.Node.Adjacency...) 
+ cp.Children = nil return cp } diff --git a/render/renderable_node_test.go b/render/renderable_node_test.go index d955dec6b..9a9ad49a0 100644 --- a/render/renderable_node_test.go +++ b/render/renderable_node_test.go @@ -37,7 +37,7 @@ func TestMergeRenderableNode(t *testing.T) { Rank: "", Pseudo: false, Node: report.MakeNode().WithAdjacent("a1"), - Origins: report.MakeIDList("o1"), + Children: report.MakeNodeSet(report.MakeNode().WithID("child1")), } node2 := render.RenderableNode{ ID: "foo", @@ -46,7 +46,7 @@ func TestMergeRenderableNode(t *testing.T) { Rank: "rank", Pseudo: false, Node: report.MakeNode().WithAdjacent("a2"), - Origins: report.MakeIDList("o2"), + Children: report.MakeNodeSet(report.MakeNode().WithID("child2")), } want := render.RenderableNode{ ID: "foo", @@ -54,8 +54,8 @@ func TestMergeRenderableNode(t *testing.T) { LabelMinor: "minor", Rank: "rank", Pseudo: false, - Node: report.MakeNode().WithAdjacent("a1").WithAdjacent("a2"), - Origins: report.MakeIDList("o1", "o2"), + Node: report.MakeNode().WithID("foo").WithAdjacent("a1").WithAdjacent("a2"), + Children: report.MakeNodeSet(report.MakeNode().WithID("child1"), report.MakeNode().WithID("child2")), EdgeMetadata: report.EdgeMetadata{}, }.Prune() have := node1.Merge(node2).Prune() diff --git a/render/selectors.go b/render/selectors.go index 9274aaa45..18032aac9 100644 --- a/render/selectors.go +++ b/render/selectors.go @@ -22,12 +22,7 @@ func (t TopologySelector) Stats(r report.Report) Stats { func MakeRenderableNodes(t report.Topology) RenderableNodes { result := RenderableNodes{} for id, nmd := range t.Nodes { - rn := NewRenderableNode(id).WithNode(nmd) - rn.Origins = report.MakeIDList(id) - if hostNodeID, ok := nmd.Metadata[report.HostNodeID]; ok { - rn.Origins = rn.Origins.Add(hostNodeID) - } - result[id] = rn + result[id] = NewRenderableNode(id).WithNode(nmd) } // Push EdgeMetadata to both ends of the edges diff --git a/render/short_lived_connections_test.go 
b/render/short_lived_connections_test.go index 0b98b4f5f..00b3a2bba 100644 --- a/render/short_lived_connections_test.go +++ b/render/short_lived_connections_test.go @@ -28,7 +28,7 @@ var ( containerID = "a1b2c3d4e5" containerIP = "192.168.0.1" containerName = "foo" - containerNodeID = report.MakeContainerNodeID(serverHostID, containerID) + containerNodeID = report.MakeContainerNodeID(containerID) rpt = report.Report{ Endpoint: report.Topology{ @@ -37,13 +37,13 @@ var ( endpoint.Addr: randomIP, endpoint.Port: randomPort, endpoint.Conntracked: "true", - }).WithAdjacent(serverEndpointNodeID), + }).WithAdjacent(serverEndpointNodeID).WithID(randomEndpointNodeID).WithTopology("endpoint"), serverEndpointNodeID: report.MakeNode().WithMetadata(map[string]string{ endpoint.Addr: serverIP, endpoint.Port: serverPort, endpoint.Conntracked: "true", - }), + }).WithID(serverEndpointNodeID).WithTopology("endpoint"), }, }, Container: report.Topology{ @@ -55,7 +55,7 @@ var ( }).WithSets(report.Sets{ docker.ContainerIPs: report.MakeStringSet(containerIP), docker.ContainerPorts: report.MakeStringSet(fmt.Sprintf("%s:%s->%s/tcp", serverIP, serverPort, serverPort)), - }), + }).WithID(containerNodeID).WithTopology("container"), }, }, Host: report.Topology{ @@ -64,7 +64,7 @@ var ( report.HostNodeID: serverHostNodeID, }).WithSets(report.Sets{ host.LocalNetworks: report.MakeStringSet("192.168.0.0/16"), - }), + }).WithID(serverHostNodeID).WithTopology("host"), }, }, } @@ -74,16 +74,14 @@ var ( ID: render.TheInternetID, LabelMajor: render.TheInternetMajor, Pseudo: true, - Node: report.MakeNode().WithAdjacent(containerID), - Origins: report.MakeIDList(randomEndpointNodeID), + Node: report.MakeNode().WithAdjacent(render.MakeContainerID(containerID)), }, - containerID: { - ID: containerID, + render.MakeContainerID(containerID): { + ID: render.MakeContainerID(containerID), LabelMajor: containerName, LabelMinor: serverHostID, Rank: "", Pseudo: false, - Origins: report.MakeIDList(containerNodeID, 
serverEndpointNodeID, serverHostNodeID), Node: report.MakeNode(), ControlNode: containerNodeID, }, diff --git a/render/topologies.go b/render/topologies.go index d01121efc..edc914605 100644 --- a/render/topologies.go +++ b/render/topologies.go @@ -49,7 +49,7 @@ func (r processWithContainerNameRenderer) Render(rpt report.Report) RenderableNo if !ok { continue } - container, ok := containers[containerID] + container, ok := containers[MakeContainerID(containerID)] if !ok { continue } @@ -86,15 +86,10 @@ var ContainerRenderer = MakeReduce( _, isConnected := n.Node.Metadata[IsConnected] return inContainer || isConnected }, - Renderer: ColorConnected(Map{ + Renderer: Map{ MapFunc: MapProcess2Container, - Renderer: ProcessRenderer, - }), - }, - - Map{ - MapFunc: MapContainerIdentity, - Renderer: SelectContainer, + Renderer: ColorConnected(ProcessRenderer), + }, }, // This mapper brings in short lived connections by joining with container IPs. @@ -114,6 +109,11 @@ var ContainerRenderer = MakeReduce( }, ), }), + + Map{ + MapFunc: MapContainerIdentity, + Renderer: SelectContainer, + }, ) type containerWithImageNameRenderer struct { @@ -135,11 +135,11 @@ func (r containerWithImageNameRenderer) Render(rpt report.Report) RenderableNode if !ok { continue } - image, ok := images[imageID] + image, ok := images[MakeContainerImageID(imageID)] if !ok { continue } - c.Rank = imageNameWithoutVersion(image.LabelMajor) + c.Rank = ImageNameWithoutVersion(image.LabelMajor) c.Metadata = image.Metadata.Merge(c.Metadata) containers[id] = c } @@ -191,7 +191,25 @@ var AddressRenderer = Map{ // graph from the host topology and address graph. 
var HostRenderer = MakeReduce( Map{ - MapFunc: MapAddress2Host, + MapFunc: MapX2Host, + Renderer: Map{ + MapFunc: MapContainerImageIdentity, + Renderer: SelectContainerImage, + }, + }, + Map{ + MapFunc: MapX2Host, + Renderer: FilterPseudo(ContainerRenderer), + }, + Map{ + MapFunc: MapX2Host, + Renderer: Map{ + MapFunc: MapPodIdentity, + Renderer: SelectPod, + }, + }, + Map{ + MapFunc: MapX2Host, Renderer: AddressRenderer, }, Map{ @@ -205,14 +223,14 @@ var HostRenderer = MakeReduce( var PodRenderer = Map{ MapFunc: MapCountContainers, Renderer: MakeReduce( - Map{ - MapFunc: MapPodIdentity, - Renderer: SelectPod, - }, Map{ MapFunc: MapContainer2Pod, Renderer: ContainerRenderer, }, + Map{ + MapFunc: MapPodIdentity, + Renderer: SelectPod, + }, ), } diff --git a/render/topologies_test.go b/render/topologies_test.go index 2a70ce68f..7a74424df 100644 --- a/render/topologies_test.go +++ b/render/topologies_test.go @@ -43,7 +43,7 @@ func TestContainerFilterRenderer(t *testing.T) { input.Container.Nodes[fixture.ClientContainerNodeID].Metadata[docker.LabelPrefix+"works.weave.role"] = "system" have := render.FilterSystem(render.ContainerWithImageNameRenderer).Render(input).Prune() want := expected.RenderedContainers.Copy() - delete(want, fixture.ClientContainerID) + delete(want, expected.ClientContainerRenderedID) if !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) } @@ -77,14 +77,14 @@ func TestPodFilterRenderer(t *testing.T) { // tag on containers or pod namespace in the topology and ensure // it is filtered out correctly. 
input := fixture.Report.Copy() - input.Pod.Nodes[fixture.ClientPodNodeID].Metadata[kubernetes.PodID] = "kube-system/foo" + input.Pod.Nodes[fixture.ClientPodNodeID].Metadata[kubernetes.PodID] = "pod:kube-system/foo" input.Pod.Nodes[fixture.ClientPodNodeID].Metadata[kubernetes.Namespace] = "kube-system" input.Pod.Nodes[fixture.ClientPodNodeID].Metadata[kubernetes.PodName] = "foo" input.Container.Nodes[fixture.ClientContainerNodeID].Metadata[docker.LabelPrefix+"io.kubernetes.pod.name"] = "kube-system/foo" have := render.FilterSystem(render.PodRenderer).Render(input).Prune() want := expected.RenderedPods.Copy() - delete(want, fixture.ClientPodID) - delete(want, fixture.ClientContainerID) + delete(want, expected.ClientPodRenderedID) + delete(want, expected.ClientContainerRenderedID) if !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) } diff --git a/report/id.go b/report/id.go index 42b383611..f8401cb5a 100644 --- a/report/id.go +++ b/report/id.go @@ -102,13 +102,18 @@ func MakeHostNodeID(hostID string) string { } // MakeContainerNodeID produces a container node ID from its composite parts. -func MakeContainerNodeID(hostID, containerID string) string { - return hostID + ScopeDelim + containerID +func MakeContainerNodeID(containerID string) string { + return containerID + ScopeDelim + "" +} + +// MakeContainerImageNodeID produces a container image node ID from its composite parts. +func MakeContainerImageNodeID(hostID, containerImageID string) string { + return hostID + ScopeDelim + containerImageID } // MakePodNodeID produces a pod node ID from its composite parts. -func MakePodNodeID(hostID, podID string) string { - return hostID + ScopeDelim + podID +func MakePodNodeID(namespaceID, podID string) string { + return namespaceID + ScopeDelim + podID } // MakeServiceNodeID produces a service node ID from its composite parts. 
@@ -143,14 +148,13 @@ func ParseEndpointNodeID(endpointNodeID string) (hostID, address, port string, o return fields[0], fields[1], fields[2], true } -// ParseContainerNodeID produces the host and container id from an container -// node ID. -func ParseContainerNodeID(containerNodeID string) (hostID, containerID string, ok bool) { +// ParseContainerNodeID produces the container id from an container node ID. +func ParseContainerNodeID(containerNodeID string) (containerID string, ok bool) { fields := strings.SplitN(containerNodeID, ScopeDelim, 2) - if len(fields) != 2 { - return "", "", false + if len(fields) != 2 || fields[1] != "" { + return "", false } - return fields[0], fields[1], true + return fields[0], true } // ParseAddressNodeID produces the host ID, address from an address node ID. diff --git a/report/id_list.go b/report/id_list.go index a530d3c92..9c740e69b 100644 --- a/report/id_list.go +++ b/report/id_list.go @@ -15,6 +15,11 @@ func (a IDList) Add(ids ...string) IDList { return IDList(StringSet(a).Add(ids...)) } +// Remove is the only correct way to remove IDs from an IDList. +func (a IDList) Remove(ids ...string) IDList { + return IDList(StringSet(a).Remove(ids...)) +} + // Copy returns a copy of the IDList. 
func (a IDList) Copy() IDList { return IDList(StringSet(a).Copy()) diff --git a/report/merge_test.go b/report/merge_test.go index a97585887..e38d89bd1 100644 --- a/report/merge_test.go +++ b/report/merge_test.go @@ -193,7 +193,30 @@ func TestMergeNodes(t *testing.T) { }), }, }, - "Merge conflict": { + "Merge conflict with rank difference": { + a: report.Nodes{ + ":192.168.1.1:12345": report.MakeNodeWith(map[string]string{ + PID: "23128", + Name: "curl", + Domain: "node-a.local", + }), + }, + b: report.Nodes{ + ":192.168.1.1:12345": report.MakeNodeWith(map[string]string{ // <-- same ID + PID: "0", + Name: "curl", + Domain: "node-a.local", + }), + }, + want: report.Nodes{ + ":192.168.1.1:12345": report.MakeNodeWith(map[string]string{ + PID: "23128", + Name: "curl", + Domain: "node-a.local", + }), + }, + }, + "Merge conflict with no rank difference": { a: report.Nodes{ ":192.168.1.1:12345": report.MakeNodeWith(map[string]string{ PID: "23128", diff --git a/report/metrics.go b/report/metrics.go index a09893925..3c7f893c5 100644 --- a/report/metrics.go +++ b/report/metrics.go @@ -235,7 +235,9 @@ func parseTime(s string) time.Time { return t } -func (m Metric) toIntermediate() WireMetrics { +// ToIntermediate converts the metric to a representation suitable +// for serialization. 
+func (m Metric) ToIntermediate() WireMetrics { samples := []Sample{} if m.Samples != nil { m.Samples.Reverse().ForEach(func(s interface{}) { @@ -268,7 +270,7 @@ func (m WireMetrics) fromIntermediate() Metric { // MarshalJSON implements json.Marshaller func (m Metric) MarshalJSON() ([]byte, error) { buf := bytes.Buffer{} - in := m.toIntermediate() + in := m.ToIntermediate() err := json.NewEncoder(&buf).Encode(in) return buf.Bytes(), err } @@ -286,7 +288,7 @@ func (m *Metric) UnmarshalJSON(input []byte) error { // GobEncode implements gob.Marshaller func (m Metric) GobEncode() ([]byte, error) { buf := bytes.Buffer{} - err := gob.NewEncoder(&buf).Encode(m.toIntermediate()) + err := gob.NewEncoder(&buf).Encode(m.ToIntermediate()) return buf.Bytes(), err } diff --git a/report/node_set.go b/report/node_set.go new file mode 100644 index 000000000..9a36b864b --- /dev/null +++ b/report/node_set.go @@ -0,0 +1,69 @@ +package report + +import ( + "sort" +) + +// NodeSet is a sorted set of nodes keyed on (Topology, ID). Clients must use +// the Add method to add nodes +type NodeSet []Node + +// MakeNodeSet makes a new NodeSet with the given nodes. +// TODO: Make this more efficient +func MakeNodeSet(nodes ...Node) NodeSet { + if len(nodes) <= 0 { + return nil + } + result := NodeSet{} + for _, node := range nodes { + result = result.Add(node) + } + return result +} + +// Add adds the nodes to the NodeSet. Add is the only valid way to grow a +// NodeSet. Add returns the NodeSet to enable chaining. +func (n NodeSet) Add(nodes ...Node) NodeSet { + for _, node := range nodes { + i := sort.Search(len(n), func(i int) bool { + return n[i].Topology >= node.Topology && n[i].ID >= node.ID + }) + if i < len(n) && n[i].Topology == node.Topology && n[i].ID == node.ID { + // The list already has the element. + continue + } + // It a new element, insert it in order. 
+ n = append(n, Node{}) + copy(n[i+1:], n[i:]) + n[i] = node.Copy() + } + return n +} + +// Merge combines the two NodeSets and returns a new result. +// TODO: Make this more efficient +func (n NodeSet) Merge(other NodeSet) NodeSet { + switch { + case len(other) <= 0: // Optimise special case, to avoid allocating + return n // (note unit test DeepEquals breaks if we don't do this) + case len(n) <= 0: + return other + } + result := n.Copy() + for _, node := range other { + result = result.Add(node) + } + return result +} + +// Copy returns a value copy of the NodeSet. +func (n NodeSet) Copy() NodeSet { + if n == nil { + return n + } + result := make(NodeSet, len(n)) + for i, node := range n { + result[i] = node.Copy() + } + return result +} diff --git a/report/node_set_test.go b/report/node_set_test.go new file mode 100644 index 000000000..518701299 --- /dev/null +++ b/report/node_set_test.go @@ -0,0 +1,152 @@ +package report_test + +import ( + "reflect" + "testing" + + "github.com/weaveworks/scope/report" +) + +type nodeSpec struct { + topology string + id string +} + +func TestMakeNodeSet(t *testing.T) { + for _, testcase := range []struct { + inputs []nodeSpec + wants []nodeSpec + }{ + {inputs: nil, wants: nil}, + {inputs: []nodeSpec{}, wants: []nodeSpec{}}, + { + inputs: []nodeSpec{{"", "a"}}, + wants: []nodeSpec{{"", "a"}}, + }, + { + inputs: []nodeSpec{{"", "a"}, {"", "a"}, {"1", "a"}}, + wants: []nodeSpec{{"", "a"}, {"1", "a"}}, + }, + { + inputs: []nodeSpec{{"", "b"}, {"", "c"}, {"", "a"}}, + wants: []nodeSpec{{"", "a"}, {"", "b"}, {"", "c"}}, + }, + { + inputs: []nodeSpec{{"2", "a"}, {"3", "a"}, {"1", "a"}}, + wants: []nodeSpec{{"1", "a"}, {"2", "a"}, {"3", "a"}}, + }, + } { + var ( + inputs []report.Node + wants []report.Node + ) + for _, spec := range testcase.inputs { + inputs = append(inputs, report.MakeNode().WithTopology(spec.topology).WithID(spec.id)) + } + for _, spec := range testcase.wants { + wants = append(wants, 
report.MakeNode().WithTopology(spec.topology).WithID(spec.id)) + } + if want, have := report.NodeSet(wants), report.MakeNodeSet(inputs...); !reflect.DeepEqual(want, have) { + t.Errorf("%#v: want %#v, have %#v", testcase.inputs, want, have) + } + } +} + +func TestNodeSetAdd(t *testing.T) { + for _, testcase := range []struct { + input report.NodeSet + nodes []report.Node + want report.NodeSet + }{ + {input: report.NodeSet(nil), nodes: []report.Node{}, want: report.NodeSet(nil)}, + { + input: report.MakeNodeSet(), + nodes: []report.Node{}, + want: report.MakeNodeSet(), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("a")), + nodes: []report.Node{}, + want: report.MakeNodeSet(report.MakeNode().WithID("a")), + }, + { + input: report.MakeNodeSet(), + nodes: []report.Node{report.MakeNode().WithID("a")}, + want: report.MakeNodeSet(report.MakeNode().WithID("a")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("a")), + nodes: []report.Node{report.MakeNode().WithID("a")}, + want: report.MakeNodeSet(report.MakeNode().WithID("a")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("b")), + nodes: []report.Node{report.MakeNode().WithID("a"), report.MakeNode().WithID("b")}, + want: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("b")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("a")), + nodes: []report.Node{report.MakeNode().WithID("c"), report.MakeNode().WithID("b")}, + want: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("b"), report.MakeNode().WithID("c")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("c")), + nodes: []report.Node{report.MakeNode().WithID("b"), report.MakeNode().WithID("b"), report.MakeNode().WithID("b")}, + want: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("b"), report.MakeNode().WithID("c")), + }, + } { + if want, have := testcase.want, 
testcase.input.Add(testcase.nodes...); !reflect.DeepEqual(want, have) { + t.Errorf("%v + %v: want %v, have %v", testcase.input, testcase.nodes, want, have) + } + } +} + +func TestNodeSetMerge(t *testing.T) { + for _, testcase := range []struct { + input report.NodeSet + other report.NodeSet + want report.NodeSet + }{ + {input: report.NodeSet(nil), other: report.NodeSet(nil), want: report.NodeSet(nil)}, + {input: report.MakeNodeSet(), other: report.MakeNodeSet(), want: report.MakeNodeSet()}, + { + input: report.MakeNodeSet(report.MakeNode().WithID("a")), + other: report.MakeNodeSet(), + want: report.MakeNodeSet(report.MakeNode().WithID("a")), + }, + { + input: report.MakeNodeSet(), + other: report.MakeNodeSet(report.MakeNode().WithID("a")), + want: report.MakeNodeSet(report.MakeNode().WithID("a")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("a")), + other: report.MakeNodeSet(report.MakeNode().WithID("b")), + want: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("b")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("b")), + other: report.MakeNodeSet(report.MakeNode().WithID("a")), + want: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("b")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("a")), + other: report.MakeNodeSet(report.MakeNode().WithID("a")), + want: report.MakeNodeSet(report.MakeNode().WithID("a")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("c")), + other: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("b")), + want: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("b"), report.MakeNode().WithID("c")), + }, + { + input: report.MakeNodeSet(report.MakeNode().WithID("b")), + other: report.MakeNodeSet(report.MakeNode().WithID("a")), + want: report.MakeNodeSet(report.MakeNode().WithID("a"), report.MakeNode().WithID("b")), + }, + } { + if want, have := 
testcase.want, testcase.input.Merge(testcase.other); !reflect.DeepEqual(want, have) { + t.Errorf("%v + %v: want %v, have %v", testcase.input, testcase.other, want, have) + } + } +} diff --git a/report/topology.go b/report/topology.go index 567ae2c41..dc2dd1767 100644 --- a/report/topology.go +++ b/report/topology.go @@ -84,6 +84,8 @@ func (n Nodes) Merge(other Nodes) Nodes { // given node in a given topology, along with the edges emanating from the // node and metadata about those edges. type Node struct { + ID string `json:"id,omitempty"` + Topology string `json:"topology,omitempty"` Metadata Metadata `json:"metadata,omitempty"` Counters Counters `json:"counters,omitempty"` Sets Sets `json:"sets,omitempty"` @@ -92,6 +94,7 @@ type Node struct { Controls NodeControls `json:"controls,omitempty"` Latest LatestMap `json:"latest,omitempty"` Metrics Metrics `json:"metrics,omitempty"` + Parents Sets `json:"parents,omitempty"` } // MakeNode creates a new Node with no initial metadata. @@ -99,12 +102,12 @@ func MakeNode() Node { return Node{ Metadata: Metadata{}, Counters: Counters{}, - Sets: Sets{}, Adjacency: MakeIDList(), Edges: EdgeMetadatas{}, Controls: MakeNodeControls(), Latest: MakeLatestMap(), Metrics: Metrics{}, + Parents: Sets{}, } } @@ -113,6 +116,20 @@ func MakeNodeWith(m map[string]string) Node { return MakeNode().WithMetadata(m) } +// WithID returns a fresh copy of n, with ID changed. +func (n Node) WithID(id string) Node { + result := n.Copy() + result.ID = id + return result +} + +// WithTopology returns a fresh copy of n, with ID changed. +func (n Node) WithTopology(topology string) Node { + result := n.Copy() + result.Topology = topology + return result +} + // WithMetadata returns a fresh copy of n, with Metadata m merged in. func (n Node) WithMetadata(m map[string]string) Node { result := n.Copy() @@ -130,8 +147,7 @@ func (n Node) WithCounters(c map[string]int) Node { // WithSet returns a fresh copy of n, with set merged in at key. 
func (n Node) WithSet(key string, set StringSet) Node { result := n.Copy() - existing := n.Sets[key] - result.Sets[key] = existing.Merge(set) + result.Sets = result.Sets.Merge(Sets{key: set}) return result } @@ -186,9 +202,18 @@ func (n Node) WithLatest(k string, ts time.Time, v string) Node { return result } +// WithParents returns a fresh copy of n, with sets merged in. +func (n Node) WithParents(parents Sets) Node { + result := n.Copy() + result.Parents = result.Parents.Merge(parents) + return result +} + // Copy returns a value copy of the Node. func (n Node) Copy() Node { cp := MakeNode() + cp.ID = n.ID + cp.Topology = n.Topology cp.Metadata = n.Metadata.Copy() cp.Counters = n.Counters.Copy() cp.Sets = n.Sets.Copy() @@ -197,6 +222,7 @@ func (n Node) Copy() Node { cp.Controls = n.Controls.Copy() cp.Latest = n.Latest.Copy() cp.Metrics = n.Metrics.Copy() + cp.Parents = n.Parents.Copy() return cp } @@ -204,6 +230,12 @@ func (n Node) Copy() Node { // fresh node. func (n Node) Merge(other Node) Node { cp := n.Copy() + if cp.ID == "" { + cp.ID = other.ID + } + if cp.Topology == "" { + cp.Topology = other.Topology + } cp.Metadata = cp.Metadata.Merge(other.Metadata) cp.Counters = cp.Counters.Merge(other.Counters) cp.Sets = cp.Sets.Merge(other.Sets) @@ -212,6 +244,7 @@ func (n Node) Merge(other Node) Node { cp.Controls = cp.Controls.Merge(other.Controls) cp.Latest = cp.Latest.Merge(other.Latest) cp.Metrics = cp.Metrics.Merge(other.Metrics) + cp.Parents = cp.Parents.Merge(other.Parents) return cp } @@ -268,6 +301,9 @@ type Sets map[string]StringSet func (s Sets) Merge(other Sets) Sets { result := s.Copy() for k, v := range other { + if result == nil { + result = Sets{} + } result[k] = result[k].Merge(v) } return result @@ -275,6 +311,9 @@ func (s Sets) Merge(other Sets) Sets { // Copy returns a value copy of the sets map. 
func (s Sets) Copy() Sets { + if s == nil { + return s + } result := Sets{} for k, v := range s { result[k] = v.Copy() @@ -321,6 +360,21 @@ func (s StringSet) Add(strs ...string) StringSet { return s } +// Remove removes the strings from the StringSet. Remove is the only valid way +// to shrink a StringSet. Remove returns the StringSet to enable chaining. +func (s StringSet) Remove(strs ...string) StringSet { + for _, str := range strs { + i := sort.Search(len(s), func(i int) bool { return s[i] >= str }) + if i >= len(s) || s[i] != str { + // The list does not have the element. + continue + } + // has the element, remove it. + s = append(s[:i], s[i+1:]...) + } + return s +} + // Merge combines the two StringSets and returns a new result. func (s StringSet) Merge(other StringSet) StringSet { switch { diff --git a/report/topology_test.go b/report/topology_test.go index c965f9b7d..72beecc5d 100644 --- a/report/topology_test.go +++ b/report/topology_test.go @@ -45,6 +45,27 @@ func TestStringSetAdd(t *testing.T) { } } +func TestStringSetRemove(t *testing.T) { + for _, testcase := range []struct { + input report.StringSet + strs []string + want report.StringSet + }{ + {input: report.StringSet(nil), strs: []string{}, want: report.StringSet(nil)}, + {input: report.MakeStringSet(), strs: []string{}, want: report.MakeStringSet()}, + {input: report.MakeStringSet("a"), strs: []string{}, want: report.MakeStringSet("a")}, + {input: report.MakeStringSet(), strs: []string{"a"}, want: report.MakeStringSet()}, + {input: report.MakeStringSet("a"), strs: []string{"a"}, want: report.StringSet{}}, + {input: report.MakeStringSet("b"), strs: []string{"a", "b"}, want: report.StringSet{}}, + {input: report.MakeStringSet("a"), strs: []string{"c", "b"}, want: report.MakeStringSet("a")}, + {input: report.MakeStringSet("a", "c"), strs: []string{"b", "b", "b"}, want: report.MakeStringSet("a", "c")}, + } { + if want, have := testcase.want, testcase.input.Remove(testcase.strs...); 
!reflect.DeepEqual(want, have) { + t.Errorf("%v - %v: want %#v, have %#v", testcase.input, testcase.strs, want, have) + } + } +} + func TestStringSetMerge(t *testing.T) { for _, testcase := range []struct { input report.StringSet diff --git a/test/fixture/report_fixture.go b/test/fixture/report_fixture.go index bf4f2eeed..fa9785a5e 100644 --- a/test/fixture/report_fixture.go +++ b/test/fixture/report_fixture.go @@ -74,13 +74,13 @@ var ( ClientContainerID = "a1b2c3d4e5" ServerContainerID = "5e4d3c2b1a" - ClientContainerNodeID = report.MakeContainerNodeID(ClientHostID, ClientContainerID) - ServerContainerNodeID = report.MakeContainerNodeID(ServerHostID, ServerContainerID) + ClientContainerNodeID = report.MakeContainerNodeID(ClientContainerID) + ServerContainerNodeID = report.MakeContainerNodeID(ServerContainerID) ClientContainerImageID = "imageid123" ServerContainerImageID = "imageid456" - ClientContainerImageNodeID = report.MakeContainerNodeID(ClientHostID, ClientContainerImageID) - ServerContainerImageNodeID = report.MakeContainerNodeID(ServerHostID, ServerContainerImageID) + ClientContainerImageNodeID = report.MakeContainerImageNodeID(ClientHostID, ClientContainerImageID) + ServerContainerImageNodeID = report.MakeContainerImageNodeID(ServerHostID, ServerContainerImageID) ClientContainerImageName = "image/client" ServerContainerImageName = "image/server" @@ -91,12 +91,13 @@ var ( UnknownAddress3NodeID = report.MakeAddressNodeID(ServerHostID, UnknownClient3IP) RandomAddressNodeID = report.MakeAddressNodeID(ServerHostID, RandomClientIP) // this should become an internet node - ClientPodID = "ping/pong-a" - ServerPodID = "ping/pong-b" - ClientPodNodeID = report.MakePodNodeID("ping", "pong-a") - ServerPodNodeID = report.MakePodNodeID("ping", "pong-b") - ServiceID = "ping/pongservice" - ServiceNodeID = report.MakeServiceNodeID("ping", "pongservice") + KubernetesNamespace = "ping" + ClientPodID = "ping/pong-a" + ServerPodID = "ping/pong-b" + ClientPodNodeID = 
report.MakePodNodeID(KubernetesNamespace, "pong-a") + ServerPodNodeID = report.MakePodNodeID(KubernetesNamespace, "pong-b") + ServiceID = "ping/pongservice" + ServiceNodeID = report.MakeServiceNodeID(KubernetesNamespace, "pongservice") LoadMetric = report.MakeMetric().Add(Now, 0.01).WithFirst(Now.Add(-15 * time.Second)) LoadMetrics = report.Metrics{ @@ -105,6 +106,10 @@ var ( host.Load15: LoadMetric, } + CPUMetric = report.MakeMetric().Add(Now, 0.01).WithFirst(Now.Add(-15 * time.Second)) + + MemoryMetric = report.MakeMetric().Add(Now, 0.01).WithFirst(Now.Add(-15 * time.Second)) + Report = report.Report{ Endpoint: report.Topology{ Nodes: report.Nodes{ @@ -200,23 +205,40 @@ var ( process.Name: Client1Name, docker.ContainerID: ClientContainerID, report.HostNodeID: ClientHostNodeID, + }).WithID(ClientProcess1NodeID).WithTopology("process").WithParents(report.Sets{ + "host": report.MakeStringSet(ClientHostNodeID), + "container": report.MakeStringSet(ClientContainerNodeID), + "container_image": report.MakeStringSet(ClientContainerImageNodeID), + }).WithMetrics(report.Metrics{ + process.CPUUsage: CPUMetric, + process.MemoryUsage: MemoryMetric, }), ClientProcess2NodeID: report.MakeNodeWith(map[string]string{ process.PID: Client2PID, process.Name: Client2Name, docker.ContainerID: ClientContainerID, report.HostNodeID: ClientHostNodeID, + }).WithID(ClientProcess2NodeID).WithTopology("process").WithParents(report.Sets{ + "host": report.MakeStringSet(ClientHostNodeID), + "container": report.MakeStringSet(ClientContainerNodeID), + "container_image": report.MakeStringSet(ClientContainerImageNodeID), }), ServerProcessNodeID: report.MakeNodeWith(map[string]string{ process.PID: ServerPID, process.Name: ServerName, docker.ContainerID: ServerContainerID, report.HostNodeID: ServerHostNodeID, + }).WithID(ServerProcessNodeID).WithTopology("process").WithParents(report.Sets{ + "host": report.MakeStringSet(ServerHostNodeID), + "container": report.MakeStringSet(ServerContainerNodeID), + 
"container_image": report.MakeStringSet(ServerContainerImageNodeID), }), NonContainerProcessNodeID: report.MakeNodeWith(map[string]string{ process.PID: NonContainerPID, process.Name: NonContainerName, report.HostNodeID: ServerHostNodeID, + }).WithID(NonContainerProcessNodeID).WithTopology("process").WithParents(report.Sets{ + "host": report.MakeStringSet(ServerHostNodeID), }), }, }, @@ -228,17 +250,36 @@ var ( docker.ImageID: ClientContainerImageID, report.HostNodeID: ClientHostNodeID, docker.LabelPrefix + "io.kubernetes.pod.name": ClientPodID, - }).WithLatest(docker.ContainerState, Now, docker.StateRunning), + kubernetes.PodID: ClientPodID, + kubernetes.Namespace: KubernetesNamespace, + }).WithLatest(docker.ContainerState, Now, docker.StateRunning).WithID(ClientContainerNodeID).WithTopology("container").WithParents(report.Sets{ + "host": report.MakeStringSet(ClientHostNodeID), + "container_image": report.MakeStringSet(ClientContainerImageNodeID), + "pod": report.MakeStringSet(ClientPodID), + }).WithMetrics(report.Metrics{ + docker.CPUTotalUsage: CPUMetric, + docker.MemoryUsage: MemoryMetric, + }), ServerContainerNodeID: report.MakeNodeWith(map[string]string{ docker.ContainerID: ServerContainerID, docker.ContainerName: "task-name-5-server-aceb93e2f2b797caba01", + docker.ContainerState: "running", docker.ImageID: ServerContainerImageID, report.HostNodeID: ServerHostNodeID, docker.LabelPrefix + render.AmazonECSContainerNameLabel: "server", docker.LabelPrefix + "foo1": "bar1", docker.LabelPrefix + "foo2": "bar2", docker.LabelPrefix + "io.kubernetes.pod.name": ServerPodID, - }).WithLatest(docker.ContainerState, Now, docker.StateRunning), + kubernetes.PodID: ServerPodID, + kubernetes.Namespace: KubernetesNamespace, + }).WithLatest(docker.ContainerState, Now, docker.StateRunning).WithID(ServerContainerNodeID).WithTopology("container").WithParents(report.Sets{ + "host": report.MakeStringSet(ServerHostNodeID), + "container_image": 
report.MakeStringSet(ServerContainerImageNodeID), + "pod": report.MakeStringSet(ServerPodID), + }).WithMetrics(report.Metrics{ + docker.CPUTotalUsage: CPUMetric, + docker.MemoryUsage: MemoryMetric, + }), }, }, ContainerImage: report.Topology{ @@ -247,14 +288,18 @@ var ( docker.ImageID: ClientContainerImageID, docker.ImageName: ClientContainerImageName, report.HostNodeID: ClientHostNodeID, - }), + }).WithParents(report.Sets{ + "host": report.MakeStringSet(ClientHostNodeID), + }).WithID(ClientContainerImageNodeID).WithTopology("container_image"), ServerContainerImageNodeID: report.MakeNodeWith(map[string]string{ docker.ImageID: ServerContainerImageID, docker.ImageName: ServerContainerImageName, report.HostNodeID: ServerHostNodeID, docker.LabelPrefix + "foo1": "bar1", docker.LabelPrefix + "foo2": "bar2", - }), + }).WithParents(report.Sets{ + "host": report.MakeStringSet(ServerHostNodeID), + }).WithID(ServerContainerImageNodeID).WithTopology("container_image"), }, }, Address: report.Topology{ @@ -294,23 +339,27 @@ var ( "host_name": ClientHostName, "os": "Linux", report.HostNodeID: ClientHostNodeID, - }).WithSets(report.Sets{ + }).WithID(ClientHostNodeID).WithTopology("host").WithSets(report.Sets{ host.LocalNetworks: report.MakeStringSet("10.10.10.0/24"), }).WithMetrics(report.Metrics{ - host.Load1: LoadMetric, - host.Load5: LoadMetric, - host.Load15: LoadMetric, + host.CPUUsage: CPUMetric, + host.MemUsage: MemoryMetric, + host.Load1: LoadMetric, + host.Load5: LoadMetric, + host.Load15: LoadMetric, }), ServerHostNodeID: report.MakeNodeWith(map[string]string{ "host_name": ServerHostName, "os": "Linux", report.HostNodeID: ServerHostNodeID, - }).WithSets(report.Sets{ + }).WithID(ServerHostNodeID).WithTopology("host").WithSets(report.Sets{ host.LocalNetworks: report.MakeStringSet("10.10.10.0/24"), }).WithMetrics(report.Metrics{ - host.Load1: LoadMetric, - host.Load5: LoadMetric, - host.Load15: LoadMetric, + host.CPUUsage: CPUMetric, + host.MemUsage: MemoryMetric, + 
host.Load1: LoadMetric, + host.Load5: LoadMetric, + host.Load15: LoadMetric, }), }, }, @@ -319,16 +368,22 @@ var ( ClientPodNodeID: report.MakeNodeWith(map[string]string{ kubernetes.PodID: ClientPodID, kubernetes.PodName: "pong-a", - kubernetes.Namespace: "ping", + kubernetes.Namespace: KubernetesNamespace, kubernetes.PodContainerIDs: ClientContainerID, kubernetes.ServiceIDs: ServiceID, + }).WithID(ClientPodNodeID).WithTopology("pod").WithParents(report.Sets{ + "host": report.MakeStringSet(ClientHostNodeID), + "service": report.MakeStringSet(ServiceID), }), ServerPodNodeID: report.MakeNodeWith(map[string]string{ kubernetes.PodID: ServerPodID, kubernetes.PodName: "pong-b", - kubernetes.Namespace: "ping", + kubernetes.Namespace: KubernetesNamespace, kubernetes.PodContainerIDs: ServerContainerID, kubernetes.ServiceIDs: ServiceID, + }).WithID(ServerPodNodeID).WithTopology("pod").WithParents(report.Sets{ + "host": report.MakeStringSet(ServerHostNodeID), + "service": report.MakeStringSet(ServiceID), }), }, }, @@ -338,7 +393,7 @@ var ( kubernetes.ServiceID: ServiceID, kubernetes.ServiceName: "pongservice", kubernetes.Namespace: "ping", - }), + }).WithID(ServiceNodeID).WithTopology("service"), }, }, Sampling: report.Sampling{