Merge pull request #1360 from weaveworks/render-reorg

Reorganise the render/ package
This commit is contained in:
Paul Bellamy
2016-04-22 11:51:38 +01:00
18 changed files with 1003 additions and 947 deletions

View File

@@ -13,6 +13,7 @@ import (
"github.com/weaveworks/scope/common/xfer"
"github.com/weaveworks/scope/probe/appclient"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test/fixture"
)
func main() {
@@ -21,22 +22,27 @@ func main() {
publishInterval = flag.Duration("publish.interval", 1*time.Second, "publish (output) interval")
publishToken = flag.String("publish.token", "fixprobe", "publish token, for if we are talking to the service")
publishID = flag.String("publish.id", "fixprobe", "publisher ID used to identify publishers")
useFixture = flag.Bool("fixture", false, "Use the embedded fixture report.")
)
flag.Parse()
if len(flag.Args()) != 1 {
if len(flag.Args()) != 1 && !*useFixture {
log.Fatal("usage: fixprobe [--args] report.json")
}
b, err := ioutil.ReadFile(flag.Arg(0))
if err != nil {
log.Fatal(err)
}
var fixedReport report.Report
decoder := codec.NewDecoderBytes(b, &codec.JsonHandle{})
if err := decoder.Decode(&fixedReport); err != nil {
log.Fatal(err)
if *useFixture {
fixedReport = fixture.Report
} else {
b, err := ioutil.ReadFile(flag.Arg(0))
if err != nil {
log.Fatal(err)
}
decoder := codec.NewDecoderBytes(b, &codec.JsonHandle{})
if err := decoder.Decode(&fixedReport); err != nil {
log.Fatal(err)
}
}
client, err := appclient.NewAppClient(appclient.ProbeConfig{

366
render/container.go Normal file
View File

@@ -0,0 +1,366 @@
package render
import (
"fmt"
"net"
"regexp"
"strings"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/endpoint"
"github.com/weaveworks/scope/probe/host"
"github.com/weaveworks/scope/report"
)
// Constants are used in the tests.
const (
UncontainedID = "uncontained" // ID prefix for the per-host pseudo node holding container-less processes
UncontainedMajor = "Uncontained"
// Topology for IPs so we can differentiate them at the end
IP = "IP"
)
// ContainerRenderer is a Renderer which produces a renderable container
// graph by merging the process graph and the container topology.
// NB We only want processes in container _or_ processes with network connections
// but we need to be careful to ensure we only include each edge once, by only
// including the ProcessRenderer once.
var ContainerRenderer = MakeSilentFilter(
func(n report.Node) bool {
// Drop deleted containers
state, ok := n.Latest.Lookup(docker.ContainerState)
return !ok || state != docker.StateDeleted
},
MakeReduce(
MakeSilentFilter(
func(n report.Node) bool {
// Drop unconnected pseudo nodes (could appear due to filtering)
_, isConnected := n.Latest.Lookup(IsConnected)
return n.Topology != Pseudo || isConnected
},
// Processes folded into their containers via MapProcess2Container.
MakeMap(
MapProcess2Container,
ColorConnected(ProcessRenderer),
),
),
// This mapper brings in short lived connections by joining with container IPs.
// We need to be careful to ensure we only include each edge once. Edges brought in
// by the above renders will have a pid, so its enough to filter out any nodes with
// pids.
SilentFilterUnconnected(MakeMap(
MapIP2Container,
MakeReduce(
// Containers exploded into one node per container IP.
MakeMap(
MapContainer2IP,
SelectContainer,
),
// Pid-less endpoints keyed by IP (see MapEndpoint2IP).
MakeMap(
MapEndpoint2IP,
SelectEndpoint,
),
),
)),
// Finally, the container topology itself.
SelectContainer,
),
)
// containerWithHostIPsRenderer decorates a container Renderer, adding the
// host's IPs to containers running with NetworkMode "host".
type containerWithHostIPsRenderer struct {
Renderer
}
// Render produces a container graph where the ips for host network mode are set
// to the host's IPs.
func (r containerWithHostIPsRenderer) Render(rpt report.Report) report.Nodes {
containers := r.Renderer.Render(rpt)
hosts := SelectHost.Render(rpt)
outputs := report.Nodes{}
for id, c := range containers {
outputs[id] = c
// Only containers in host network mode are enriched; others pass through.
networkMode, ok := c.Latest.Lookup(docker.ContainerNetworkMode)
if !ok || networkMode != docker.NetworkModeHost {
continue
}
// Look up the host this container runs on; skip if it isn't in the report.
h, ok := hosts[report.MakeHostNodeID(report.ExtractHostID(c))]
if !ok {
continue
}
// Collect the address part of each of the host's local network CIDRs.
newIPs := report.MakeStringSet()
hostNetworks, _ := h.Sets.Lookup(host.LocalNetworks)
for _, cidr := range hostNetworks {
if ip, _, err := net.ParseCIDR(cidr); err == nil {
newIPs = newIPs.Add(ip.String())
}
}
output := c.Copy()
output.Sets = c.Sets.Add(docker.ContainerIPs, newIPs)
outputs[id] = output
}
return outputs
}
// ContainerWithHostIPsRenderer is a Renderer which produces a container graph
// enriched with host IPs on containers where NetworkMode is Host
var ContainerWithHostIPsRenderer = containerWithHostIPsRenderer{ContainerRenderer}
// containerWithImageNameRenderer decorates a container Renderer, merging
// image metadata into each container node that carries a docker_image_id.
type containerWithImageNameRenderer struct {
Renderer
}
// Render produces a container graph where the minor labels contain the
// container name, if found. It also merges the image node metadata into the
// container metadata.
func (r containerWithImageNameRenderer) Render(rpt report.Report) report.Nodes {
containers := r.Renderer.Render(rpt)
images := SelectContainerImage.Render(rpt)
outputs := report.Nodes{}
for id, c := range containers {
outputs[id] = c
// Containers without an image id, or whose image is missing from this
// report, are passed through unchanged.
imageID, ok := c.Latest.Lookup(docker.ImageID)
if !ok {
continue
}
image, ok := images[report.MakeContainerImageNodeID(imageID)]
if !ok {
continue
}
// Merge image metadata into the container; precedence for conflicting
// keys follows report.Latest.Merge semantics — TODO confirm direction.
output := c.Copy()
output.Latest = image.Latest.Merge(c.Latest)
outputs[id] = output
}
return outputs
}
// ContainerWithImageNameRenderer is a Renderer which produces a container
// graph where the ranks are the image names, not their IDs
var ContainerWithImageNameRenderer = containerWithImageNameRenderer{ContainerWithHostIPsRenderer}
// ContainerImageRenderer is a Renderer which produces a renderable container
// image graph by merging the container graph and the container image topology.
var ContainerImageRenderer = MakeReduce(
MakeMap(
MapContainer2ContainerImage,
ContainerRenderer,
),
SelectContainerImage,
)
// ContainerHostnameRenderer is a Renderer which produces a renderable container
// by hostname graph.
var ContainerHostnameRenderer = MakeMap(
MapContainer2Hostname,
ContainerRenderer,
)
// portMappingMatch matches docker port-mapping strings of the form
// "1.2.3.4:8080->80/tcp", capturing host IP, host port and container port.
var portMappingMatch = regexp.MustCompile(`([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}):([0-9]+)->([0-9]+)/tcp`)
// MapEndpoint2IP maps endpoint nodes to their IP address, for joining
// with container nodes. We drop endpoint nodes with pids, as they
// will be joined to containers through the process topology, and we
// don't want to double count edges.
func MapEndpoint2IP(m report.Node, local report.Networks) report.Nodes {
// Don't include procspied connections, to prevent double counting
_, ok := m.Latest.Lookup(endpoint.Procspied)
if ok {
return report.Nodes{}
}
// Endpoints with unparseable IDs are dropped.
scope, addr, port, ok := report.ParseEndpointNodeID(m.ID)
if !ok {
return report.Nodes{}
}
// Addresses outside the report's local networks become internet pseudo nodes.
if ip := net.ParseIP(addr); ip != nil && !local.Contains(ip) {
return report.Nodes{TheInternetID: theInternetNode(m)}
}
// We don't always know what port a container is listening on, and
// container-to-container communications can be unambiguously identified
// without ports. OTOH, connections to the host IPs which have been port
// mapped to a container can only be unambiguously identified with the port.
// So we need to emit two nodes, for two different cases.
id := report.MakeScopedEndpointNodeID(scope, addr, "")
idWithPort := report.MakeScopedEndpointNodeID(scope, addr, port)
return report.Nodes{
id: NewDerivedNode(id, m).WithTopology(IP),
idWithPort: NewDerivedNode(idWithPort, m).WithTopology(IP),
}
}
// MapContainer2IP maps container nodes to their IP addresses (outputs
// multiple nodes). This allows container to be joined directly with
// the endpoint topology.
func MapContainer2IP(m report.Node, _ report.Networks) report.Nodes {
containerID, ok := m.Latest.Lookup(docker.ContainerID)
if !ok {
return report.Nodes{}
}
result := report.Nodes{}
// Emit one IP node per (scope, address). The IP counter lets
// MapIP2Container detect addresses shared by several containers.
if addrs, ok := m.Sets.Lookup(docker.ContainerIPsWithScopes); ok {
for _, addr := range addrs {
scope, addr, ok := report.ParseAddressNodeID(addr)
if !ok {
continue
}
id := report.MakeScopedEndpointNodeID(scope, addr, "")
result[id] = NewDerivedNode(id, m).
WithTopology(IP).
WithLatests(map[string]string{docker.ContainerID: containerID}).
WithCounters(map[string]int{IP: 1})
}
}
// Also output all the host:port port mappings (see above comment).
// In this case we assume this doesn't need a scope, as they are for host IPs.
ports, _ := m.Sets.Lookup(docker.ContainerPorts)
for _, portMapping := range ports {
if mapping := portMappingMatch.FindStringSubmatch(portMapping); mapping != nil {
ip, port := mapping[1], mapping[2]
id := report.MakeScopedEndpointNodeID("", ip, port)
result[id] = NewDerivedNode(id, m).
WithTopology(IP).
WithLatests(map[string]string{docker.ContainerID: containerID}).
WithCounters(map[string]int{IP: 1})
}
}
return result
}
// MapIP2Container maps IP nodes produced from MapContainer2IP back to
// container nodes. If there is more than one container with a given
// IP, it is dropped.
func MapIP2Container(n report.Node, _ report.Networks) report.Nodes {
	// An IP claimed by several containers cannot reliably attribute a
	// connection, so drop it.
	if count, _ := n.Counters.Lookup(IP); count > 1 {
		return report.Nodes{}
	}
	// The internet pseudo nodes pass straight through.
	if strings.HasSuffix(n.ID, TheInternetID) {
		return report.Nodes{n.ID: n}
	}
	// Anything dragged in from the endpoint topology that never joined a
	// container carries no container id; exclude it here.
	containerID, found := n.Latest.Lookup(docker.ContainerID)
	if !found {
		return report.Nodes{}
	}
	nodeID := report.MakeContainerNodeID(containerID)
	node := NewDerivedNode(nodeID, n).WithTopology(report.Container)
	return report.Nodes{nodeID: node}
}
// MapProcess2Container maps process Nodes to container
// Nodes.
//
// Pseudo nodes are propagated unchanged. A process carrying a
// docker_container_id becomes a container node; any other process is
// grouped into a per-host "Uncontained" pseudo node (or one shared
// uncontained node when no host id is present in its metadata).
//
// The resulting container node has the correct ID format but no Major
// or Minor labels; it must be merged with a container graph for those.
func MapProcess2Container(n report.Node, _ report.Networks) report.Nodes {
	if n.Topology == Pseudo {
		// Pseudo nodes pass straight through.
		return report.Nodes{n.ID: n}
	}
	containerID, inContainer := n.Latest.Lookup(docker.ContainerID)
	if !inContainer {
		// No container: bucket this process into the host-scoped
		// "Uncontained" pseudo node.
		pseudoID := MakePseudoNodeID(UncontainedID, report.ExtractHostID(n))
		return report.Nodes{pseudoID: NewDerivedPseudoNode(pseudoID, n)}
	}
	containerNodeID := report.MakeContainerNodeID(containerID)
	derived := NewDerivedNode(containerNodeID, n).WithTopology(report.Container)
	return report.Nodes{containerNodeID: derived}
}
// MapContainer2ContainerImage maps container Nodes to container
// image Nodes.
//
// Pseudo nodes are propagated unchanged. Containers without a
// docker_image_id (e.g. slightly out-of-sync reports) are dropped.
//
// Otherwise, this function will produce a node with the correct ID
// format for a container image, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a container image graph to get that info.
func MapContainer2ContainerImage(n report.Node, _ report.Networks) report.Nodes {
// Propagate all pseudo nodes
if n.Topology == Pseudo {
return report.Nodes{n.ID: n}
}
// Otherwise, if for some reason the container doesn't have an image_id
// (maybe slightly out of sync reports), just drop it
imageID, timestamp, ok := n.Latest.LookupEntry(docker.ImageID)
if !ok {
return report.Nodes{}
}
// Add container id key to the counters, which will later be counted to produce the minor label
id := report.MakeContainerImageNodeID(imageID)
result := NewDerivedNode(id, n).WithTopology(report.ContainerImage)
result.Latest = result.Latest.Set(docker.ImageID, timestamp, imageID)
result.Counters = result.Counters.Add(n.Topology, 1)
return report.Nodes{id: result}
}
// MapContainer2Hostname maps container Nodes to 'hostname' renderable nodes,
// grouping containers by their hostname.
func MapContainer2Hostname(n report.Node, _ report.Networks) report.Nodes {
// Propagate all pseudo nodes
if n.Topology == Pseudo {
return report.Nodes{n.ID: n}
}
// Otherwise, if for some reason the container doesn't have a hostname
// (maybe slightly out of sync reports), just drop it
id, timestamp, ok := n.Latest.LookupEntry(docker.ContainerHostname)
if !ok {
return report.Nodes{}
}
// The hostname doubles as the group node's ID.
node := NewDerivedNode(id, n).WithTopology(MakeGroupNodeTopology(n.Topology, docker.ContainerHostname))
node.Latest = node.Latest.
Set(docker.ContainerHostname, timestamp, id).
Delete(docker.ContainerName) // TODO(paulbellamy): total hack to render these by hostname instead.
node.Counters = node.Counters.Add(n.Topology, 1)
return report.Nodes{id: node}
}
// ImageNameWithoutVersion strips the registry host (when present) and the
// version tag from a docker image name, e.g.
// "registry.example.com/org/img:v1" -> "org/img".
func ImageNameWithoutVersion(name string) string {
	// A three-segment name is registry/org/image; drop the registry.
	if segments := strings.SplitN(name, "/", 3); len(segments) == 3 {
		name = fmt.Sprintf("%s/%s", segments[1], segments[2])
	}
	// Everything after the first ':' is the version tag.
	if colon := strings.Index(name, ":"); colon != -1 {
		return name[:colon]
	}
	return name
}

100
render/container_test.go Normal file
View File

@@ -0,0 +1,100 @@
package render_test
import (
"fmt"
"net"
"testing"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/process"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/expected"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test"
"github.com/weaveworks/scope/test/fixture"
"github.com/weaveworks/scope/test/reflect"
)
// TestMapProcess2Container checks that empty, containerized and uncontained
// process nodes all produce output from MapProcess2Container.
func TestMapProcess2Container(t *testing.T) {
for _, input := range []testcase{
{"empty", report.MakeNode("empty"), true},
{"basic process", report.MakeNodeWith("basic", map[string]string{process.PID: "201", docker.ContainerID: "a1b2c3"}), true},
{"uncontained", report.MakeNodeWith("uncontained", map[string]string{process.PID: "201", report.HostNodeID: report.MakeHostNodeID("foo")}), true},
} {
testMap(t, render.MapProcess2Container, input)
}
}
// testcase is one input for testMap: a node to map, and whether the MapFunc
// under test is expected to emit any output for it.
type testcase struct {
name string // label for failure messages; falls back to %v of n when empty
n report.Node
ok bool // true if the map should produce at least one node
}
// testMap applies f to input.n with a fixed local network of 1.2.3.0/16 and
// checks that the map produced output exactly when the testcase expects it to.
func testMap(t *testing.T, f render.MapFunc, input testcase) {
	_, ipNet, err := net.ParseCIDR("1.2.3.0/16")
	if err != nil {
		// Use t.Fatal, not t.Fatalf(err.Error()): an error message is not a
		// printf format string, and any '%' in it would corrupt the output.
		t.Fatal(err)
	}
	localNetworks := report.Networks([]*net.IPNet{ipNet})
	if have := f(input.n, localNetworks); input.ok != (len(have) > 0) {
		name := input.name
		if name == "" {
			name = fmt.Sprintf("%v", input.n)
		}
		t.Errorf("%s: want %v, have %v", name, input.ok, have)
	}
}
// TestContainerRenderer checks the full container rendering of the fixture
// report against the expected fixture output.
func TestContainerRenderer(t *testing.T) {
have := render.ContainerRenderer.Render(fixture.Report).Prune()
want := expected.RenderedContainers.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestContainerFilterRenderer(t *testing.T) {
// Tag one of the containers in the topology as a system container and
// ensure it is filtered out correctly.
input := fixture.Report.Copy()
input.Container.Nodes[fixture.ClientContainerNodeID] = input.Container.Nodes[fixture.ClientContainerNodeID].WithLatests(map[string]string{
docker.LabelPrefix + "works.weave.role": "system",
})
have := render.FilterSystem(render.ContainerRenderer).Render(input).Prune()
want := expected.RenderedContainers.Copy().Prune()
delete(want, fixture.ClientContainerNodeID)
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestContainerWithHostIPsRenderer(t *testing.T) {
input := fixture.Report.Copy()
// Mark the client container as sharing the host's network namespace.
input.Container.Nodes[fixture.ClientContainerNodeID] = input.Container.Nodes[fixture.ClientContainerNodeID].WithLatests(map[string]string{
docker.ContainerNetworkMode: "host",
})
nodes := render.ContainerWithHostIPsRenderer.Render(input)
// Test host network nodes get the host IPs added.
haveNode, ok := nodes[fixture.ClientContainerNodeID]
if !ok {
t.Fatal("Expected output to have the client container node")
}
have, ok := haveNode.Sets.Lookup(docker.ContainerIPs)
if !ok {
t.Fatal("Container had no IPs set.")
}
// 10.10.10.0 comes from the fixture host's local networks — see fixture.
want := report.MakeStringSet("10.10.10.0")
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
// TestContainerImageRenderer checks the container image rendering of the
// fixture report against the expected fixture output.
func TestContainerImageRenderer(t *testing.T) {
have := render.ContainerImageRenderer.Render(fixture.Report).Prune()
want := expected.RenderedContainerImages.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}

View File

@@ -206,40 +206,6 @@ var (
render.OutgoingInternetID: theOutgoingInternetNode,
}
RenderedHosts = report.Nodes{
fixture.ClientHostNodeID: hostNode(fixture.ClientHostNodeID, fixture.ServerHostNodeID).
WithLatests(map[string]string{
host.HostName: fixture.ClientHostName,
}).
WithChildren(report.MakeNodeSet(
RenderedEndpoints[fixture.Client54001NodeID],
RenderedEndpoints[fixture.Client54002NodeID],
RenderedProcesses[fixture.ClientProcess1NodeID],
RenderedProcesses[fixture.ClientProcess2NodeID],
RenderedContainers[fixture.ClientContainerNodeID],
RenderedContainerImages[fixture.ClientContainerImageNodeID],
RenderedPods[fixture.ClientPodNodeID],
)),
fixture.ServerHostNodeID: hostNode(fixture.ServerHostNodeID, render.OutgoingInternetID).
WithChildren(report.MakeNodeSet(
RenderedEndpoints[fixture.Server80NodeID],
RenderedEndpoints[fixture.NonContainerNodeID],
RenderedProcesses[fixture.ServerProcessNodeID],
RenderedProcesses[fixture.NonContainerProcessNodeID],
RenderedContainers[fixture.ServerContainerNodeID],
RenderedContainerImages[fixture.ServerContainerImageNodeID],
RenderedPods[fixture.ServerPodNodeID],
)),
// due to https://github.com/weaveworks/scope/issues/1323 we are dropping
// all non-internet pseudo nodes for now.
// UnknownPseudoNode1ID: unknownPseudoNode1(fixture.ServerHostNodeID),
// UnknownPseudoNode2ID: unknownPseudoNode2(fixture.ServerHostNodeID),
render.IncomingInternetID: theIncomingInternetNode(fixture.ServerHostNodeID),
render.OutgoingInternetID: theOutgoingInternetNode,
}
unmanagedServerID = render.MakePseudoNodeID(render.UnmanagedID, fixture.ServerHostID)
unmanagedServerNode = pseudo(unmanagedServerID, render.OutgoingInternetID).WithChildren(report.MakeNodeSet(
uncontainedServerNode,
@@ -288,6 +254,40 @@ var (
render.IncomingInternetID: theIncomingInternetNode(fixture.ServiceNodeID),
render.OutgoingInternetID: theOutgoingInternetNode,
}
RenderedHosts = report.Nodes{
fixture.ClientHostNodeID: hostNode(fixture.ClientHostNodeID, fixture.ServerHostNodeID).
WithLatests(map[string]string{
host.HostName: fixture.ClientHostName,
}).
WithChildren(report.MakeNodeSet(
RenderedEndpoints[fixture.Client54001NodeID],
RenderedEndpoints[fixture.Client54002NodeID],
RenderedProcesses[fixture.ClientProcess1NodeID],
RenderedProcesses[fixture.ClientProcess2NodeID],
RenderedContainers[fixture.ClientContainerNodeID],
RenderedContainerImages[fixture.ClientContainerImageNodeID],
RenderedPods[fixture.ClientPodNodeID],
)),
fixture.ServerHostNodeID: hostNode(fixture.ServerHostNodeID, render.OutgoingInternetID).
WithChildren(report.MakeNodeSet(
RenderedEndpoints[fixture.Server80NodeID],
RenderedEndpoints[fixture.NonContainerNodeID],
RenderedProcesses[fixture.ServerProcessNodeID],
RenderedProcesses[fixture.NonContainerProcessNodeID],
RenderedContainers[fixture.ServerContainerNodeID],
RenderedContainerImages[fixture.ServerContainerImageNodeID],
RenderedPods[fixture.ServerPodNodeID],
)),
// due to https://github.com/weaveworks/scope/issues/1323 we are dropping
// all non-internet pseudo nodes for now.
// UnknownPseudoNode1ID: unknownPseudoNode1(fixture.ServerHostNodeID),
// UnknownPseudoNode2ID: unknownPseudoNode2(fixture.ServerHostNodeID),
render.IncomingInternetID: theIncomingInternetNode(fixture.ServerHostNodeID),
render.OutgoingInternetID: theOutgoingInternetNode,
}
)
func newu64(value uint64) *uint64 { return &value }

72
render/host.go Normal file
View File

@@ -0,0 +1,72 @@
package render
import (
"github.com/weaveworks/scope/report"
)
// HostRenderer is a Renderer which produces a renderable host
// graph from the host topology.
var HostRenderer = MakeReduce(
// Endpoints map to hosts (or pseudo nodes) via MapEndpoint2Host.
MakeMap(
MapEndpoint2Host,
EndpointRenderer,
),
// Processes, containers, images and pods all fold into their hosts.
MakeMap(
MapX2Host,
ColorConnected(ProcessRenderer),
),
MakeMap(
MapX2Host,
ContainerRenderer,
),
MakeMap(
MapX2Host,
ContainerImageRenderer,
),
MakeMap(
MapX2Host,
PodRenderer,
),
// And the host topology itself.
SelectHost,
)
// MapX2Host maps any Nodes to host Nodes.
//
// If this function is given a node without a hostname
// (including other pseudo nodes), it will drop the node.
//
// Otherwise, this function will produce a node with the correct ID
// format for a host, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a host graph to get that info.
func MapX2Host(n report.Node, _ report.Networks) report.Nodes {
// Don't propagate all pseudo nodes - we do this in MapEndpoint2Host
if n.Topology == Pseudo {
return report.Nodes{}
}
hostNodeID, timestamp, ok := n.Latest.LookupEntry(report.HostNodeID)
if !ok {
return report.Nodes{}
}
id := report.MakeHostNodeID(report.ExtractHostID(n))
result := NewDerivedNode(id, n).WithTopology(report.Host)
result.Latest = result.Latest.Set(report.HostNodeID, timestamp, hostNodeID)
result.Counters = result.Counters.Add(n.Topology, 1)
return report.Nodes{id: result}
}
// MapEndpoint2Host takes nodes from the endpoint topology and produces
// host nodes or pseudo nodes.
func MapEndpoint2Host(n report.Node, local report.Networks) report.Nodes {
// Nodes without a hostid are treated as pseudo nodes
hostNodeID, timestamp, ok := n.Latest.LookupEntry(report.HostNodeID)
if !ok {
return MapEndpoint2Pseudo(n, local)
}
id := report.MakeHostNodeID(report.ExtractHostID(n))
result := NewDerivedNode(id, n).WithTopology(report.Host)
result.Latest = result.Latest.Set(report.HostNodeID, timestamp, hostNodeID)
result.Counters = result.Counters.Add(n.Topology, 1)
return report.Nodes{id: result}
}

19
render/host_test.go Normal file
View File

@@ -0,0 +1,19 @@
package render_test
import (
"testing"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/expected"
"github.com/weaveworks/scope/test"
"github.com/weaveworks/scope/test/fixture"
"github.com/weaveworks/scope/test/reflect"
)
// TestHostRenderer checks the host rendering of the fixture report against
// the expected fixture output.
func TestHostRenderer(t *testing.T) {
have := render.HostRenderer.Render(fixture.Report).Prune()
want := expected.RenderedHosts.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}

View File

@@ -2,6 +2,8 @@ package render
import (
"strings"
"github.com/weaveworks/scope/report"
)
// MakePseudoNodeID joins the parts of an id into the id of a pseudonode
@@ -13,3 +15,13 @@ func MakePseudoNodeID(parts ...string) string {
func MakeGroupNodeTopology(originalTopology, key string) string {
return strings.Join([]string{"group", originalTopology, key}, ":")
}
// NewDerivedNode makes a node based on node, but with a new ID. The original
// node is attached as a child, and the derived node's parents are pruned.
func NewDerivedNode(id string, node report.Node) report.Node {
return node.WithID(id).WithChildren(report.MakeNodeSet(node)).PruneParents()
}
// NewDerivedPseudoNode makes a new pseudo node with the node as a child
func NewDerivedPseudoNode(id string, node report.Node) report.Node {
return NewDerivedNode(id, node).WithTopology(Pseudo)
}

View File

@@ -1,470 +0,0 @@
package render
import (
"fmt"
"net"
"regexp"
"strings"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/endpoint"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/probe/process"
"github.com/weaveworks/scope/report"
)
// Constants are used in the tests.
const (
UncontainedID = "uncontained"
UncontainedMajor = "Uncontained"
UnmanagedID = "unmanaged"
UnmanagedMajor = "Unmanaged"
TheInternetID = "theinternet"
IncomingInternetID = "in-" + TheInternetID
OutgoingInternetID = "out-" + TheInternetID
InboundMajor = "The Internet"
OutboundMajor = "The Internet"
InboundMinor = "Inbound connections"
OutboundMinor = "Outbound connections"
// Topology for pseudo-nodes and IPs so we can differentiate them at the end
Pseudo = "pseudo"
IP = "IP"
)
// MapFunc is anything which can take an arbitrary Node and
// return a set of other Nodes.
//
// If the output is empty, the node shall be omitted from the rendered topology.
type MapFunc func(report.Node, report.Networks) report.Nodes
// NewDerivedNode makes a node based on node, but with a new ID
func NewDerivedNode(id string, node report.Node) report.Node {
return node.WithID(id).WithChildren(report.MakeNodeSet(node)).PruneParents()
}
// NewDerivedPseudoNode makes a new pseudo node with the node as a child
func NewDerivedPseudoNode(id string, node report.Node) report.Node {
return NewDerivedNode(id, node).WithTopology(Pseudo)
}
// theInternetNode derives an internet pseudo node from m: incoming when m
// has adjacencies (it connects to something), outgoing otherwise.
func theInternetNode(m report.Node) report.Node {
// emit one internet node for incoming, one for outgoing
if len(m.Adjacency) > 0 {
return NewDerivedPseudoNode(IncomingInternetID, m)
}
return NewDerivedPseudoNode(OutgoingInternetID, m)
}
// MapEndpoint2IP maps endpoint nodes to their IP address, for joining
// with container nodes. We drop endpoint nodes with pids, as they
// will be joined to containers through the process topology, and we
// don't want to double count edges.
func MapEndpoint2IP(m report.Node, local report.Networks) report.Nodes {
// Don't include procspied connections, to prevent double counting
_, ok := m.Latest.Lookup(endpoint.Procspied)
if ok {
return report.Nodes{}
}
scope, addr, port, ok := report.ParseEndpointNodeID(m.ID)
if !ok {
return report.Nodes{}
}
if ip := net.ParseIP(addr); ip != nil && !local.Contains(ip) {
return report.Nodes{TheInternetID: theInternetNode(m)}
}
// We don't always know what port a container is listening on, and
// container-to-container communications can be unambiguously identified
// without ports. OTOH, connections to the host IPs which have been port
// mapped to a container can only be unambiguously identified with the port.
// So we need to emit two nodes, for two different cases.
id := report.MakeScopedEndpointNodeID(scope, addr, "")
idWithPort := report.MakeScopedEndpointNodeID(scope, addr, port)
return report.Nodes{
id: NewDerivedNode(id, m).WithTopology(IP),
idWithPort: NewDerivedNode(idWithPort, m).WithTopology(IP),
}
}
var portMappingMatch = regexp.MustCompile(`([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}):([0-9]+)->([0-9]+)/tcp`)
// MapContainer2IP maps container nodes to their IP addresses (outputs
// multiple nodes). This allows container to be joined directly with
// the endpoint topology.
func MapContainer2IP(m report.Node, _ report.Networks) report.Nodes {
containerID, ok := m.Latest.Lookup(docker.ContainerID)
if !ok {
return report.Nodes{}
}
result := report.Nodes{}
if addrs, ok := m.Sets.Lookup(docker.ContainerIPsWithScopes); ok {
for _, addr := range addrs {
scope, addr, ok := report.ParseAddressNodeID(addr)
if !ok {
continue
}
id := report.MakeScopedEndpointNodeID(scope, addr, "")
result[id] = NewDerivedNode(id, m).
WithTopology(IP).
WithLatests(map[string]string{docker.ContainerID: containerID}).
WithCounters(map[string]int{IP: 1})
}
}
// Also output all the host:port port mappings (see above comment).
// In this case we assume this doesn't need a scope, as they are for host IPs.
ports, _ := m.Sets.Lookup(docker.ContainerPorts)
for _, portMapping := range ports {
if mapping := portMappingMatch.FindStringSubmatch(portMapping); mapping != nil {
ip, port := mapping[1], mapping[2]
id := report.MakeScopedEndpointNodeID("", ip, port)
result[id] = NewDerivedNode(id, m).
WithTopology(IP).
WithLatests(map[string]string{docker.ContainerID: containerID}).
WithCounters(map[string]int{IP: 1})
}
}
return result
}
// MapIP2Container maps IP nodes produced from MapContainer2IP back to
// container nodes. If there is more than one container with a given
// IP, it is dropped.
func MapIP2Container(n report.Node, _ report.Networks) report.Nodes {
// If an IP is shared between multiple containers, we can't
// reliably attribute an connection based on its IP
if count, _ := n.Counters.Lookup(IP); count > 1 {
return report.Nodes{}
}
// Propagate the internet pseudo node
if strings.HasSuffix(n.ID, TheInternetID) {
return report.Nodes{n.ID: n}
}
// If this node is not a container, exclude it.
// This excludes all the nodes we've dragged in from endpoint
// that we failed to join to a container.
containerID, ok := n.Latest.Lookup(docker.ContainerID)
if !ok {
return report.Nodes{}
}
id := report.MakeContainerNodeID(containerID)
return report.Nodes{
id: NewDerivedNode(id, n).
WithTopology(report.Container),
}
}
// MapEndpoint2Pseudo makes internet or host pseudo nodes from an endpoint node.
func MapEndpoint2Pseudo(n report.Node, local report.Networks) report.Nodes {
var node report.Node
// Endpoints without an address cannot be classified; drop them.
addr, ok := n.Latest.Lookup(endpoint.Addr)
if !ok {
return report.Nodes{}
}
if ip := net.ParseIP(addr); ip != nil && !local.Contains(ip) {
// If the dstNodeAddr is not in a network local to this report, we emit an
// internet node
node = theInternetNode(n)
} else {
// due to https://github.com/weaveworks/scope/issues/1323 we are dropping
// all non-internet pseudo nodes for now.
// node = NewDerivedPseudoNode(MakePseudoNodeID(addr), n)
return report.Nodes{}
}
return report.Nodes{node.ID: node}
}
// MapEndpoint2Process maps endpoint Nodes to process
// Nodes.
//
// If this function is given a pseudo node, then it will just return it;
// Pseudo nodes will never have pids in them, and therefore will never
// be able to be turned into a Process node.
//
// Otherwise, this function will produce a node with the correct ID
// format for a process, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a process graph to get that info.
func MapEndpoint2Process(n report.Node, local report.Networks) report.Nodes {
// Nodes without a hostid are treated as pseudo nodes
if _, ok := n.Latest.Lookup(report.HostNodeID); !ok {
return MapEndpoint2Pseudo(n, local)
}
// Endpoints without a pid cannot be attributed to a process; drop them.
pid, timestamp, ok := n.Latest.LookupEntry(process.PID)
if !ok {
return report.Nodes{}
}
id := report.MakeProcessNodeID(report.ExtractHostID(n), pid)
node := NewDerivedNode(id, n).WithTopology(report.Process)
node.Latest = node.Latest.Set(process.PID, timestamp, pid)
node.Counters = node.Counters.Add(n.Topology, 1)
return report.Nodes{id: node}
}
// MapProcess2Container maps process Nodes to container
// Nodes.
//
// If this function is given a node without a docker_container_id
// (including other pseudo nodes), it will produce an "Uncontained"
// pseudo node.
//
// Otherwise, this function will produce a node with the correct ID
// format for a container, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a container graph to get that info.
func MapProcess2Container(n report.Node, _ report.Networks) report.Nodes {
// Propagate pseudo nodes
if n.Topology == Pseudo {
return report.Nodes{n.ID: n}
}
// Otherwise, if the process is not in a container, group it
// into an per-host "Uncontained" node. If for whatever reason
// this node doesn't have a host id in their nodemetadata, it'll
// all get grouped into a single uncontained node.
var (
id string
node report.Node
)
if containerID, ok := n.Latest.Lookup(docker.ContainerID); ok {
id = report.MakeContainerNodeID(containerID)
node = NewDerivedNode(id, n).WithTopology(report.Container)
} else {
id = MakePseudoNodeID(UncontainedID, report.ExtractHostID(n))
node = NewDerivedPseudoNode(id, n)
}
return report.Nodes{id: node}
}
// MapProcess2Name maps process Nodes to Nodes
// for each process name.
//
// This mapper is unlike the other foo2bar mappers as the intention
// is not to join the information with another topology.
func MapProcess2Name(n report.Node, _ report.Networks) report.Nodes {
// Pseudo nodes pass straight through.
if n.Topology == Pseudo {
return report.Nodes{n.ID: n}
}
// Processes without a name cannot be grouped; drop them.
name, timestamp, ok := n.Latest.LookupEntry(process.Name)
if !ok {
return report.Nodes{}
}
// The process name doubles as the group node's ID.
node := NewDerivedNode(name, n).WithTopology(MakeGroupNodeTopology(n.Topology, process.Name))
node.Latest = node.Latest.Set(process.Name, timestamp, name)
node.Counters = node.Counters.Add(n.Topology, 1)
return report.Nodes{name: node}
}
// MapContainer2ContainerImage maps container Nodes to container
// image Nodes.
//
// Pseudo nodes are propagated unchanged. Container nodes without a
// docker_image_id (e.g. from slightly out-of-sync reports) are
// dropped.
//
// Otherwise, this function will produce a node with the correct ID
// format for a container image, but without any Major or Minor
// labels. It does not have enough info to do that, and the resulting
// graph must be merged with a container image graph to get that info.
func MapContainer2ContainerImage(n report.Node, _ report.Networks) report.Nodes {
	// Propagate all pseudo nodes
	if n.Topology == Pseudo {
		return report.Nodes{n.ID: n}
	}
	// Otherwise, if for some reason the container doesn't have an image_id
	// (maybe slightly out of sync reports), just drop it
	imageID, timestamp, ok := n.Latest.LookupEntry(docker.ImageID)
	if !ok {
		return report.Nodes{}
	}
	// Add container id key to the counters, which will later be counted to produce the minor label
	id := report.MakeContainerImageNodeID(imageID)
	result := NewDerivedNode(id, n).WithTopology(report.ContainerImage)
	result.Latest = result.Latest.Set(docker.ImageID, timestamp, imageID)
	result.Counters = result.Counters.Add(n.Topology, 1)
	return report.Nodes{id: result}
}
// ImageNameWithoutVersion splits the image name apart, returning the name
// without the version, if possible.
func ImageNameWithoutVersion(name string) string {
	// When the name has three slash-separated components, the first one is
	// the registry host; drop it ("registry/org/repo" -> "org/repo").
	if parts := strings.SplitN(name, "/", 3); len(parts) == 3 {
		name = parts[1] + "/" + parts[2]
	}
	// Strip everything from the first ':' onward (the version tag).
	if i := strings.Index(name, ":"); i >= 0 {
		name = name[:i]
	}
	return name
}
// MapX2Host maps Nodes from any topology to host Nodes.
//
// If this function is given a node without a report.HostNodeID
// (including pseudo nodes), it will drop the node.
//
// Otherwise, this function will produce a node with the correct ID
// format for a host, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a host graph to get that info.
func MapX2Host(n report.Node, _ report.Networks) report.Nodes {
	// Don't propagate all pseudo nodes - we do this in MapEndpoint2Host
	if n.Topology == Pseudo {
		return report.Nodes{}
	}
	hostNodeID, timestamp, ok := n.Latest.LookupEntry(report.HostNodeID)
	if !ok {
		return report.Nodes{}
	}
	id := report.MakeHostNodeID(report.ExtractHostID(n))
	result := NewDerivedNode(id, n).WithTopology(report.Host)
	result.Latest = result.Latest.Set(report.HostNodeID, timestamp, hostNodeID)
	// Count source nodes; used later to produce the minor label.
	result.Counters = result.Counters.Add(n.Topology, 1)
	return report.Nodes{id: result}
}
// MapEndpoint2Host takes nodes from the endpoint topology and produces
// host nodes or pseudo nodes.
func MapEndpoint2Host(n report.Node, local report.Networks) report.Nodes {
	// Nodes without a host id are treated as pseudo nodes (or dropped,
	// depending on their address - see MapEndpoint2Pseudo).
	hostNodeID, timestamp, ok := n.Latest.LookupEntry(report.HostNodeID)
	if !ok {
		return MapEndpoint2Pseudo(n, local)
	}
	id := report.MakeHostNodeID(report.ExtractHostID(n))
	result := NewDerivedNode(id, n).WithTopology(report.Host)
	result.Latest = result.Latest.Set(report.HostNodeID, timestamp, hostNodeID)
	// Count source nodes; used later to produce the minor label.
	result.Counters = result.Counters.Add(n.Topology, 1)
	return report.Nodes{id: result}
}
// MapContainer2Pod maps container Nodes to pod Nodes.
//
// If this function is given a node without kubernetes pod metadata
// (including other pseudo nodes), it will produce an "Unmanaged"
// pseudo node.
//
// Otherwise, this function will produce a node with the correct ID
// format for a pod, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a pod graph to get that info.
func MapContainer2Pod(n report.Node, _ report.Networks) report.Nodes {
	// "Uncontained" pseudo nodes become "Unmanaged" in the pods view.
	if strings.HasPrefix(n.ID, MakePseudoNodeID(UncontainedID)) {
		id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
		return report.Nodes{id: NewDerivedPseudoNode(id, n)}
	}
	// All other pseudo nodes pass through unchanged.
	if n.Topology == Pseudo {
		return report.Nodes{n.ID: n}
	}
	// A container missing either its namespace or its pod id (maybe
	// slightly out-of-sync reports, or it's simply not in a pod) is
	// grouped into a per-host "Unmanaged" pseudo node.
	namespace, nsOK := n.Latest.Lookup(kubernetes.Namespace)
	podID, podOK := n.Latest.Lookup(kubernetes.PodID)
	if !nsOK || !podOK {
		id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
		return report.Nodes{id: NewDerivedPseudoNode(id, n)}
	}
	podName := strings.TrimPrefix(podID, namespace+"/")
	id := report.MakePodNodeID(namespace, podName)
	// Due to a bug in kubernetes, addon pods on the master node are not
	// returned from the API. Adding the namespace and pod name is a
	// workaround until https://github.com/kubernetes/kubernetes/issues/14738
	// is fixed.
	node := NewDerivedNode(id, n).
		WithTopology(report.Pod).
		WithLatests(map[string]string{
			kubernetes.Namespace: namespace,
			kubernetes.PodName:   podName,
		})
	return report.Nodes{id: node}
}
// MapPod2Service maps pod Nodes to service Nodes.
//
// Pseudo nodes are propagated unchanged. Pods without a namespace or
// without service ids (slightly out-of-sync reports, or pods that are
// not in any service) are dropped.
//
// Otherwise, this function will produce one node per service, with the
// correct ID format for a service, but without any Major or Minor
// labels. It does not have enough info to do that, and the resulting
// graph must be merged with a service graph to get that info.
func MapPod2Service(pod report.Node, _ report.Networks) report.Nodes {
	// Propagate all pseudo nodes
	if pod.Topology == Pseudo {
		return report.Nodes{pod.ID: pod}
	}
	// Otherwise, if for some reason the pod doesn't have service_ids (maybe
	// slightly out of sync reports, or it's not in a service), just drop it
	namespace, ok := pod.Latest.Lookup(kubernetes.Namespace)
	if !ok {
		return report.Nodes{}
	}
	ids, ok := pod.Latest.Lookup(kubernetes.ServiceIDs)
	if !ok {
		return report.Nodes{}
	}
	// A pod can belong to several services; emit one derived node per
	// whitespace-separated service id.
	result := report.Nodes{}
	for _, serviceID := range strings.Fields(ids) {
		serviceName := strings.TrimPrefix(serviceID, namespace+"/")
		id := report.MakeServiceNodeID(namespace, serviceName)
		node := NewDerivedNode(id, pod).WithTopology(report.Service)
		node.Counters = node.Counters.Add(pod.Topology, 1)
		result[id] = node
	}
	return result
}
// MapContainer2Hostname maps container Nodes to 'hostname' renderable
// nodes, grouping containers by their configured hostname.
func MapContainer2Hostname(n report.Node, _ report.Networks) report.Nodes {
	// Propagate all pseudo nodes
	if n.Topology == Pseudo {
		return report.Nodes{n.ID: n}
	}
	// Otherwise, if for some reason the container doesn't have a hostname
	// (maybe slightly out of sync reports), just drop it
	id, timestamp, ok := n.Latest.LookupEntry(docker.ContainerHostname)
	if !ok {
		return report.Nodes{}
	}
	node := NewDerivedNode(id, n).WithTopology(MakeGroupNodeTopology(n.Topology, docker.ContainerHostname))
	node.Latest = node.Latest.
		Set(docker.ContainerHostname, timestamp, id).
		Delete(docker.ContainerName) // TODO(paulbellamy): total hack to render these by hostname instead.
	// Count source nodes; used later to produce the minor label.
	node.Counters = node.Counters.Add(n.Topology, 1)
	return report.Nodes{id: node}
}

View File

@@ -1,43 +0,0 @@
package render_test
import (
"fmt"
"net"
"testing"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/process"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/report"
)
// TestMapProcess2Container checks that empty, containerized and
// uncontained process nodes each map to at least one output node.
func TestMapProcess2Container(t *testing.T) {
	for _, input := range []testcase{
		{"empty", report.MakeNode("empty"), true},
		{"basic process", report.MakeNodeWith("basic", map[string]string{process.PID: "201", docker.ContainerID: "a1b2c3"}), true},
		{"uncontained", report.MakeNodeWith("uncontained", map[string]string{process.PID: "201", report.HostNodeID: report.MakeHostNodeID("foo")}), true},
	} {
		testMap(t, render.MapProcess2Container, input)
	}
}
// testcase is a single input node for testMap, plus the expectation of
// whether the map function should emit any output nodes for it.
type testcase struct {
	name string
	n    report.Node
	ok   bool // true if the map function should emit at least one node
}
// testMap runs a map function against a single testcase using a fixed
// local network, and fails the test if the presence (or absence) of
// output nodes disagrees with input.ok.
func testMap(t *testing.T, f render.MapFunc, input testcase) {
	_, ipNet, err := net.ParseCIDR("1.2.3.0/16")
	if err != nil {
		// t.Fatal, not t.Fatalf(err.Error()): passing a non-constant
		// string as a format string trips the go vet printf check.
		t.Fatal(err)
	}
	localNetworks := report.Networks([]*net.IPNet{ipNet})
	if have := f(input.n, localNetworks); input.ok != (len(have) > 0) {
		name := input.name
		if name == "" {
			name = fmt.Sprintf("%v", input.n)
		}
		t.Errorf("%s: want %v, have %v", name, input.ok, have)
	}
}

133
render/pod.go Normal file
View File

@@ -0,0 +1,133 @@
package render
import (
"strings"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/report"
)
// Constants are used in the tests.
const (
	// UnmanagedID and UnmanagedMajor identify the pseudo node that groups
	// containers not managed by kubernetes.
	UnmanagedID    = "unmanaged"
	UnmanagedMajor = "Unmanaged"
)
// PodRenderer is a Renderer which produces a renderable kubernetes
// graph by merging the container graph and the pods topology.
var PodRenderer = MakeReduce(
	MakeSilentFilter(
		func(n report.Node) bool {
			// Drop unconnected pseudo nodes (could appear due to filtering)
			_, isConnected := n.Latest.Lookup(IsConnected)
			return n.Topology != Pseudo || isConnected
		},
		ColorConnected(MakeMap(
			MapContainer2Pod,
			ContainerRenderer,
		)),
	),
	SelectPod,
)
// PodServiceRenderer is a Renderer which produces a renderable kubernetes
// services graph by merging the pods graph and the services topology.
var PodServiceRenderer = MakeReduce(
	MakeMap(
		MapPod2Service,
		PodRenderer,
	),
	SelectService,
)
// MapContainer2Pod maps container Nodes to pod Nodes.
//
// If this function is given a node without kubernetes pod metadata
// (including other pseudo nodes), it will produce an "Unmanaged"
// pseudo node.
//
// Otherwise, this function will produce a node with the correct ID
// format for a pod, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a pod graph to get that info.
func MapContainer2Pod(n report.Node, _ report.Networks) report.Nodes {
	// "Uncontained" becomes "Unmanaged" in the pods view.
	if strings.HasPrefix(n.ID, MakePseudoNodeID(UncontainedID)) {
		id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
		node := NewDerivedPseudoNode(id, n)
		return report.Nodes{id: node}
	}
	// Propagate all other pseudo nodes unchanged.
	if n.Topology == Pseudo {
		return report.Nodes{n.ID: n}
	}
	// If for some reason the container doesn't have a namespace (maybe
	// slightly out of sync reports, or it's not in a pod), treat it as
	// unmanaged.
	namespace, ok := n.Latest.Lookup(kubernetes.Namespace)
	if !ok {
		id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
		node := NewDerivedPseudoNode(id, n)
		return report.Nodes{id: node}
	}
	// Likewise for a missing pod id.
	podID, ok := n.Latest.Lookup(kubernetes.PodID)
	if !ok {
		id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
		node := NewDerivedPseudoNode(id, n)
		return report.Nodes{id: node}
	}
	podName := strings.TrimPrefix(podID, namespace+"/")
	id := report.MakePodNodeID(namespace, podName)
	// Due to a bug in kubernetes, addon pods on the master node are not returned
	// from the API. Adding the namespace and pod name is a workaround until
	// https://github.com/kubernetes/kubernetes/issues/14738 is fixed.
	return report.Nodes{
		id: NewDerivedNode(id, n).
			WithTopology(report.Pod).
			WithLatests(map[string]string{
				kubernetes.Namespace: namespace,
				kubernetes.PodName:   podName,
			}),
	}
}
// MapPod2Service maps pod Nodes to service Nodes.
//
// Pseudo nodes are propagated unchanged. Pods without a namespace or
// without service ids (slightly out-of-sync reports, or pods that are
// not in any service) are dropped.
//
// Otherwise, this function will produce one node per service, with the
// correct ID format for a service, but without any Major or Minor
// labels. It does not have enough info to do that, and the resulting
// graph must be merged with a service graph to get that info.
func MapPod2Service(pod report.Node, _ report.Networks) report.Nodes {
	// Propagate all pseudo nodes
	if pod.Topology == Pseudo {
		return report.Nodes{pod.ID: pod}
	}
	// Otherwise, if for some reason the pod doesn't have service_ids (maybe
	// slightly out of sync reports, or it's not in a service), just drop it
	namespace, ok := pod.Latest.Lookup(kubernetes.Namespace)
	if !ok {
		return report.Nodes{}
	}
	ids, ok := pod.Latest.Lookup(kubernetes.ServiceIDs)
	if !ok {
		return report.Nodes{}
	}
	// A pod can belong to several services; emit one derived node per
	// whitespace-separated service id.
	result := report.Nodes{}
	for _, serviceID := range strings.Fields(ids) {
		serviceName := strings.TrimPrefix(serviceID, namespace+"/")
		id := report.MakeServiceNodeID(namespace, serviceName)
		node := NewDerivedNode(id, pod).WithTopology(report.Service)
		node.Counters = node.Counters.Add(pod.Topology, 1)
		result[id] = node
	}
	return result
}

50
render/pod_test.go Normal file
View File

@@ -0,0 +1,50 @@
package render_test
import (
"testing"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/expected"
"github.com/weaveworks/scope/test"
"github.com/weaveworks/scope/test/fixture"
"github.com/weaveworks/scope/test/reflect"
)
// TestPodRenderer checks PodRenderer against the expected fixture output.
func TestPodRenderer(t *testing.T) {
	have := render.PodRenderer.Render(fixture.Report).Prune()
	want := expected.RenderedPods.Prune()
	if !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}
// TestPodFilterRenderer tags a pod and a container as belonging to the
// kube-system namespace and ensures FilterSystem removes them.
func TestPodFilterRenderer(t *testing.T) {
	// tag on containers or pod namespace in the topology and ensure
	// it is filtered out correctly.
	input := fixture.Report.Copy()
	input.Pod.Nodes[fixture.ClientPodNodeID] = input.Pod.Nodes[fixture.ClientPodNodeID].WithLatests(map[string]string{
		kubernetes.PodID:     "pod:kube-system/foo",
		kubernetes.Namespace: "kube-system",
		kubernetes.PodName:   "foo",
	})
	input.Container.Nodes[fixture.ClientContainerNodeID] = input.Container.Nodes[fixture.ClientContainerNodeID].WithLatests(map[string]string{
		docker.LabelPrefix + "io.kubernetes.pod.name": "kube-system/foo",
	})
	have := render.FilterSystem(render.PodRenderer).Render(input).Prune()
	want := expected.RenderedPods.Copy().Prune()
	delete(want, fixture.ClientPodNodeID)
	delete(want, fixture.ClientContainerNodeID)
	if !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}
// TestPodServiceRenderer checks PodServiceRenderer against the expected
// fixture output.
func TestPodServiceRenderer(t *testing.T) {
	have := render.PodServiceRenderer.Render(fixture.Report).Prune()
	want := expected.RenderedPodServices.Prune()
	if !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}

159
render/process.go Normal file
View File

@@ -0,0 +1,159 @@
package render
import (
"net"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/endpoint"
"github.com/weaveworks/scope/probe/process"
"github.com/weaveworks/scope/report"
)
// Constants are used in the tests.
const (
	// IDs and labels for the internet pseudo nodes, split by direction.
	TheInternetID      = "theinternet"
	IncomingInternetID = "in-" + TheInternetID
	OutgoingInternetID = "out-" + TheInternetID
	InboundMajor       = "The Internet"
	OutboundMajor      = "The Internet"
	InboundMinor       = "Inbound connections"
	OutboundMinor      = "Outbound connections"
	// Topology for pseudo-nodes and IPs so we can differentiate them at the end
	Pseudo = "pseudo"
)
// EndpointRenderer is a Renderer which produces a renderable endpoint
// graph, keeping only procspied endpoints.
var EndpointRenderer = FilterNonProcspied(SelectEndpoint)
// ProcessRenderer is a Renderer which produces a renderable process
// graph by merging the endpoint graph and the process topology.
var ProcessRenderer = MakeReduce(
	MakeMap(
		MapEndpoint2Process,
		EndpointRenderer,
	),
	SelectProcess,
)
// processWithContainerNameRenderer is a Renderer which produces a process
// graph enriched with container names where appropriate. It wraps (and
// delegates to) another Renderer via embedding.
type processWithContainerNameRenderer struct {
	Renderer
}
// Render renders the wrapped process graph, then copies the container id
// and container name from the matching container node onto each process
// node that carries a docker container id.
func (r processWithContainerNameRenderer) Render(rpt report.Report) report.Nodes {
	processes := r.Renderer.Render(rpt)
	containers := SelectContainer.Render(rpt)
	outputs := report.Nodes{}
	for id, p := range processes {
		outputs[id] = p
		// Processes without a container id are passed through unchanged.
		containerID, timestamp, ok := p.Latest.LookupEntry(docker.ContainerID)
		if !ok {
			continue
		}
		// Skip enrichment if the container is not in this report.
		container, ok := containers[report.MakeContainerNodeID(containerID)]
		if !ok {
			continue
		}
		output := p.Copy()
		output.Latest = output.Latest.Set(docker.ContainerID, timestamp, containerID)
		if containerName, timestamp, ok := container.Latest.LookupEntry(docker.ContainerName); ok {
			output.Latest = output.Latest.Set(docker.ContainerName, timestamp, containerName)
		}
		outputs[id] = output
	}
	return outputs
}
// ProcessWithContainerNameRenderer is a Renderer which produces a process
// graph enriched with container names where appropriate.
var ProcessWithContainerNameRenderer = processWithContainerNameRenderer{ProcessRenderer}
// ProcessNameRenderer is a Renderer which produces a renderable process
// name graph by munging the process graph.
var ProcessNameRenderer = MakeMap(
	MapProcess2Name,
	ProcessRenderer,
)
// MapEndpoint2Pseudo makes internet pseudo nodes from an endpoint node.
// Endpoints without an address, or on networks local to the report, are
// dropped entirely (see the issue link below).
func MapEndpoint2Pseudo(n report.Node, local report.Networks) report.Nodes {
	var node report.Node
	addr, ok := n.Latest.Lookup(endpoint.Addr)
	if !ok {
		return report.Nodes{}
	}
	if ip := net.ParseIP(addr); ip != nil && !local.Contains(ip) {
		// If the dstNodeAddr is not in a network local to this report, we emit an
		// internet node
		node = theInternetNode(n)
	} else {
		// due to https://github.com/weaveworks/scope/issues/1323 we are dropping
		// all non-internet pseudo nodes for now.
		// node = NewDerivedPseudoNode(MakePseudoNodeID(addr), n)
		return report.Nodes{}
	}
	return report.Nodes{node.ID: node}
}
// MapEndpoint2Process maps endpoint Nodes to process Nodes.
//
// If this function is given a node without a host id, it is treated as
// a pseudo node; pseudo nodes will never have pids in them, and
// therefore will never be able to be turned into a Process node.
//
// Otherwise, this function will produce a node with the correct ID
// format for a process, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a process graph to get that info.
func MapEndpoint2Process(n report.Node, local report.Networks) report.Nodes {
	// Nodes without a hostid are treated as pseudo nodes
	if _, ok := n.Latest.Lookup(report.HostNodeID); !ok {
		return MapEndpoint2Pseudo(n, local)
	}
	// Drop endpoints with no pid - they cannot be attributed to a process.
	pid, timestamp, ok := n.Latest.LookupEntry(process.PID)
	if !ok {
		return report.Nodes{}
	}
	id := report.MakeProcessNodeID(report.ExtractHostID(n), pid)
	node := NewDerivedNode(id, n).WithTopology(report.Process)
	node.Latest = node.Latest.Set(process.PID, timestamp, pid)
	// Count source nodes; used later to produce the minor label.
	node.Counters = node.Counters.Add(n.Topology, 1)
	return report.Nodes{id: node}
}
// MapProcess2Name maps process Nodes to one Node per distinct
// process name.
//
// This mapper is unlike the other foo2bar mappers as the intention
// is not to join the information with another topology; the grouped
// node is the end product.
func MapProcess2Name(n report.Node, _ report.Networks) report.Nodes {
	// Propagate pseudo nodes unchanged.
	if n.Topology == Pseudo {
		return report.Nodes{n.ID: n}
	}
	// Drop nodes that carry no process name.
	name, timestamp, ok := n.Latest.LookupEntry(process.Name)
	if !ok {
		return report.Nodes{}
	}
	node := NewDerivedNode(name, n).WithTopology(MakeGroupNodeTopology(n.Topology, process.Name))
	node.Latest = node.Latest.Set(process.Name, timestamp, name)
	// Count source nodes; used later to produce the minor label.
	node.Counters = node.Counters.Add(n.Topology, 1)
	return report.Nodes{name: node}
}
// theInternetNode derives an internet pseudo node from m, emitting one
// node for incoming traffic and one for outgoing: nodes that have
// adjacencies get the incoming ID, the rest get the outgoing ID.
func theInternetNode(m report.Node) report.Node {
	id := OutgoingInternetID
	if len(m.Adjacency) > 0 {
		id = IncomingInternetID
	}
	return NewDerivedPseudoNode(id, m)
}

35
render/process_test.go Normal file
View File

@@ -0,0 +1,35 @@
package render_test
import (
"testing"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/expected"
"github.com/weaveworks/scope/test"
"github.com/weaveworks/scope/test/fixture"
"github.com/weaveworks/scope/test/reflect"
)
// TestEndpointRenderer checks EndpointRenderer against the expected
// fixture output.
func TestEndpointRenderer(t *testing.T) {
	have := render.EndpointRenderer.Render(fixture.Report).Prune()
	want := expected.RenderedEndpoints.Prune()
	if !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}
// TestProcessRenderer checks ProcessRenderer against the expected
// fixture output.
func TestProcessRenderer(t *testing.T) {
	have := render.ProcessRenderer.Render(fixture.Report).Prune()
	want := expected.RenderedProcesses.Prune()
	if !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}
// TestProcessNameRenderer checks ProcessNameRenderer against the expected
// fixture output.
func TestProcessNameRenderer(t *testing.T) {
	have := render.ProcessNameRenderer.Render(fixture.Report).Prune()
	want := expected.RenderedProcessNames.Prune()
	if !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}

View File

@@ -4,6 +4,12 @@ import (
"github.com/weaveworks/scope/report"
)
// MapFunc is anything which can take an arbitrary Node and
// return a set of other Nodes.
//
// If the output is empty, the node shall be omitted from the rendered topology.
type MapFunc func(report.Node, report.Networks) report.Nodes
// Renderer is something that can render a report to a set of Nodes.
type Renderer interface {
Render(report.Report) report.Nodes

View File

@@ -1,255 +0,0 @@
package render
import (
"net"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/host"
"github.com/weaveworks/scope/report"
)
// EndpointRenderer is a Renderer which produces a renderable endpoint graph.
var EndpointRenderer = FilterNonProcspied(SelectEndpoint)
// ProcessRenderer is a Renderer which produces a renderable process
// graph by merging the endpoint graph and the process topology.
var ProcessRenderer = MakeReduce(
MakeMap(
MapEndpoint2Process,
EndpointRenderer,
),
SelectProcess,
)
// processWithContainerNameRenderer is a Renderer which produces a process
// graph enriched with container names where appropriate
type processWithContainerNameRenderer struct {
Renderer
}
func (r processWithContainerNameRenderer) Render(rpt report.Report) report.Nodes {
processes := r.Renderer.Render(rpt)
containers := SelectContainer.Render(rpt)
outputs := report.Nodes{}
for id, p := range processes {
outputs[id] = p
containerID, timestamp, ok := p.Latest.LookupEntry(docker.ContainerID)
if !ok {
continue
}
container, ok := containers[report.MakeContainerNodeID(containerID)]
if !ok {
continue
}
output := p.Copy()
output.Latest = output.Latest.Set(docker.ContainerID, timestamp, containerID)
if containerName, timestamp, ok := container.Latest.LookupEntry(docker.ContainerName); ok {
output.Latest = output.Latest.Set(docker.ContainerName, timestamp, containerName)
}
outputs[id] = output
}
return outputs
}
// ProcessWithContainerNameRenderer is a Renderer which produces a process
// graph enriched with container names where appropriate
var ProcessWithContainerNameRenderer = processWithContainerNameRenderer{ProcessRenderer}
// ProcessNameRenderer is a Renderer which produces a renderable process
// name graph by munging the progess graph.
var ProcessNameRenderer = MakeMap(
MapProcess2Name,
ProcessRenderer,
)
// ContainerRenderer is a Renderer which produces a renderable container
// graph by merging the process graph and the container topology.
// NB We only want processes in container _or_ processes with network connections
// but we need to be careful to ensure we only include each edge once, by only
// including the ProcessRenderer once.
var ContainerRenderer = MakeSilentFilter(
	func(n report.Node) bool {
		// Drop deleted containers
		state, ok := n.Latest.Lookup(docker.ContainerState)
		return !ok || state != docker.StateDeleted
	},
	MakeReduce(
		MakeSilentFilter(
			func(n report.Node) bool {
				// Drop unconnected pseudo nodes (could appear due to filtering)
				_, isConnected := n.Latest.Lookup(IsConnected)
				return n.Topology != Pseudo || isConnected
			},
			MakeMap(
				MapProcess2Container,
				ColorConnected(ProcessRenderer),
			),
		),
		// This mapper brings in short lived connections by joining with container IPs.
		// We need to be careful to ensure we only include each edge once. Edges brought in
		// by the above renders will have a pid, so its enough to filter out any nodes with
		// pids.
		SilentFilterUnconnected(MakeMap(
			MapIP2Container,
			MakeReduce(
				MakeMap(
					MapContainer2IP,
					SelectContainer,
				),
				MakeMap(
					MapEndpoint2IP,
					SelectEndpoint,
				),
			),
		)),
		SelectContainer,
	),
)
// containerWithHostIPsRenderer wraps another Renderer and rewrites the
// IPs of host-network-mode containers to their host's IPs.
type containerWithHostIPsRenderer struct {
	Renderer
}
// Render produces a container graph where the ips for host network mode are set
// to the host's IPs.
func (r containerWithHostIPsRenderer) Render(rpt report.Report) report.Nodes {
	containers := r.Renderer.Render(rpt)
	hosts := SelectHost.Render(rpt)
	outputs := report.Nodes{}
	for id, c := range containers {
		outputs[id] = c
		// Only containers in host network mode are rewritten.
		networkMode, ok := c.Latest.Lookup(docker.ContainerNetworkMode)
		if !ok || networkMode != docker.NetworkModeHost {
			continue
		}
		h, ok := hosts[report.MakeHostNodeID(report.ExtractHostID(c))]
		if !ok {
			continue
		}
		// Collect the host's IPs from its local network CIDRs.
		newIPs := report.MakeStringSet()
		hostNetworks, _ := h.Sets.Lookup(host.LocalNetworks)
		for _, cidr := range hostNetworks {
			if ip, _, err := net.ParseCIDR(cidr); err == nil {
				newIPs = newIPs.Add(ip.String())
			}
		}
		output := c.Copy()
		output.Sets = c.Sets.Add(docker.ContainerIPs, newIPs)
		outputs[id] = output
	}
	return outputs
}
// ContainerWithHostIPsRenderer is a Renderer which produces a container graph
// enriched with host IPs on containers where NetworkMode is Host
var ContainerWithHostIPsRenderer = containerWithHostIPsRenderer{ContainerRenderer}
// containerWithImageNameRenderer wraps another Renderer and merges
// container image metadata into the matching container nodes.
type containerWithImageNameRenderer struct {
	Renderer
}
// Render produces a process graph where the minor labels contain the
// container name, if found. It also merges the image node metadata into the
// container metadata.
func (r containerWithImageNameRenderer) Render(rpt report.Report) report.Nodes {
	containers := r.Renderer.Render(rpt)
	images := SelectContainerImage.Render(rpt)
	outputs := report.Nodes{}
	for id, c := range containers {
		outputs[id] = c
		// Containers without an image id are passed through unchanged.
		imageID, ok := c.Latest.Lookup(docker.ImageID)
		if !ok {
			continue
		}
		// Skip enrichment if the image is not in this report.
		image, ok := images[report.MakeContainerImageNodeID(imageID)]
		if !ok {
			continue
		}
		output := c.Copy()
		// Merge the image's latest metadata with the container's.
		output.Latest = image.Latest.Merge(c.Latest)
		outputs[id] = output
	}
	return outputs
}
// ContainerWithImageNameRenderer is a Renderer which produces a container
// graph where the ranks are the image names, not their IDs
var ContainerWithImageNameRenderer = containerWithImageNameRenderer{ContainerWithHostIPsRenderer}
// ContainerImageRenderer is a Renderer which produces a renderable container
// image graph by merging the container graph and the container image topology.
var ContainerImageRenderer = MakeReduce(
	MakeMap(
		MapContainer2ContainerImage,
		ContainerRenderer,
	),
	SelectContainerImage,
)
// ContainerHostnameRenderer is a Renderer which produces a renderable
// container-by-hostname graph.
var ContainerHostnameRenderer = MakeMap(
	MapContainer2Hostname,
	ContainerRenderer,
)
// HostRenderer is a Renderer which produces a renderable host graph from
// the host topology, joined with endpoints, processes, containers,
// container images, and pods mapped onto their hosts.
var HostRenderer = MakeReduce(
	MakeMap(
		MapEndpoint2Host,
		EndpointRenderer,
	),
	MakeMap(
		MapX2Host,
		ColorConnected(ProcessRenderer),
	),
	MakeMap(
		MapX2Host,
		ContainerRenderer,
	),
	MakeMap(
		MapX2Host,
		ContainerImageRenderer,
	),
	MakeMap(
		MapX2Host,
		PodRenderer,
	),
	SelectHost,
)
// PodRenderer is a Renderer which produces a renderable kubernetes
// graph by merging the container graph and the pods topology.
var PodRenderer = MakeReduce(
MakeSilentFilter(
func(n report.Node) bool {
// Drop unconnected pseudo nodes (could appear due to filtering)
_, isConnected := n.Latest.Lookup(IsConnected)
return n.Topology != Pseudo || isConnected
},
ColorConnected(MakeMap(
MapContainer2Pod,
ContainerRenderer,
)),
),
SelectPod,
)
// PodServiceRenderer is a Renderer which produces a renderable kubernetes services
// graph by merging the pods graph and the services topology.
var PodServiceRenderer = MakeReduce(
MakeMap(
MapPod2Service,
PodRenderer,
),
SelectService,
)

View File

@@ -1,136 +0,0 @@
package render_test
import (
"testing"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/expected"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test"
"github.com/weaveworks/scope/test/fixture"
"github.com/weaveworks/scope/test/reflect"
)
func TestEndpointRenderer(t *testing.T) {
have := render.EndpointRenderer.Render(fixture.Report).Prune()
want := expected.RenderedEndpoints.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestProcessRenderer(t *testing.T) {
have := render.ProcessRenderer.Render(fixture.Report).Prune()
want := expected.RenderedProcesses.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestProcessNameRenderer(t *testing.T) {
have := render.ProcessNameRenderer.Render(fixture.Report).Prune()
want := expected.RenderedProcessNames.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestContainerRenderer(t *testing.T) {
have := render.ContainerRenderer.Render(fixture.Report).Prune()
want := expected.RenderedContainers.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestContainerFilterRenderer(t *testing.T) {
// tag on of the containers in the topology and ensure
// it is filtered out correctly.
input := fixture.Report.Copy()
input.Container.Nodes[fixture.ClientContainerNodeID] = input.Container.Nodes[fixture.ClientContainerNodeID].WithLatests(map[string]string{
docker.LabelPrefix + "works.weave.role": "system",
})
have := render.FilterSystem(render.ContainerRenderer).Render(input).Prune()
want := expected.RenderedContainers.Copy().Prune()
delete(want, fixture.ClientContainerNodeID)
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestContainerWithHostIPsRenderer(t *testing.T) {
input := fixture.Report.Copy()
input.Container.Nodes[fixture.ClientContainerNodeID] = input.Container.Nodes[fixture.ClientContainerNodeID].WithLatests(map[string]string{
docker.ContainerNetworkMode: "host",
})
nodes := render.ContainerWithHostIPsRenderer.Render(input)
// Test host network nodes get the host IPs added.
haveNode, ok := nodes[fixture.ClientContainerNodeID]
if !ok {
t.Fatal("Expected output to have the client container node")
}
have, ok := haveNode.Sets.Lookup(docker.ContainerIPs)
if !ok {
t.Fatal("Container had no IPs set.")
}
want := report.MakeStringSet("10.10.10.0")
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestContainerImageRenderer(t *testing.T) {
have := render.ContainerImageRenderer.Render(fixture.Report).Prune()
want := expected.RenderedContainerImages.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestHostRenderer(t *testing.T) {
have := render.HostRenderer.Render(fixture.Report).Prune()
want := expected.RenderedHosts.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestPodRenderer(t *testing.T) {
have := render.PodRenderer.Render(fixture.Report).Prune()
want := expected.RenderedPods.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestPodFilterRenderer(t *testing.T) {
// tag on containers or pod namespace in the topology and ensure
// it is filtered out correctly.
input := fixture.Report.Copy()
input.Pod.Nodes[fixture.ClientPodNodeID] = input.Pod.Nodes[fixture.ClientPodNodeID].WithLatests(map[string]string{
kubernetes.PodID: "pod:kube-system/foo",
kubernetes.Namespace: "kube-system",
kubernetes.PodName: "foo",
})
input.Container.Nodes[fixture.ClientContainerNodeID] = input.Container.Nodes[fixture.ClientContainerNodeID].WithLatests(map[string]string{
docker.LabelPrefix + "io.kubernetes.pod.name": "kube-system/foo",
})
have := render.FilterSystem(render.PodRenderer).Render(input).Prune()
want := expected.RenderedPods.Copy().Prune()
delete(want, fixture.ClientPodNodeID)
delete(want, fixture.ClientContainerNodeID)
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}
func TestPodServiceRenderer(t *testing.T) {
have := render.PodServiceRenderer.Render(fixture.Report).Prune()
want := expected.RenderedPodServices.Prune()
if !reflect.DeepEqual(want, have) {
t.Error(test.Diff(want, have))
}
}

View File

@@ -367,6 +367,7 @@ var (
kubernetes.Namespace: KubernetesNamespace,
kubernetes.PodContainerIDs: ClientContainerID,
kubernetes.ServiceIDs: ServiceID,
report.HostNodeID: ClientHostNodeID,
}).
WithTopology(report.Pod).WithParents(report.EmptySets.
Add("host", report.MakeStringSet(ClientHostNodeID)).
@@ -381,6 +382,7 @@ var (
kubernetes.PodState: "running",
kubernetes.PodContainerIDs: ServerContainerID,
kubernetes.ServiceIDs: ServiceID,
report.HostNodeID: ServerHostNodeID,
}).
WithTopology(report.Pod).WithParents(report.EmptySets.
Add("host", report.MakeStringSet(ServerHostNodeID)).