Merge pull request #2436 from weaveworks/mike/easier-added-topologies

Reduce the number of places topologies are explicitly listed
This commit is contained in:
Mike Lang
2017-04-14 12:49:12 -07:00
committed by GitHub
10 changed files with 236 additions and 335 deletions

View File

@@ -73,30 +73,10 @@ func TestAppClientPublish(t *testing.T) {
// marshalling->unmarshalling is not idempotent due to `json:"omitempty"`
// tags, transforming empty slices into nils. So, we make DeepEqual
// happy by setting empty `json:"omitempty"` entries to nil
rpt.Endpoint = report.MakeTopology()
rpt.Process = report.MakeTopology()
rpt.Container = report.MakeTopology()
rpt.ContainerImage = report.MakeTopology()
rpt.Pod = report.MakeTopology()
rpt.Service = report.MakeTopology()
rpt.Deployment = report.MakeTopology()
rpt.ReplicaSet = report.MakeTopology()
rpt.Host = report.MakeTopology()
rpt.Overlay = report.MakeTopology()
rpt.ECSTask = report.MakeTopology()
rpt.ECSService = report.MakeTopology()
rpt.Endpoint.Controls = nil
rpt.Process.Controls = nil
rpt.Container.Controls = nil
rpt.ContainerImage.Controls = nil
rpt.Pod.Controls = nil
rpt.Service.Controls = nil
rpt.Deployment.Controls = nil
rpt.ReplicaSet.Controls = nil
rpt.Host.Controls = nil
rpt.Overlay.Controls = nil
rpt.ECSTask.Controls = nil
rpt.ECSService.Controls = nil
rpt.WalkTopologies(func(to *report.Topology) {
*to = report.MakeTopology()
to.Controls = nil
})
s := dummyServer(t, token, id, version, rpt, done)
defer s.Close()

View File

@@ -184,6 +184,8 @@ func (r Reporter) Tag(rpt report.Report) (report.Report, error) {
if serviceName, ok := ecsInfo.TaskServiceMap[taskArn]; ok {
serviceID := report.MakeECSServiceNodeID(cluster, serviceName)
parentsSets = parentsSets.Add(report.ECSService, report.MakeStringSet(serviceID))
// in addition, make service parent of task
rpt.ECSTask.Nodes[taskID] = rpt.ECSTask.Nodes[taskID].WithParents(report.MakeSets().Add(report.ECSService, report.MakeStringSet(serviceID)))
}
for _, containerID := range info.ContainerIDs {
if containerNode, ok := rpt.Container.Nodes[containerID]; ok {

View File

@@ -81,18 +81,9 @@ func TestProbe(t *testing.T) {
// tags, transforming empty slices into nils. So, we make DeepEqual
// happy by setting empty `json:"omitempty"` entries to nil
node.Metrics = nil
want.Endpoint.Controls = nil
want.Process.Controls = nil
want.Container.Controls = nil
want.ContainerImage.Controls = nil
want.Pod.Controls = nil
want.Service.Controls = nil
want.Deployment.Controls = nil
want.ReplicaSet.Controls = nil
want.Host.Controls = nil
want.Overlay.Controls = nil
want.ECSTask.Controls = nil
want.ECSService.Controls = nil
want.WalkTopologies(func(t *report.Topology) {
t.Controls = nil
})
want.Endpoint.AddNode(node)
pub := mockPublisher{make(chan report.Report, 10)}

View File

@@ -16,18 +16,7 @@ func (topologyTagger) Name() string { return "Topology" }
// Tag implements Tagger
func (topologyTagger) Tag(r report.Report) (report.Report, error) {
for name, t := range map[string]*report.Topology{
report.Endpoint: &(r.Endpoint),
report.Process: &(r.Process),
report.Container: &(r.Container),
report.ContainerImage: &(r.ContainerImage),
report.Pod: &(r.Pod),
report.Service: &(r.Service),
report.ECSTask: &(r.ECSTask),
report.ECSService: &(r.ECSService),
report.Host: &(r.Host),
report.Overlay: &(r.Overlay),
} {
for name, t := range r.TopologyMap() {
for _, node := range t.Nodes {
t.AddNode(node.WithTopology(name))
}

View File

@@ -134,8 +134,7 @@ var (
{
topologyID: report.Host,
NodeSummaryGroup: NodeSummaryGroup{
TopologyID: "hosts",
Label: "Hosts",
Label: "Hosts",
Columns: []Column{
{ID: host.CPUUsage, Label: "CPU", Datatype: "number"},
{ID: host.MemoryUsage, Label: "Memory", Datatype: "number"},
@@ -145,8 +144,7 @@ var (
{
topologyID: report.Service,
NodeSummaryGroup: NodeSummaryGroup{
TopologyID: "services",
Label: "Services",
Label: "Services",
Columns: []Column{
{ID: report.Pod, Label: "# Pods", Datatype: "number"},
{ID: kubernetes.IP, Label: "IP", Datatype: "ip"},
@@ -156,8 +154,7 @@ var (
{
topologyID: report.ReplicaSet,
NodeSummaryGroup: NodeSummaryGroup{
TopologyID: "replica-sets",
Label: "Replica Sets",
Label: "Replica Sets",
Columns: []Column{
{ID: report.Pod, Label: "# Pods", Datatype: "number"},
{ID: kubernetes.ObservedGeneration, Label: "Observed Gen.", Datatype: "number"},
@@ -167,8 +164,7 @@ var (
{
topologyID: report.Pod,
NodeSummaryGroup: NodeSummaryGroup{
TopologyID: "pods",
Label: "Pods",
Label: "Pods",
Columns: []Column{
{ID: kubernetes.State, Label: "State"},
@@ -180,8 +176,7 @@ var (
{
topologyID: report.ECSService,
NodeSummaryGroup: NodeSummaryGroup{
TopologyID: "ecs-services",
Label: "Services",
Label: "Services",
Columns: []Column{
{ID: awsecs.ServiceRunningCount, Label: "Running", Datatype: "number"},
{ID: awsecs.ServiceDesiredCount, Label: "Desired", Datatype: "number"},
@@ -191,8 +186,7 @@ var (
{
topologyID: report.ECSTask,
NodeSummaryGroup: NodeSummaryGroup{
TopologyID: "ecs-tasks",
Label: "Tasks",
Label: "Tasks",
Columns: []Column{
{ID: awsecs.CreatedAt, Label: "Created At", Datatype: "datetime"},
},
@@ -201,8 +195,7 @@ var (
{
topologyID: report.Container,
NodeSummaryGroup: NodeSummaryGroup{
TopologyID: "containers",
Label: "Containers", Columns: []Column{
Label: "Containers", Columns: []Column{
{ID: docker.CPUTotalUsage, Label: "CPU", Datatype: "number"},
{ID: docker.MemoryUsage, Label: "Memory", Datatype: "number"},
},
@@ -211,8 +204,7 @@ var (
{
topologyID: report.Process,
NodeSummaryGroup: NodeSummaryGroup{
TopologyID: "processes",
Label: "Processes", Columns: []Column{
Label: "Processes", Columns: []Column{
{ID: process.PID, Label: "PID", Datatype: "number"},
{ID: process.CPUUsage, Label: "CPU", Datatype: "number"},
{ID: process.MemoryUsage, Label: "Memory", Datatype: "number"},
@@ -246,13 +238,42 @@ func children(r report.Report, n report.Node) []NodeSummaryGroup {
})
nodeSummaryGroups := []NodeSummaryGroup{}
// Apply specific group specs in the order they're listed
for _, spec := range nodeSummaryGroupSpecs {
if len(summaries[spec.topologyID]) > 0 {
sort.Sort(nodeSummariesByID(summaries[spec.TopologyID]))
group := spec.NodeSummaryGroup
group.Nodes = summaries[spec.topologyID]
nodeSummaryGroups = append(nodeSummaryGroups, group)
if len(summaries[spec.topologyID]) == 0 {
continue
}
apiTopology, ok := primaryAPITopology[spec.topologyID]
if !ok {
continue
}
sort.Sort(nodeSummariesByID(summaries[spec.topologyID]))
group := spec.NodeSummaryGroup
group.Nodes = summaries[spec.topologyID]
group.TopologyID = apiTopology
nodeSummaryGroups = append(nodeSummaryGroups, group)
delete(summaries, spec.topologyID)
}
// As a fallback, in case a topology has no group spec defined, add any remaining at the end
for topologyID, nodeSummaries := range summaries {
if len(nodeSummaries) == 0 {
continue
}
topology, ok := r.Topology(topologyID)
if !ok {
continue
}
apiTopology, ok := primaryAPITopology[topologyID]
if !ok {
continue
}
sort.Sort(nodeSummariesByID(nodeSummaries))
group := NodeSummaryGroup{
TopologyID: apiTopology,
Label: topology.LabelPlural,
Columns: []Column{},
}
nodeSummaryGroups = append(nodeSummaryGroups, group)
}
return nodeSummaryGroups

View File

@@ -16,117 +16,82 @@ type Parent struct {
TopologyID string `json:"topologyId"`
}
func node(t report.Topology) func(string) (report.Node, bool) {
return func(id string) (report.Node, bool) {
n, ok := t.Nodes[id]
return n, ok
}
}
var (
kubernetesParentLabel = latestLookup(kubernetes.Name)
func fake(id string) (report.Node, bool) {
return report.MakeNode(id), true
}
getLabelForTopology = map[string]func(report.Node) string{
report.Container: getRenderableContainerName,
report.Pod: kubernetesParentLabel,
report.ReplicaSet: kubernetesParentLabel,
report.Deployment: kubernetesParentLabel,
report.Service: kubernetesParentLabel,
report.ECSTask: latestLookup(awsecs.TaskFamily),
report.ECSService: ecsServiceParentLabel,
report.ContainerImage: containerImageParentLabel,
report.Host: latestLookup(host.HostName),
}
)
// Parents renders the parents of this report.Node, which have been aggregated
// from the probe reports.
func Parents(r report.Report, n report.Node) (result []Parent) {
topologies := map[string]struct {
node func(id string) (report.Node, bool)
render func(report.Node) Parent
}{
report.Container: {node(r.Container), containerParent},
report.Pod: {node(r.Pod), podParent},
report.ReplicaSet: {node(r.ReplicaSet), replicaSetParent},
report.Deployment: {node(r.Deployment), deploymentParent},
report.Service: {node(r.Service), serviceParent},
report.ECSTask: {node(r.ECSTask), ecsTaskParent},
report.ECSService: {node(r.ECSService), ecsServiceParent},
report.ContainerImage: {fake, containerImageParent},
report.Host: {node(r.Host), hostParent},
}
topologyIDs := []string{}
for topologyID := range topologies {
for topologyID := range getLabelForTopology {
topologyIDs = append(topologyIDs, topologyID)
}
sort.Strings(topologyIDs)
for _, topologyID := range topologyIDs {
t := topologies[topologyID]
getLabel := getLabelForTopology[topologyID]
topology, ok := r.Topology(topologyID)
if !ok {
continue
}
parents, _ := n.Parents.Lookup(topologyID)
for _, id := range parents {
if topologyID == n.Topology && id == n.ID {
continue
}
parent, ok := t.node(id)
var parentNode report.Node
// Special case: container image parents should be empty nodes for some reason
if topologyID == report.ContainerImage {
parentNode = report.MakeNode(id)
} else {
if parent, ok := topology.Nodes[id]; ok {
parentNode = parent
} else {
continue
}
}
apiTopologyID, ok := primaryAPITopology[topologyID]
if !ok {
continue
}
result = append(result, t.render(parent))
result = append(result, Parent{
ID: id,
Label: getLabel(parentNode),
TopologyID: apiTopologyID,
})
}
}
return result
}
func containerParent(n report.Node) Parent {
label := getRenderableContainerName(n)
return Parent{
ID: n.ID,
Label: label,
TopologyID: "containers",
func latestLookup(key string) func(report.Node) string {
return func(n report.Node) string {
value, _ := n.Latest.Lookup(key)
return value
}
}
var (
podParent = kubernetesParent("pods")
replicaSetParent = kubernetesParent("replica-sets")
deploymentParent = kubernetesParent("deployments")
serviceParent = kubernetesParent("services")
)
func kubernetesParent(topology string) func(report.Node) Parent {
return func(n report.Node) Parent {
name, _ := n.Latest.Lookup(kubernetes.Name)
return Parent{
ID: n.ID,
Label: name,
TopologyID: topology,
}
}
}
func ecsTaskParent(n report.Node) Parent {
family, _ := n.Latest.Lookup(awsecs.TaskFamily)
return Parent{
ID: n.ID,
Label: family,
TopologyID: "ecs-tasks",
}
}
func ecsServiceParent(n report.Node) Parent {
func ecsServiceParentLabel(n report.Node) string {
_, name, _ := report.ParseECSServiceNodeID(n.ID)
return Parent{
ID: n.ID,
Label: name,
TopologyID: "ecs-services",
}
return name
}
func containerImageParent(n report.Node) Parent {
func containerImageParentLabel(n report.Node) string {
name, _ := report.ParseContainerImageNodeID(n.ID)
return Parent{
ID: n.ID,
Label: name,
TopologyID: "containers-by-image",
}
}
func hostParent(n report.Node) Parent {
hostName, _ := n.Latest.Lookup(host.HostName)
return Parent{
ID: n.ID,
Label: hostName,
TopologyID: "hosts",
}
return name
}

View File

@@ -73,6 +73,7 @@ var renderers = map[string]func(NodeSummary, report.Node) (NodeSummary, bool){
report.ECSService: ecsServiceNodeSummary,
report.Host: hostNodeSummary,
report.Overlay: weaveNodeSummary,
report.Endpoint: nil, // Do not render
}
var templates = map[string]struct{ Label, LabelMinor string }{
@@ -81,10 +82,31 @@ var templates = map[string]struct{ Label, LabelMinor string }{
render.OutgoingInternetID: {render.OutboundMajor, render.OutboundMinor},
}
// For each report.Topology, map to a 'primary' API topology. This can then be used in a variety of places.
var primaryAPITopology = map[string]string{
report.Process: "processes",
report.Container: "containers",
report.ContainerImage: "containers-by-image",
report.Pod: "pods",
report.ReplicaSet: "replica-sets",
report.Deployment: "deployments",
report.Service: "services",
report.ECSTask: "ecs-tasks",
report.ECSService: "ecs-services",
report.Host: "hosts",
}
// MakeNodeSummary summarizes a node, if possible.
func MakeNodeSummary(r report.Report, n report.Node) (NodeSummary, bool) {
if renderer, ok := renderers[n.Topology]; ok {
return renderer(baseNodeSummary(r, n), n)
// Skip (and don't fall through to fallback) if renderer maps to nil
if renderer != nil {
return renderer(baseNodeSummary(r, n), n)
}
} else if _, ok := r.Topology(n.Topology); ok {
summary := baseNodeSummary(r, n)
summary.Label = n.ID // This is unlikely to look very good, but is a reasonable fallback
return summary, true
}
if strings.HasPrefix(n.Topology, "group:") {
return groupNodeSummary(baseNodeSummary(r, n), r, n)

View File

@@ -1,9 +1,6 @@
package render
import (
"strings"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/report"
)
@@ -13,8 +10,11 @@ var ECSTaskRenderer = ConditionalRenderer(renderECSTopologies,
PropagateSingleMetrics(report.Container),
MakeReduce(
MakeMap(
MapContainer2ECSTask,
ContainerWithImageNameRenderer,
Map2Parent(report.ECSTask, UnmanagedID, nil),
MakeFilter(
IsRunning,
ContainerWithImageNameRenderer,
),
),
SelectECSTask,
),
@@ -27,7 +27,7 @@ var ECSServiceRenderer = ConditionalRenderer(renderECSTopologies,
PropagateSingleMetrics(report.ECSTask),
MakeReduce(
MakeMap(
Map2Parent(report.ECSService),
Map2Parent(report.ECSService, "", nil),
ECSTaskRenderer,
),
SelectECSService,
@@ -35,48 +35,6 @@ var ECSServiceRenderer = ConditionalRenderer(renderECSTopologies,
),
)
// MapContainer2ECSTask maps container Nodes to ECS Task
// Nodes.
//
// If this function is given a node without an ECS Task parent
// (including other pseudo nodes), it will produce an "Unmanaged"
// pseudo node.
//
// TODO: worth merging with MapContainer2Pod?
func MapContainer2ECSTask(n report.Node, _ report.Networks) report.Nodes {
// Uncontained becomes unmanaged in the tasks view
if strings.HasPrefix(n.ID, MakePseudoNodeID(UncontainedID)) {
id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
node := NewDerivedPseudoNode(id, n)
return report.Nodes{id: node}
}
// Propagate all pseudo nodes
if n.Topology == Pseudo {
return report.Nodes{n.ID: n}
}
// Ignore non-running containers
if state, ok := n.Latest.Lookup(docker.ContainerState); ok && state != docker.StateRunning {
return report.Nodes{}
}
taskIDSet, ok := n.Parents.Lookup(report.ECSTask)
if !ok || len(taskIDSet) == 0 {
id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
node := NewDerivedPseudoNode(id, n)
return report.Nodes{id: node}
}
nodeID := taskIDSet[0]
node := NewDerivedNode(nodeID, n).WithTopology(report.ECSTask)
// Propagate parent service
if serviceIDSet, ok := n.Parents.Lookup(report.ECSService); ok {
node = node.WithParents(report.MakeSets().Add(report.ECSService, serviceIDSet))
}
node.Counters = node.Counters.Add(n.Topology, 1)
return report.Nodes{nodeID: node}
}
func renderECSTopologies(rpt report.Report) bool {
return len(rpt.ECSTask.Nodes)+len(rpt.ECSService.Nodes) >= 1
}

View File

@@ -18,6 +18,11 @@ func renderKubernetesTopologies(rpt report.Report) bool {
return len(rpt.Pod.Nodes)+len(rpt.Service.Nodes)+len(rpt.Deployment.Nodes)+len(rpt.ReplicaSet.Nodes) >= 1
}
func isPauseContainer(n report.Node) bool {
image, ok := n.Latest.Lookup(docker.ImageName)
return ok && kubernetes.IsPauseImageName(image)
}
// PodRenderer is a Renderer which produces a renderable kubernetes
// graph by merging the container graph and the pods topology.
var PodRenderer = ConditionalRenderer(renderKubernetesTopologies,
@@ -30,8 +35,14 @@ var PodRenderer = ConditionalRenderer(renderKubernetesTopologies,
PropagateSingleMetrics(report.Container),
MakeReduce(
MakeMap(
MapContainer2Pod,
ContainerWithImageNameRenderer,
Map2Parent(report.Pod, UnmanagedID, nil),
MakeFilter(
ComposeFilterFuncs(
IsRunning,
Complement(isPauseContainer),
),
ContainerWithImageNameRenderer,
),
),
ShortLivedConnectionJoin(SelectPod, MapPod2IP),
SelectPod,
@@ -47,7 +58,7 @@ var PodServiceRenderer = ConditionalRenderer(renderKubernetesTopologies,
PropagateSingleMetrics(report.Pod),
MakeReduce(
MakeMap(
Map2Service,
Map2Parent(report.Service, "", nil),
PodRenderer,
),
SelectService,
@@ -62,7 +73,7 @@ var DeploymentRenderer = ConditionalRenderer(renderKubernetesTopologies,
PropagateSingleMetrics(report.ReplicaSet),
MakeReduce(
MakeMap(
Map2Deployment,
Map2Parent(report.Deployment, "", mapPodCounts),
ReplicaSetRenderer,
),
SelectDeployment,
@@ -77,7 +88,7 @@ var ReplicaSetRenderer = ConditionalRenderer(renderKubernetesTopologies,
PropagateSingleMetrics(report.Pod),
MakeReduce(
MakeMap(
Map2ReplicaSet,
Map2Parent(report.ReplicaSet, "", nil),
PodRenderer,
),
SelectReplicaSet,
@@ -85,54 +96,12 @@ var ReplicaSetRenderer = ConditionalRenderer(renderKubernetesTopologies,
),
)
// MapContainer2Pod maps container Nodes to pod
// Nodes.
//
// If this function is given a node without a kubernetes_pod_id
// (including other pseudo nodes), it will produce an "Unmanaged"
// pseudo node.
//
// Otherwise, this function will produce a node with the correct ID
// format for a container, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a container graph to get that info.
func MapContainer2Pod(n report.Node, _ report.Networks) report.Nodes {
// Uncontained becomes unmanaged in the pods view
if strings.HasPrefix(n.ID, MakePseudoNodeID(UncontainedID)) {
id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
node := NewDerivedPseudoNode(id, n)
return report.Nodes{id: node}
func mapPodCounts(parent, original report.Node) report.Node {
// When mapping ReplicaSets to Deployments, we want to propagate the Pods counter
if count, ok := original.Counters.Lookup(report.Pod); ok {
parent.Counters = parent.Counters.Add(report.Pod, count)
}
// Propagate all pseudo nodes
if n.Topology == Pseudo {
return report.Nodes{n.ID: n}
}
// Ignore non-running containers
if state, ok := n.Latest.Lookup(docker.ContainerState); ok && state != docker.StateRunning {
return report.Nodes{}
}
// Ignore pause containers
if image, ok := n.Latest.Lookup(docker.ImageName); ok && kubernetes.IsPauseImageName(image) {
return report.Nodes{}
}
// Otherwise, if for some reason the container doesn't have a pod uid (maybe
// slightly out of sync reports, or it's not in a pod), make it part of unmanaged.
uid, ok := n.Latest.Lookup(docker.LabelPrefix + "io.kubernetes.pod.uid")
if !ok {
id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
node := NewDerivedPseudoNode(id, n)
return report.Nodes{id: node}
}
id := report.MakePodNodeID(uid)
node := NewDerivedNode(id, n).
WithTopology(report.Pod)
node.Counters = node.Counters.Add(n.Topology, 1)
return report.Nodes{id: node}
return parent
}
// MapPod2IP maps pod nodes to their IP address. This allows pods to
@@ -152,42 +121,50 @@ func MapPod2IP(m report.Node) []string {
return []string{report.MakeScopedEndpointNodeID("", ip, "")}
}
// The various ways of grouping pods
var (
Map2Service = Map2Parent(report.Service)
Map2Deployment = Map2Parent(report.Deployment)
Map2ReplicaSet = Map2Parent(report.ReplicaSet)
)
// Map2Parent maps Nodes to some parent grouping.
func Map2Parent(topology string) func(n report.Node, _ report.Networks) report.Nodes {
// Map2Parent returns a MapFunc which maps Nodes to some parent grouping.
func Map2Parent(
// The topology ID of the parents
topology string,
// Either the ID prefix of the pseudo node to use for nodes without
// any parents in the group, eg. UnmanagedID, or "" to drop nodes without any parents.
noParentsPseudoID string,
// Optional (can be nil) function to modify any parent nodes,
// eg. to copy over details from the original node.
modifyMappedNode func(parent, original report.Node) report.Node,
) MapFunc {
return func(n report.Node, _ report.Networks) report.Nodes {
// Uncontained becomes Unmanaged/whatever if noParentsPseudoID is set
if noParentsPseudoID != "" && strings.HasPrefix(n.ID, MakePseudoNodeID(UncontainedID)) {
id := MakePseudoNodeID(noParentsPseudoID, report.ExtractHostID(n))
node := NewDerivedPseudoNode(id, n)
return report.Nodes{id: node}
}
// Propagate all pseudo nodes
if n.Topology == Pseudo {
return report.Nodes{n.ID: n}
}
// Otherwise, if for some reason the node doesn't have any of these ids
// (maybe slightly out of sync reports, or it's not in this group), just
// drop it
// If for some reason the node doesn't have any of these ids
// (maybe slightly out of sync reports, or it's not in this group),
// either drop it or put it in Uncontained/Unmanaged/whatever if one was given
groupIDs, ok := n.Parents.Lookup(topology)
if !ok {
return report.Nodes{}
if !ok || len(groupIDs) == 0 {
if noParentsPseudoID == "" {
return report.Nodes{}
}
id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n))
node := NewDerivedPseudoNode(id, n)
return report.Nodes{id: node}
}
result := report.Nodes{}
for _, id := range groupIDs {
node := NewDerivedNode(id, n).WithTopology(topology)
node.Counters = node.Counters.Add(n.Topology, 1)
// When mapping replica(tionController)s(ets) to deployments
// we must propagate the pod counter.
if n.Topology != report.Pod {
if count, ok := n.Counters.Lookup(report.Pod); ok {
node.Counters = node.Counters.Add(report.Pod, count)
}
if modifyMappedNode != nil {
node = modifyMappedNode(node, n)
}
result[id] = node
}
return result

View File

@@ -177,49 +177,49 @@ func MakeReport() Report {
}
}
// TopologyMap gets a map from topology names to pointers to the respective topologies
func (r *Report) TopologyMap() map[string]*Topology {
return map[string]*Topology{
Endpoint: &r.Endpoint,
Process: &r.Process,
Container: &r.Container,
ContainerImage: &r.ContainerImage,
Pod: &r.Pod,
Service: &r.Service,
Deployment: &r.Deployment,
ReplicaSet: &r.ReplicaSet,
Host: &r.Host,
Overlay: &r.Overlay,
ECSTask: &r.ECSTask,
ECSService: &r.ECSService,
}
}
// Copy returns a value copy of the report.
func (r Report) Copy() Report {
return Report{
Endpoint: r.Endpoint.Copy(),
Process: r.Process.Copy(),
Container: r.Container.Copy(),
ContainerImage: r.ContainerImage.Copy(),
Host: r.Host.Copy(),
Pod: r.Pod.Copy(),
Service: r.Service.Copy(),
Deployment: r.Deployment.Copy(),
ReplicaSet: r.ReplicaSet.Copy(),
Overlay: r.Overlay.Copy(),
ECSTask: r.ECSTask.Copy(),
ECSService: r.ECSService.Copy(),
Sampling: r.Sampling,
Window: r.Window,
Plugins: r.Plugins.Copy(),
ID: fmt.Sprintf("%d", rand.Int63()),
newReport := Report{
Sampling: r.Sampling,
Window: r.Window,
Plugins: r.Plugins.Copy(),
ID: fmt.Sprintf("%d", rand.Int63()),
}
newReport.WalkPairedTopologies(&r, func(newTopology, oldTopology *Topology) {
*newTopology = oldTopology.Copy()
})
return newReport
}
// Merge merges another Report into the receiver and returns the result. The
// original is not modified.
func (r Report) Merge(other Report) Report {
return Report{
Endpoint: r.Endpoint.Merge(other.Endpoint),
Process: r.Process.Merge(other.Process),
Container: r.Container.Merge(other.Container),
ContainerImage: r.ContainerImage.Merge(other.ContainerImage),
Host: r.Host.Merge(other.Host),
Pod: r.Pod.Merge(other.Pod),
Service: r.Service.Merge(other.Service),
Deployment: r.Deployment.Merge(other.Deployment),
ReplicaSet: r.ReplicaSet.Merge(other.ReplicaSet),
Overlay: r.Overlay.Merge(other.Overlay),
ECSTask: r.ECSTask.Merge(other.ECSTask),
ECSService: r.ECSService.Merge(other.ECSService),
Sampling: r.Sampling.Merge(other.Sampling),
Window: r.Window + other.Window,
Plugins: r.Plugins.Merge(other.Plugins),
ID: fmt.Sprintf("%d", rand.Int63()),
}
newReport := r.Copy()
newReport.Sampling = newReport.Sampling.Merge(other.Sampling)
newReport.Window = newReport.Window + other.Window
newReport.Plugins = newReport.Plugins.Merge(other.Plugins)
newReport.WalkPairedTopologies(&other, func(ourTopology, theirTopology *Topology) {
*ourTopology = ourTopology.Merge(*theirTopology)
})
return newReport
}
// Topologies returns a slice of Topologies in this report
@@ -234,37 +234,33 @@ func (r Report) Topologies() []Topology {
// WalkTopologies iterates through the Topologies of the report,
// potentially modifying them
func (r *Report) WalkTopologies(f func(*Topology)) {
f(&r.Endpoint)
f(&r.Process)
f(&r.Container)
f(&r.ContainerImage)
f(&r.Pod)
f(&r.Service)
f(&r.Deployment)
f(&r.ReplicaSet)
f(&r.Host)
f(&r.Overlay)
f(&r.ECSTask)
f(&r.ECSService)
var dummy Report
r.WalkPairedTopologies(&dummy, func(t, _ *Topology) { f(t) })
}
// WalkPairedTopologies iterates through the Topologies of this and another report,
// potentially modifying one or both.
func (r *Report) WalkPairedTopologies(o *Report, f func(*Topology, *Topology)) {
f(&r.Endpoint, &o.Endpoint)
f(&r.Process, &o.Process)
f(&r.Container, &o.Container)
f(&r.ContainerImage, &o.ContainerImage)
f(&r.Pod, &o.Pod)
f(&r.Service, &o.Service)
f(&r.Deployment, &o.Deployment)
f(&r.ReplicaSet, &o.ReplicaSet)
f(&r.Host, &o.Host)
f(&r.Overlay, &o.Overlay)
f(&r.ECSTask, &o.ECSTask)
f(&r.ECSService, &o.ECSService)
}
// Topology gets a topology by name
func (r Report) Topology(name string) (Topology, bool) {
t, ok := map[string]Topology{
Endpoint: r.Endpoint,
Process: r.Process,
Container: r.Container,
ContainerImage: r.ContainerImage,
Pod: r.Pod,
Service: r.Service,
Deployment: r.Deployment,
ReplicaSet: r.ReplicaSet,
Host: r.Host,
Overlay: r.Overlay,
ECSTask: r.ECSTask,
ECSService: r.ECSService,
}[name]
return t, ok
if t, ok := r.TopologyMap()[name]; ok {
return *t, true
}
return Topology{}, false
}
// Validate checks the report for various inconsistencies.