Merge pull request #2772 from weaveworks/no-endpoint-type

get rid of endpoint type indicators
This commit is contained in:
Matthias Radestock
2017-07-31 14:23:30 +01:00
committed by GitHub
10 changed files with 45 additions and 129 deletions

View File

@@ -25,8 +25,6 @@ scope_on "$HOST1" launch --probe.ebpf.connections=false
#
# has_connection_by_id containers "$HOST1" "in-theinternet" "$(node_id containers "$HOST1" nginx)"
#
# endpoints_have_ebpf "$HOST1"
#
# kill %do_connections
scope_end_suite

View File

@@ -20,6 +20,5 @@ list_containers "$HOST1"
list_connections "$HOST1"
has_connection containers "$HOST1" client nginx
endpoints_have_ebpf "$HOST1"
scope_end_suite

View File

@@ -19,6 +19,4 @@ list_connections "$HOST1"
has_connection containers "$HOST1" client nginx
endpoints_have_ebpf "$HOST1"
scope_end_suite

View File

@@ -34,6 +34,4 @@ list_connections "$HOST1"
has_connection containers "$HOST1" client server
endpoints_have_ebpf "$HOST1"
scope_end_suite

View File

@@ -122,32 +122,6 @@ has_connection_by_id() {
assert "curl -s http://$host:4040/api/topology/${view}?system=show | jq -r '.nodes[\"$from_id\"].adjacency | contains([\"$to_id\"])'" true
}
# this checks if ebpf is true on all endpoints on a given host
#
# Polls http://$host:4040/api/report until every node in the Endpoint
# topology carries latest.eBPF == "true", or gives up after $timeout
# seconds (default 60) and dumps a sample endpoint for debugging.
endpoints_have_ebpf() {
    local host="$1"
    local timeout="${2:-60}"
    local number_of_endpoints=-1
    local have_ebpf=-1
    local report
    for i in $(seq "$timeout"); do
        report=$(curl -s "http://${host}:4040/api/report")
        # total number of endpoint nodes in the report
        number_of_endpoints=$(echo "${report}" | jq -r '.Endpoint.nodes | length')
        # number of endpoint nodes whose latest.eBPF value is "true"
        have_ebpf=$(echo "${report}" | jq -r '.Endpoint.nodes[].latest.eBPF | select(.value != null) | contains({"value": "true"})' | wc -l)
        if [[ "$number_of_endpoints" -gt 0 && "$have_ebpf" -gt 0 && "$number_of_endpoints" -eq "$have_ebpf" ]]; then
            echo "Found ${number_of_endpoints} endpoints with ebpf enabled"
            assert "echo '$have_ebpf'" "$number_of_endpoints"
            return
        fi
        sleep 1
    done
    echo "Only ${have_ebpf} endpoints of ${number_of_endpoints} have ebpf enabled, should be equal"
    echo "Example of one endpoint:"
    echo "${report}" | jq -r '[.Endpoint.nodes[]][0]'
    # fix: original was missing the closing single quote after $have_ebpf,
    # which corrupted the asserted command string (cf. the success path above)
    assert "echo '$have_ebpf'" "$number_of_endpoints"
}
has_connection() {
local view="$1"
local host="$2"

View File

@@ -91,29 +91,19 @@ func (t *connectionTracker) ReportConnections(rpt *report.Report) {
t.useProcfs()
}
// seenTuples contains information about connections seen by
// conntrack
seenTuples := t.performFlowWalk(rpt)
// consult the flowWalker for short-lived (conntracked) connections
seenTuples := map[string]fourTuple{}
t.flowWalker.walkFlows(func(f flow, alive bool) {
tuple := flowToTuple(f)
seenTuples[tuple.key()] = tuple
t.addConnection(rpt, false, tuple, "", nil, nil)
})
if t.conf.WalkProc && t.conf.Scanner != nil {
t.performWalkProc(rpt, hostNodeID, seenTuples)
}
}
// performFlowWalk consults the flowWalker for short-lived connections
func (t *connectionTracker) performFlowWalk(rpt *report.Report) map[string]fourTuple {
seenTuples := map[string]fourTuple{}
extraNodeInfo := map[string]string{
Conntracked: "true",
}
t.flowWalker.walkFlows(func(f flow, alive bool) {
tuple := flowToTuple(f)
seenTuples[tuple.key()] = tuple
t.addConnection(rpt, tuple, "", extraNodeInfo, extraNodeInfo)
})
return seenTuples
}
func (t *connectionTracker) existingFlows() map[string]fourTuple {
seenTuples := map[string]fourTuple{}
if !t.conf.UseConntrack {
@@ -138,19 +128,14 @@ func (t *connectionTracker) performWalkProc(rpt *report.Report, hostNodeID strin
}
for conn := conns.Next(); conn != nil; conn = conns.Next() {
tuple, namespaceID, incoming := connectionTuple(conn, seenTuples)
var (
toNodeInfo = map[string]string{Procspied: "true"}
fromNodeInfo = map[string]string{Procspied: "true"}
)
var toNodeInfo, fromNodeInfo map[string]string
if conn.Proc.PID > 0 {
fromNodeInfo[process.PID] = strconv.FormatUint(uint64(conn.Proc.PID), 10)
fromNodeInfo[report.HostNodeID] = hostNodeID
fromNodeInfo = map[string]string{
process.PID: strconv.FormatUint(uint64(conn.Proc.PID), 10),
report.HostNodeID: hostNodeID,
}
}
if incoming {
tuple.reverse()
toNodeInfo, fromNodeInfo = fromNodeInfo, toNodeInfo
}
t.addConnection(rpt, tuple, namespaceID, fromNodeInfo, toNodeInfo)
t.addConnection(rpt, incoming, tuple, namespaceID, fromNodeInfo, toNodeInfo)
}
return nil
}
@@ -186,28 +171,23 @@ func (t *connectionTracker) getInitialState() {
func (t *connectionTracker) performEbpfTrack(rpt *report.Report, hostNodeID string) error {
t.ebpfTracker.walkConnections(func(e ebpfConnection) {
fromNodeInfo := map[string]string{
EBPF: "true",
}
toNodeInfo := map[string]string{
EBPF: "true",
}
var toNodeInfo, fromNodeInfo map[string]string
if e.pid > 0 {
fromNodeInfo[process.PID] = strconv.Itoa(e.pid)
fromNodeInfo[report.HostNodeID] = hostNodeID
fromNodeInfo = map[string]string{
process.PID: strconv.Itoa(e.pid),
report.HostNodeID: hostNodeID,
}
}
if e.incoming {
t.addConnection(rpt, reverse(e.tuple), e.networkNamespace, toNodeInfo, fromNodeInfo)
} else {
t.addConnection(rpt, e.tuple, e.networkNamespace, fromNodeInfo, toNodeInfo)
}
t.addConnection(rpt, e.incoming, e.tuple, e.networkNamespace, fromNodeInfo, toNodeInfo)
})
return nil
}
func (t *connectionTracker) addConnection(rpt *report.Report, ft fourTuple, namespaceID string, extraFromNode, extraToNode map[string]string) {
func (t *connectionTracker) addConnection(rpt *report.Report, incoming bool, ft fourTuple, namespaceID string, extraFromNode, extraToNode map[string]string) {
if incoming {
ft = reverse(ft)
extraFromNode, extraToNode = extraToNode, extraFromNode
}
var (
fromNode = t.makeEndpointNode(namespaceID, ft.fromAddr, ft.fromPort, extraFromNode)
toNode = t.makeEndpointNode(namespaceID, ft.toAddr, ft.toPort, extraToNode)

View File

@@ -70,8 +70,7 @@ func TestNat(t *testing.T) {
have := report.MakeReport()
originalID := report.MakeEndpointNodeID("host1", "", "10.0.47.1", "80")
have.Endpoint.AddNode(report.MakeNodeWith(originalID, map[string]string{
"foo": "bar",
Procspied: "true",
"foo": "bar",
}))
want := have.Copy()
@@ -79,7 +78,6 @@ func TestNat(t *testing.T) {
want.Endpoint.AddNode(report.MakeNodeWith(wantID, map[string]string{
"copy_of": originalID,
"foo": "bar",
Procspied: "true",
}))
makeNATMapper(ct).applyNAT(have, "host1")
@@ -125,15 +123,13 @@ func TestNat(t *testing.T) {
have := report.MakeReport()
originalID := report.MakeEndpointNodeID("host2", "", "10.0.47.2", "22222")
have.Endpoint.AddNode(report.MakeNodeWith(originalID, map[string]string{
"foo": "baz",
Procspied: "true",
"foo": "baz",
}))
want := have.Copy()
want.Endpoint.AddNode(report.MakeNodeWith(report.MakeEndpointNodeID("host2", "", "2.3.4.5", "22223"), map[string]string{
"copy_of": originalID,
"foo": "baz",
Procspied: "true",
}))
makeNATMapper(ct).applyNAT(have, "host1")

View File

@@ -11,9 +11,6 @@ import (
// Node metadata keys.
const (
Conntracked = "conntracked"
EBPF = "eBPF"
Procspied = "procspied"
ReverseDNSNames = "reverse_dns_names"
SnoopedDNSNames = "snooped_dns_names"
)

View File

@@ -7,7 +7,6 @@ import (
"github.com/weaveworks/common/mtime"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/endpoint"
"github.com/weaveworks/scope/probe/host"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/report"
@@ -51,24 +50,16 @@ var (
rpt = report.Report{
Endpoint: report.Topology{
Nodes: report.Nodes{
randomEndpointNodeID: report.MakeNodeWith(randomEndpointNodeID, map[string]string{
endpoint.Conntracked: "true",
}).
WithAdjacent(serverEndpointNodeID).WithTopology(report.Endpoint),
randomEndpointNodeID: report.MakeNode(randomEndpointNodeID).
WithTopology(report.Endpoint).WithAdjacent(serverEndpointNodeID),
serverEndpointNodeID: report.MakeNodeWith(serverEndpointNodeID, map[string]string{
endpoint.Conntracked: "true",
}).
serverEndpointNodeID: report.MakeNode(serverEndpointNodeID).
WithTopology(report.Endpoint),
container1EndpointNodeID: report.MakeNodeWith(container1EndpointNodeID, map[string]string{
endpoint.Conntracked: "true",
}).
WithAdjacent(duplicatedEndpointNodeID).WithTopology(report.Endpoint),
container1EndpointNodeID: report.MakeNode(container1EndpointNodeID).
WithTopology(report.Endpoint).WithAdjacent(duplicatedEndpointNodeID),
duplicatedEndpointNodeID: report.MakeNodeWith(duplicatedEndpointNodeID, map[string]string{
endpoint.Conntracked: "true",
}).
duplicatedEndpointNodeID: report.MakeNode(duplicatedEndpointNodeID).
WithTopology(report.Endpoint),
},
},

View File

@@ -4,7 +4,6 @@ import (
"time"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/endpoint"
"github.com/weaveworks/scope/probe/host"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/probe/process"
@@ -128,67 +127,53 @@ var (
// care to test into the fixture. Just be sure to include the bits
// that the mapping funcs extract :)
Client54001NodeID: report.MakeNode(Client54001NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
process.PID: Client1PID,
report.HostNodeID: ClientHostNodeID,
endpoint.Procspied: True,
process.PID: Client1PID,
report.HostNodeID: ClientHostNodeID,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(10),
EgressByteCount: newu64(100),
}),
Client54002NodeID: report.MakeNode(Client54002NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
process.PID: Client2PID,
report.HostNodeID: ClientHostNodeID,
endpoint.Procspied: True,
process.PID: Client2PID,
report.HostNodeID: ClientHostNodeID,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(20),
EgressByteCount: newu64(200),
}),
Server80NodeID: report.MakeNode(Server80NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
process.PID: ServerPID,
report.HostNodeID: ServerHostNodeID,
endpoint.Procspied: True,
process.PID: ServerPID,
report.HostNodeID: ServerHostNodeID,
}),
NonContainerNodeID: report.MakeNode(NonContainerNodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
process.PID: NonContainerPID,
report.HostNodeID: ServerHostNodeID,
endpoint.Procspied: True,
process.PID: NonContainerPID,
report.HostNodeID: ServerHostNodeID,
}).WithAdjacent(GoogleEndpointNodeID),
// Probe pseudo nodes
UnknownClient1NodeID: report.MakeNode(UnknownClient1NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
UnknownClient1NodeID: report.MakeNode(UnknownClient1NodeID).WithTopology(report.Endpoint).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(30),
EgressByteCount: newu64(300),
}),
UnknownClient2NodeID: report.MakeNode(UnknownClient2NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
UnknownClient2NodeID: report.MakeNode(UnknownClient2NodeID).WithTopology(report.Endpoint).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(40),
EgressByteCount: newu64(400),
}),
UnknownClient3NodeID: report.MakeNode(UnknownClient3NodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
UnknownClient3NodeID: report.MakeNode(UnknownClient3NodeID).WithTopology(report.Endpoint).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(50),
EgressByteCount: newu64(500),
}),
RandomClientNodeID: report.MakeNode(RandomClientNodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}).WithEdge(Server80NodeID, report.EdgeMetadata{
RandomClientNodeID: report.MakeNode(RandomClientNodeID).WithTopology(report.Endpoint).WithEdge(Server80NodeID, report.EdgeMetadata{
EgressPacketCount: newu64(60),
EgressByteCount: newu64(600),
}),
GoogleEndpointNodeID: report.MakeNode(GoogleEndpointNodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
endpoint.Procspied: True,
}),
GoogleEndpointNodeID: report.MakeNode(GoogleEndpointNodeID).WithTopology(report.Endpoint),
},
},
Process: report.Topology{