Delete experimental, replace with 'extras'

extras contains fixprobe and example, and both ought to build properly
This commit is contained in:
Jonathan Lange
2016-07-07 14:30:08 +01:00
parent 6d527821e7
commit 1c440486f0
113 changed files with 8 additions and 5007 deletions

10
.gitignore vendored
View File

@@ -44,14 +44,8 @@ docker/scope
docker/docker.tgz
docker/weave
docker/runsvinit
experimental/bridge/bridge
experimental/demoprobe/demoprobe
experimental/fixprobe/fixprobe
experimental/fixprobe/*.json
experimental/genreport/genreport
experimental/graphviz/graphviz
experimental/oneshot/oneshot
experimental/_integration/_integration
extras/fixprobe/fixprobe
extras/fixprobe/*.json
*sublime-project
*sublime-workspace
*npm-debug.log

View File

@@ -42,7 +42,7 @@ test:
parallel: true
- cd $SRCDIR; rm -f prog/scope; make RM=:
parallel: true
- cd $SRCDIR/experimental; ./build_on_circle.sh:
- cd $SRCDIR/extras; ./build_on_circle.sh:
parallel: true
- "test -z \"$SECRET_PASSWORD\" || (cd $SRCDIR/integration; ./gce.sh setup && eval $(./gce.sh hosts); ./setup.sh)":
parallel: true

View File

@@ -1,88 +0,0 @@
package integration
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
)
var components = map[string]string{
"app": "../../app/app",
"bridge": "../bridge/bridge",
"fixprobe": "../fixprobe/fixprobe",
"demoprobe": "../demoprobe/demoprobe",
}
// cmdline is e.g. `experimental/fixprobe/fixprobe -publish.interval=10ms fixture.json`
// start launches one of the components registered in the components map,
// given a space-separated cmdline such as
// `fixprobe -publish.interval=10ms fixture.json`. The child's stdout and
// stderr are forwarded to the test log via testWriter. Any failure
// (unknown component, missing binary, failed Start) aborts the test.
func start(t *testing.T, cmdline string) *exec.Cmd {
	toks := strings.Split(cmdline, " ")
	if len(toks) <= 0 {
		t.Fatalf("bad cmdline %q", cmdline)
	}
	// First token selects the component; the rest are its arguments.
	component, args := toks[0], toks[1:]
	relpath, ok := components[component]
	if !ok {
		t.Fatalf("%s: unknown", component)
	}
	// Fail early with a clear message if the binary has not been built.
	if _, err := os.Stat(relpath); err != nil {
		t.Fatalf("%s: %s", component, err)
	}
	// Run the binary from its own directory; Path is relative to Dir.
	cmd := &exec.Cmd{
		Dir:    filepath.Dir(relpath),
		Path:   filepath.Base(relpath),
		Args:   append([]string{filepath.Base(relpath)}, args...),
		Stdout: testWriter{t, component},
		Stderr: testWriter{t, component},
	}
	if err := cmd.Start(); err != nil {
		t.Fatalf("%s: Start: %s", component, err)
	}
	return cmd
}
// stop kills the process started by start and reaps it, failing the test
// if that does not complete within 250ms.
func stop(t *testing.T, c *exec.Cmd) {
	name := filepath.Base(c.Path)
	done := make(chan struct{})
	go func() {
		defer close(done)
		// t.Fatalf must only be called from the goroutine running the
		// test function (it calls runtime.Goexit); from this helper
		// goroutine we may only mark failure with Errorf.
		if err := c.Process.Kill(); err != nil {
			t.Errorf("%s: Kill: %s", name, err)
			return
		}
		if _, err := c.Process.Wait(); err != nil {
			t.Errorf("%s: Wait: %s", name, err)
		}
	}()
	select {
	case <-done:
	case <-time.After(250 * time.Millisecond):
		t.Fatalf("timeout when trying to stop %s", name)
	}
}
// testWriter is an io.Writer that forwards subprocess output into the
// test log, tagging every write with the originating component's name.
type testWriter struct {
	*testing.T
	component string
}

// Write logs the payload and reports it as fully consumed.
func (tw testWriter) Write(data []byte) (int, error) {
	tw.T.Logf("<%10s> %s", tw.component, data)
	return len(data), nil
}
func cwd() string {
cwd, err := os.Getwd()
if err != nil {
panic(err)
}
return cwd
}

View File

@@ -1,79 +0,0 @@
package integration
import (
"fmt"
"io/ioutil"
"net/http"
"testing"
"time"
)
type context int
const (
oneProbe context = iota
twoProbes
)
var (
appPort = 14030
bridgePort = 14020
probePort1 = 14010
probePort2 = 14011
)
// withContext builds a fixture topology — one or two fixprobes feeding an
// app — waits long enough for at least one publish/batch cycle, then runs
// each supplied test closure. All processes are torn down by the deferred
// stops when withContext returns.
func withContext(t *testing.T, c context, tests ...func()) {
	var (
		publish = 10 * time.Millisecond
		batch   = 10 * publish
		wait    = 2 * batch // long enough for the app to ingest a batch
	)
	switch c {
	case oneProbe:
		probe := start(t, fmt.Sprintf(`fixprobe -listen=:%d -publish.interval=%s %s/test_single_report.json`, probePort1, publish, cwd()))
		defer stop(t, probe)
		// Let the probe start listening before the app dials it.
		time.Sleep(2 * publish)
		app := start(t, fmt.Sprintf(`app -http.address=:%d -probes=localhost:%d -batch=%s`, appPort, probePort1, batch))
		defer stop(t, app)
	case twoProbes:
		probe1 := start(t, fmt.Sprintf(`fixprobe -listen=:%d -publish.interval=%s %s/test_single_report.json`, probePort1, publish, cwd()))
		defer stop(t, probe1)
		probe2 := start(t, fmt.Sprintf(`fixprobe -listen=:%d -publish.interval=%s %s/test_extra_report.json`, probePort2, publish, cwd()))
		defer stop(t, probe2)
		time.Sleep(2 * publish)
		app := start(t, fmt.Sprintf(`app -http.address=:%d -probes=localhost:%d,localhost:%d -batch=%s`, appPort, probePort1, probePort2, batch))
		defer stop(t, app)
	default:
		t.Fatalf("bad context %v", c)
	}
	// Give the pipeline time to settle before asserting anything.
	time.Sleep(wait)
	for _, f := range tests {
		f()
	}
}
func httpGet(t *testing.T, url string) []byte {
resp, err := http.Get(url)
if err != nil {
t.Fatalf("httpGet: %s", err)
}
defer resp.Body.Close()
if status := resp.StatusCode; status != 200 {
t.Fatalf("httpGet got status %d, expected 200", status)
}
buf, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("httpGet: %s", err)
}
return buf
}

View File

@@ -1,2 +0,0 @@
// Package integration implements integration tests between components.
package integration

View File

@@ -1,94 +0,0 @@
package integration_test
import (
"encoding/json"
"fmt"
"path/filepath"
"reflect"
"testing"
"time"
"github.com/weaveworks/scope/report"
)
// TestComponentsAreAvailable smoke-tests each built binary: start it,
// give it a moment, and stop it again.
func TestComponentsAreAvailable(t *testing.T) {
	pause := time.Millisecond
	for _, c := range []string{
		fmt.Sprintf(`app -http.address=:%d`, appPort),
		fmt.Sprintf(`bridge -listen=:%d`, bridgePort),
		fmt.Sprintf(`fixprobe -listen=:%d`, probePort1),
		fmt.Sprintf(`demoprobe -listen=:%d`, probePort1),
	} {
		cmd := start(t, c)
		time.Sleep(pause)
		stop(t, cmd)
		t.Logf("%s: OK", filepath.Base(cmd.Path))
	}
}
// TestProcesses checks the process topology produced from the single-probe
// fixture: adjacency of the apache node, and the metadata on one edge.
func TestProcesses(t *testing.T) {
	withContext(t, oneProbe, func() {
		topo := parseTopology(t, httpGet(t, fmt.Sprintf("http://localhost:%d/api/topology/processes", appPort)))
		assertAdjacent(t, topo["proc:node-1.2.3.4:apache"], "theinternet", "proc:node-192.168.1.1:wget")
		want := map[string]interface{}{"max_conn_count_tcp": float64(19)}
		have := parseEdge(t, httpGet(t, fmt.Sprintf("http://localhost:%d/api/topology/processes/%s/%s", appPort, "proc:node-192.168.1.1:wget", "theinternet")))
		if !reflect.DeepEqual(have, want) {
			t.Errorf("have: %#v, want %#v", have, want)
		}
	})
}
// TestHosts checks the hosts topology produced from the single-probe
// fixture, mirroring TestProcesses at host granularity.
func TestHosts(t *testing.T) {
	withContext(t, oneProbe, func() {
		topo := parseTopology(t, httpGet(t, fmt.Sprintf("http://localhost:%d/api/topology/hosts", appPort)))
		assertAdjacent(t, topo["host:1_2_3_4"], "theinternet", "host:192_168_1_1")
		have := parseEdge(t, httpGet(t, fmt.Sprintf("http://localhost:%d/api/topology/hosts/%s/%s", appPort, "host:192_168_1_1", "theinternet")))
		want := map[string]interface{}{
			// "window": "15s",
			"max_conn_count_tcp": float64(12),
		}
		if !reflect.DeepEqual(have, want) {
			t.Errorf("have: %#v, want %#v", have, want)
		}
	})
}
// TestMultipleProbes checks that the app merges reports from two probes:
// the apache node should be adjacent to clients seen by both fixtures.
func TestMultipleProbes(t *testing.T) {
	withContext(t, twoProbes, func() {
		topo := parseTopology(t, httpGet(t, fmt.Sprintf("http://localhost:%d/api/topology/processes", appPort)))
		assertAdjacent(t, topo["proc:node-1.2.3.4:apache"], "theinternet", "proc:node-192.168.1.1:wget", "proc:node-192.168.1.1:curl")
	})
}
// parseTopology decodes a topology API response and returns its node map,
// failing the test on malformed JSON.
func parseTopology(t *testing.T, p []byte) map[string]report.Node {
	var r struct {
		Nodes map[string]report.Node `json:"nodes"`
	}
	if err := json.Unmarshal(p, &r); err != nil {
		t.Fatalf("parseTopology: %s", err)
	}
	return r.Nodes
}
// parseEdge decodes an edge API response and returns its metadata map,
// failing the test on malformed JSON.
func parseEdge(t *testing.T, p []byte) map[string]interface{} {
	var payload struct {
		Metadata map[string]interface{} `json:"metadata"`
	}
	if err := json.Unmarshal(p, &payload); err != nil {
		t.Fatalf("Err: %v", err)
	}
	return payload.Metadata
}
// assertAdjacent fails the test unless node n's adjacency list is exactly
// the ID list built from ids.
func assertAdjacent(t *testing.T, n report.Node, ids ...string) {
	want := report.MakeIDList(ids...)
	if have := n.Adjacency; !reflect.DeepEqual(want, have) {
		t.Fatalf("want adjacency list %v, have %v", want, have)
	}
}

View File

@@ -1,63 +0,0 @@
{
"Process": {
"Adjacency": {
"hostX|;1.2.3.4;80": [
";192.168.1.1;10747",
"theinternet"
],
"hostX|;192.168.1.1;10747": [
"theinternet"
]
},
"EdgeMetadatas": {},
"NodeMetadatas": {
";1.2.3.4;80": {
"domain": "node-1.2.3.4",
"name": "apache",
"pid": "4000"
},
";192.168.1.1;10747": {
"domain": "node-192.168.1.1",
"name": "curl",
"pid": "4001"
}
}
},
"Network": {
"Adjacency": {
"hostX|;1.2.3.4": [
";192.168.1.1",
"theinternet"
],
"hostX|;192.168.1.1": [
"theinternet",
";192.168.1.1"
]
},
"EdgeMetadatas": {},
"NodeMetadatas": {
";1.2.3.4": {
"name": "1.2.3.4"
},
";192.168.1.1": {
"name": "192.168.1.1"
}
}
},
"HostMetadatas": {
"hostX": {
"Timestamp": "2014-10-28T23:12:23.451728464Z",
"Hostname": "host-x",
"Addresses": null,
"LocalNets": [
{
"IP": "192.168.0.0",
"Mask": "//8AAA=="
}
],
"OS": "linux"
}
}
}

View File

@@ -1,71 +0,0 @@
{
  "Process": {
    "Adjacency": {
      "hostX|;1.2.3.4;80": [
        ";192.168.1.1;10746",
        "theinternet"
      ],
      "hostX|;192.168.1.1;10746": [
        "theinternet"
      ]
    },
    "EdgeMetadatas": {
      ";192.168.1.1;10746|theinternet": {
        "WithConnCountTCP": true
      }
    },
    "NodeMetadatas": {
      ";1.2.3.4;80": {
        "domain": "node-1.2.3.4",
        "name": "apache",
        "pid": "4000"
      },
      ";192.168.1.1;10746": {
        "domain": "node-192.168.1.1",
        "name": "wget",
        "pid": "4000"
      }
    }
  },
  "Network": {
    "Adjacency": {
      "hostX|;1.2.3.4": [
        ";192.168.1.1",
        "theinternet"
      ],
      "hostX|;192.168.1.1": [
        "theinternet",
        ";192.168.1.1"
      ]
    },
    "EdgeMetadatas": {
      ";192.168.1.1|theinternet": {
        "WithConnCountTCP": true
      }
    },
    "NodeMetadatas": {
      ";1.2.3.4": {
        "name": "1_2_3_4"
      },
      ";192.168.1.1": {
        "name": "192_168_1_1"
      }
    }
  },
  "HostMetadatas": {
    "hostX": {
      "Timestamp": "2014-10-28T23:12:23.451728464Z",
      "Hostname": "host-x",
      "Addresses": null,
      "LocalNets": [
        {
          "IP": "192.168.0.0",
          "Mask": "//8AAA=="
        }
      ],
      "OS": "linux"
    }
  }
}

View File

@@ -1,158 +0,0 @@
#!/bin/bash
# This script has a bunch of GCE-related functions:
# ./gce.sh setup - starts two VMs on GCE and configures them to run our integration tests
# . ./gce.sh; ./run_all.sh - set a bunch of environment variables for the tests
# ./gce.sh destroy - tear down the VMs
# ./gce.sh make_template - make a fresh VM template; update TEMPLATE_NAME first!
set -ex
KEY_FILE=/tmp/gce_private_key.json
SSH_KEY_FILE=$HOME/.ssh/gce_ssh_key
PROJECT=${PROJECT:-positive-cocoa-90213}
IMAGE=ubuntu-14-04
ZONE=us-central1-a
NUM_HOSTS=2
# Setup authentication
gcloud auth activate-service-account --key-file $KEY_FILE
gcloud config set project $PROJECT
function vm_names {
local names=
for i in $(seq 1 $NUM_HOSTS); do
names="twilkie-test-$i $names"
done
echo "$names"
}
# Delete all vms in this account
# Delete all vms in this account
# Each delete attempt is capped at 60s via timeout(1); the inner
# `|| true` means gcloud's own exit status is swallowed, so the only
# observable outcomes are 0 (command ran to completion) or 124 (timed
# out and was killed).
# NOTE(review): on 124 this breaks out of the loop rather than retrying,
# so a hung delete makes destroy give up silently — confirm intended.
function destroy {
	names="$(vm_names)"
	for i in {0..10}; do
		# gcloud instances delete can sometimes hang.
		case $(set +e; timeout 60s /bin/bash -c "gcloud compute instances delete --zone $ZONE -q $names >/dev/null 2>&1 || true"; echo $?) in
		0)
			return 0
			;;
		124)
			# 124 means it timed out
			break
			;;
		*)
			return 1
		esac
	done
}
function external_ip {
gcloud compute instances list $1 --format=yaml | grep "^ natIP\:" | cut -d: -f2 | tr -d ' '
}
function internal_ip {
gcloud compute instances list $1 --format=yaml | grep "^ networkIP\:" | cut -d: -f2 | tr -d ' '
}
# try_connect polls ssh connectivity to host $1, retrying for ~20s.
# NOTE(review): if the host never becomes reachable the loop falls
# through and the function still returns success — confirm this
# best-effort behaviour is intended (a non-zero return here would abort
# the script under `set -e`).
function try_connect {
	for i in {0..10}; do
		ssh -t $1 true && return
		sleep 2
	done
}
function install_on {
name=$1
otherpeers=$2
ssh -t $name sudo bash -x -s <<EOF
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9;
echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;
apt-get update -qq;
apt-get install -q -y --force-yes --no-install-recommends lxc-docker ethtool emacs23-nox git make binutils mercurial libpcap-dev gcc;
usermod -a -G docker vagrant;
echo 'DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375"' >> /etc/default/docker;
service docker restart;
wget -q https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz;
tar -C /usr/local -xzf go1.4.2.linux-amd64.tar.gz;
/usr/local/go/bin/go clean -i net
/usr/local/go/bin/go install -tags netgo std
EOF
ssh -t $name bash -x -s <<EOF
echo "export PATH=$PATH:/usr/local/go/bin:~/bin" >>~/.profile
echo "export GOPATH=$HOME" >>~/.profile
. ~/.profile
EOF
ssh -t $name bash -x -s <<EOF
sudo curl -L git.io/weave -o /usr/local/bin/weave
sudo chmod a+x /usr/local/bin/weave
weave launch $otherpeers
EOF
}
# Create new set of VMS
function setup {
destroy
names="$(vm_names)"
gcloud compute instances create $names --image $IMAGE --zone $ZONE
gcloud compute config-ssh --ssh-key-file $SSH_KEY_FILE
for name in $names; do
hostname="$name.$ZONE.$PROJECT"
# Add the remote ip to the local /etc/hosts
sudo -- sh -c "echo \"$(external_ip $name) $hostname\" >>/etc/hosts"
try_connect $hostname
# Add the local ips to the remote /etc/hosts
otherpeers=""
for othername in $names; do
otherip="$(internal_ip $othername)"
otherpeers="$otherip $otherpeers"
entry="$otherip $othername.$ZONE.$PROJECT"
ssh -t "$hostname" "sudo -- sh -c \"echo \\\"$entry\\\" >>/etc/hosts\""
done
install_on $hostname "$otherpeers"
done
for name in $names; do
hostname="$name.$ZONE.$PROJECT"
ssh -t $hostname bash -x -s <<EOF
. ~/.profile
git clone http://github.com/weaveworks/scope.git
cd scope
git checkout master
make deps
make RUN_FLAGS=
./scope launch
EOF
done
}
function hosts {
hosts=
args=
for name in $(vm_names); do
hostname="$name.$ZONE.$PROJECT"
hosts="$hostname $hosts"
args="--add-host=$hostname:$(internal_ip $name) $args"
done
export SSH=ssh
export HOSTS="$hosts"
export WEAVE_DOCKER_ARGS="$args"
}
case "$1" in
setup)
setup
;;
hosts)
hosts
;;
destroy)
destroy
;;
esac

View File

@@ -1,117 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"math/rand"
"net"
"strconv"
"time"
"github.com/weaveworks/scope/common/xfer"
"github.com/weaveworks/scope/probe/appclient"
"github.com/weaveworks/scope/probe/process"
"github.com/weaveworks/scope/report"
)
// main runs demoprobe: it connects to the target app and publishes a
// freshly fabricated demo report every publish.interval, forever.
func main() {
	var (
		publish         = flag.String("publish", fmt.Sprintf("localhost:%d", xfer.AppPort), "publish target")
		publishInterval = flag.Duration("publish.interval", 1*time.Second, "publish (output) interval")
		hostCount       = flag.Int("hostcount", 10, "Number of demo hosts to generate")
	)
	flag.Parse()
	client, err := appclient.NewAppClient(appclient.ProbeConfig{
		Token:    "demoprobe",
		ProbeID:  "demoprobe",
		Insecure: false,
	}, *publish, *publish, nil)
	if err != nil {
		log.Fatal(err)
	}
	rp := appclient.NewReportPublisher(client, false)
	// Seed once so every run produces different random topologies.
	rand.Seed(time.Now().UnixNano())
	for range time.Tick(*publishInterval) {
		if err := rp.Publish(demoReport(*hostCount)); err != nil {
			log.Print(err)
		}
	}
}
// demoReport fabricates a report containing nodeCount local hosts plus two
// fixed non-local ones, wired together by nodeCount*2 random connections
// drawn from a small pool of plausible client/server process pairs.
func demoReport(nodeCount int) report.Report {
	r := report.MakeReport()
	// Make up some plausible IPv4 numbers.
	hosts := []string{}
	ip := [4]int{192, 168, 1, 1}
	for range make([]struct{}, nodeCount) {
		hosts = append(hosts, fmt.Sprintf("%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3]))
		ip[3]++
		// Roll over to the next /24 after .200 to stay in "plausible" range.
		if ip[3] > 200 {
			ip[2]++
			ip[3] = 1
		}
	}
	hosts = append(hosts, []string{"1.2.3.4", "2.3.4.5"}...) // Some non-local ones, too.
	_, localNet, err := net.ParseCIDR("192.168.0.0/16")
	if err != nil {
		panic(err)
	}
	type conn struct {
		srcProc, dstProc string
		dstPort          int
	}
	// Pool of client/server process pairs to pick connections from.
	procPool := []conn{
		{srcProc: "curl", dstPort: 80, dstProc: "apache"},
		{srcProc: "wget", dstPort: 80, dstProc: "apache"},
		{srcProc: "curl", dstPort: 80, dstProc: "nginx"},
		{srcProc: "curl", dstPort: 8080, dstProc: "app1"},
		{srcProc: "nginx", dstPort: 8080, dstProc: "app1"},
		{srcProc: "nginx", dstPort: 8080, dstProc: "app2"},
		{srcProc: "nginx", dstPort: 8080, dstProc: "app3"},
	}
	connectionCount := nodeCount * 2
	for i := 0; i < connectionCount; i++ {
		var (
			c         = procPool[rand.Intn(len(procPool))]
			src       = hosts[rand.Intn(len(hosts))]
			dst       = hosts[rand.Intn(len(hosts))]
			srcPort   = rand.Intn(50000) + 10000
			srcPortID = report.MakeEndpointNodeID("", src, strconv.Itoa(srcPort))
			dstPortID = report.MakeEndpointNodeID("", dst, strconv.Itoa(c.dstPort))
		)
		// Endpoint topology
		r.Endpoint = r.Endpoint.AddNode(report.MakeNodeWith(srcPortID, map[string]string{
			process.PID: "4000",
			"name":      c.srcProc,
			"domain":    "node-" + src,
		}).
			WithEdge(dstPortID, report.EdgeMetadata{}))
		r.Endpoint = r.Endpoint.AddNode(report.MakeNodeWith(dstPortID, map[string]string{
			process.PID: "4000",
			"name":      c.dstProc,
			"domain":    "node-" + dst,
		}).
			WithEdge(srcPortID, report.EdgeMetadata{}))
		// Host data
		r.Host = r.Host.AddNode(report.MakeNodeWith("hostX", map[string]string{
			"ts":             time.Now().UTC().Format(time.RFC3339Nano),
			"host_name":      "host-x",
			"local_networks": localNet.String(),
			"os":             "linux",
		}))
	}
	return r
}
// newu64 returns a pointer to a copy of value.
func newu64(value uint64) *uint64 {
	v := value
	return &v
}

View File

@@ -1,19 +0,0 @@
.PHONY: all vet lint build test clean
all: build test vet lint
vet:
go vet ./...
lint:
golint .
build:
go build
test:
go test
clean:
go clean

View File

@@ -1,206 +0,0 @@
package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
"strings"
)
const (
perms = 0777
)
var (
lineMatcher = regexp.MustCompile(`^\s*[a-z\-]+\-(\d+)\s+\[(\d{3})] (?:\.|1){4} ([\d\.]+): (.*)$`)
enterMatcher = regexp.MustCompile(`^([\w_]+)\((.*)\)$`)
argMatcher = regexp.MustCompile(`(\w+): (\w+)`)
exitMatcher = regexp.MustCompile(`^([\w_]+) -> (\w+)$`)
)
// Ftrace is a tracer using ftrace...
type Ftrace struct {
ftraceRoot string
root string
outstanding map[int]*syscall // map from pid (readlly tid) to outstanding syscall
}
type syscall struct {
pid int
cpu int
ts float64
name string
args map[string]string
returnCode int64
}
// findDebugFS scans /proc/mounts and returns the mount point of the
// debugfs filesystem, or an error if none is mounted.
func findDebugFS() (string, error) {
	contents, err := ioutil.ReadFile("/proc/mounts")
	if err != nil {
		return "", err
	}
	scanner := bufio.NewScanner(bytes.NewBuffer(contents))
	for scanner.Scan() {
		// /proc/mounts fields: device, mount point, fs type, ...
		fields := strings.Fields(scanner.Text())
		if len(fields) < 3 {
			continue
		}
		if fields[2] == "debugfs" {
			return fields[1], nil
		}
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}
	return "", fmt.Errorf("Not found")
}
// NewFtracer constucts a new Ftrace instance.
func NewFtracer() (*Ftrace, error) {
root, err := findDebugFS()
if err != nil {
return nil, err
}
scopeRoot := path.Join(root, "tracing", "instances", "scope")
if err := os.Mkdir(scopeRoot, perms); err != nil && os.IsExist(err) {
if err := os.Remove(scopeRoot); err != nil {
return nil, err
}
if err := os.Mkdir(scopeRoot, perms); err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
return &Ftrace{
ftraceRoot: root,
root: scopeRoot,
outstanding: map[int]*syscall{},
}, nil
}
// destroy removes our per-scope tracing instance directory.
func (f *Ftrace) destroy() error {
	return os.Remove(f.root)
}

// enableTracing turns tracing on globally and then for our instance.
func (f *Ftrace) enableTracing() error {
	// need to enable tracing at root to get trace_pipe to block in my instance. Weird
	if err := ioutil.WriteFile(path.Join(f.ftraceRoot, "tracing", "tracing_on"), []byte("1"), perms); err != nil {
		return err
	}
	return ioutil.WriteFile(path.Join(f.root, "tracing_on"), []byte("1"), perms)
}
// disableTracing turns tracing off for our instance and then for the
// root tracer (which enableTracing switched on).
func (f *Ftrace) disableTracing() error {
	if err := ioutil.WriteFile(path.Join(f.root, "tracing_on"), []byte("0"), perms); err != nil {
		return err
	}
	// Was writing "1" here, which re-enabled the global tracer on the
	// way out; write "0" so disable actually disables.
	return ioutil.WriteFile(path.Join(f.ftraceRoot, "tracing", "tracing_on"), []byte("0"), perms)
}
// enableEvent switches on a single ftrace event (e.g. class "syscalls",
// event "sys_enter_connect") within our tracing instance.
func (f *Ftrace) enableEvent(class, event string) error {
	return ioutil.WriteFile(path.Join(f.root, "events", class, event, "enable"), []byte("1"), perms)
}
// mustAtoi converts a decimal string to an int, panicking on bad input.
func mustAtoi(a string) int {
	n, err := strconv.Atoi(a)
	if err != nil {
		panic(err)
	}
	return n
}
// mustAtof converts a string to a float64, panicking on bad input.
func mustAtof(a string) float64 {
	f, err := strconv.ParseFloat(a, 64)
	if err != nil {
		panic(err)
	}
	return f
}
// events tails this instance's trace_pipe forever, pairing each
// sys_enter line with its matching sys_exit (keyed by pid/tid via the
// outstanding map) and emitting one completed syscall per pair on out.
// I/O and parse failures panic, as this runs in its own goroutine.
func (f *Ftrace) events(out chan<- *syscall) {
	file, err := os.Open(path.Join(f.root, "trace_pipe"))
	if err != nil {
		panic(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		matches := lineMatcher.FindStringSubmatch(scanner.Text())
		if matches == nil {
			continue
		}
		pid := mustAtoi(matches[1])
		log := matches[4]
		if enterMatches := enterMatcher.FindStringSubmatch(log); enterMatches != nil {
			// Syscall entry: parse name/args and record as outstanding.
			name := enterMatches[1]
			args := map[string]string{}
			for _, arg := range argMatcher.FindAllStringSubmatch(enterMatches[2], -1) {
				args[arg[1]] = arg[2]
			}
			s := &syscall{
				pid:  pid,
				cpu:  mustAtoi(matches[2]),
				ts:   mustAtof(matches[3]),
				name: strings.TrimPrefix(name, "sys_"),
				args: args,
			}
			f.outstanding[pid] = s
		} else if exitMatches := exitMatcher.FindStringSubmatch(log); exitMatches != nil {
			// Syscall exit: complete and emit the outstanding entry, if any.
			s, ok := f.outstanding[pid]
			if !ok {
				continue
			}
			delete(f.outstanding, pid)
			// Parsed as unsigned then reinterpreted as int64, presumably so
			// negative errno values round-trip — TODO confirm format.
			returnCode, err := strconv.ParseUint(exitMatches[2], 0, 64)
			if err != nil {
				panic(err)
			}
			s.returnCode = int64(returnCode)
			out <- s
		} else {
			// NOTE(review): no trailing newline, so unmatched lines run
			// together on stdout — confirm whether intended.
			fmt.Printf("Unmatched: %s", log)
		}
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
}
// start enables the socket-lifecycle syscall events we care about
// (socket/connect/accept/accept4/close, enter and exit) and then turns
// tracing on.
func (f *Ftrace) start() error {
	for _, e := range []struct{ class, event string }{
		{"syscalls", "sys_enter_socket"},
		{"syscalls", "sys_exit_socket"},
		{"syscalls", "sys_enter_connect"},
		{"syscalls", "sys_exit_connect"},
		{"syscalls", "sys_enter_accept"},
		{"syscalls", "sys_exit_accept"},
		{"syscalls", "sys_enter_accept4"},
		{"syscalls", "sys_exit_accept4"},
		{"syscalls", "sys_enter_close"},
		{"syscalls", "sys_exit_close"},
	} {
		if err := f.enableEvent(e.class, e.event); err != nil {
			return err
		}
	}
	return f.enableTracing()
}

// stop disables tracing and then tears down the tracing instance.
// NOTE(review): the error from the deferred destroy is discarded.
func (f *Ftrace) stop() error {
	defer f.destroy()
	return f.disableTracing()
}

View File

@@ -1,127 +0,0 @@
package main
import (
"fmt"
"strconv"
"time"
"github.com/bluele/gcache"
)
const cacheSize = 500
// On every connect and accept, we lookup the local addr
// As this is expensive, we cache the result
var fdAddrCache = gcache.New(cacheSize).LRU().Expiration(15 * time.Second).Build()
type fdCacheKey struct {
pid int
fd int
}
type fdCacheValue struct {
addr uint32
port uint16
}
// getCachedLocalAddr returns the local address and port for the given
// pid/fd pair, consulting the LRU cache before falling back to the
// expensive /proc lookup in getLocalAddr.
func getCachedLocalAddr(pid, fd int) (uint32, uint16, error) {
	key := fdCacheKey{pid, fd}
	// A Get error just means a cache miss; the original ignored the
	// error and tested val != nil, which silently discarded it. Only
	// trust a hit when err is nil and a value is present.
	if cached, err := fdAddrCache.Get(key); err == nil && cached != nil {
		v := cached.(fdCacheValue)
		return v.addr, v.port, nil
	}
	addr, port, err := getLocalAddr(pid, fd)
	if err != nil {
		return 0, 0, err
	}
	fdAddrCache.Set(key, fdCacheValue{addr, port})
	return addr, port, nil
}
// On every connect or accept, we cache the syscall that caused
// it for matching with a connection from conntrack
var syscallCache = gcache.New(cacheSize).LRU().Expiration(15 * time.Second).Build()
type syscallCacheKey struct {
localAddr uint32
localPort uint16
}
type syscallCacheValue *syscall
// One ever conntrack connection, we cache it by local addr, port to match with
// a future syscall
var conntrackCache = gcache.New(cacheSize).LRU().Expiration(15 * time.Second).Build()
type conntrackCacheKey syscallCacheKey
// And keep a list of successfully matched connection, for us to close out
// when we get the close syscall
// main wires the ftrace-based connection tracer together: it enables
// tracing, consumes completed syscalls from the events goroutine, and
// dispatches on the syscall name. Currently only connect is handled in
// earnest; socket/accept/close are stubs.
func main() {
	ftrace, err := NewFtracer()
	if err != nil {
		panic(err)
	}
	// Surface setup failures instead of silently tracing nothing
	// (the original discarded this error).
	if err := ftrace.start(); err != nil {
		panic(err)
	}
	defer ftrace.stop()
	syscalls := make(chan *syscall, 100)
	go ftrace.events(syscalls)
	onSocket := func(s *syscall) {
	}
	onConnection := func(s *syscall) {
		// Only successful connects are interesting.
		if s.returnCode != 0 {
			return
		}
		fdStr, ok := s.args["fd"]
		if !ok {
			panic("no fd") // was "no pid" — the missing key is fd
		}
		// Parse the fd argument. Base 0 accepts plain decimal as well as
		// the 0x-prefixed hex that ftrace prints for syscall args; the
		// original ParseInt(fdStr, 32, 16) used base 32 with a 16-bit
		// limit, which mis-parses both forms.
		// NOTE(review): confirm against actual trace_pipe output.
		fd64, err := strconv.ParseInt(fdStr, 0, 64)
		if err != nil {
			panic(err)
		}
		fd := int(fd64)
		addr, port, err := getCachedLocalAddr(s.pid, fd)
		if err != nil {
			fmt.Printf("Failed to get local addr for pid=%d fd=%d: %v\n", s.pid, fd, err)
			return
		}
		fmt.Printf("%+v %d %d\n", s, addr, port)
		syscallCache.Set(syscallCacheKey{addr, port}, s)
	}
	onAccept := func(s *syscall) {
	}
	onClose := func(s *syscall) {
	}
	fmt.Println("Started")
	for {
		select {
		case s := <-syscalls:
			switch s.name {
			case "socket":
				onSocket(s)
			case "connect":
				onConnection(s)
			case "accept", "accept4":
				onAccept(s)
			case "close":
				onClose(s)
			}
		}
	}
}

View File

@@ -1,94 +0,0 @@
package main
import (
"bufio"
"fmt"
"os"
"regexp"
"strconv"
)
const (
socketPattern = `^socket:\[(\d+)\]$`
tcpPattern = `^\s*(?P<fd>\d+): (?P<localaddr>[A-F0-9]{8}):(?P<localport>[A-F0-9]{4}) ` +
`(?P<remoteaddr>[A-F0-9]{8}):(?P<remoteport>[A-F0-9]{4}) (?:[A-F0-9]{2}) (?:[A-F0-9]{8}):(?:[A-F0-9]{8}) ` +
`(?:[A-F0-9]{2}):(?:[A-F0-9]{8}) (?:[A-F0-9]{8}) \s+(?:\d+) \s+(?:\d+) (?P<inode>\d+)`
)
var (
socketRegex = regexp.MustCompile(socketPattern)
tcpRegexp = regexp.MustCompile(tcpPattern)
)
// getLocalAddr resolves the local IPv4 address and port of socket fd in
// process pid, by reading the fd's symlink to obtain its inode and then
// scanning /proc/<pid>/net/tcp for the row with that inode. Returns an
// error if the fd is not a socket or no matching row is found.
func getLocalAddr(pid, fd int) (addr uint32, port uint16, err error) {
	var (
		socket    string
		match     []string
		inode     int
		tcpFile   *os.File
		scanner   *bufio.Scanner
		candidate int
		port64    int64
		addr64    int64
	)
	// /proc/<pid>/fd/<fd> links to "socket:[<inode>]" for sockets.
	socket, err = os.Readlink(fmt.Sprintf("/proc/%d/fd/%d", pid, fd))
	if err != nil {
		return
	}
	match = socketRegex.FindStringSubmatch(socket)
	if match == nil {
		err = fmt.Errorf("Fd %d not a socket", fd)
		return
	}
	inode, err = strconv.Atoi(match[1])
	if err != nil {
		return
	}
	tcpFile, err = os.Open(fmt.Sprintf("/proc/%d/net/tcp", pid))
	if err != nil {
		return
	}
	defer tcpFile.Close()
	scanner = bufio.NewScanner(tcpFile)
	for scanner.Scan() {
		match = tcpRegexp.FindStringSubmatch(scanner.Text())
		if match == nil {
			continue
		}
		// match[6] is the inode column of the /proc/net/tcp row.
		candidate, err = strconv.Atoi(match[6])
		if err != nil {
			return
		}
		if candidate != inode {
			continue
		}
		// Local address and port are hex-encoded in the row.
		addr64, err = strconv.ParseInt(match[2], 16, 32)
		if err != nil {
			return
		}
		addr = uint32(addr64)
		// use a 32-bit int for the target, as the result is a uint16
		port64, err = strconv.ParseInt(match[3], 16, 32)
		if err != nil {
			return
		}
		port = uint16(port64)
		return
	}
	if err = scanner.Err(); err != nil {
		return
	}
	err = fmt.Errorf("Fd %d not found for proc %d", fd, pid)
	return
}

View File

@@ -1,19 +0,0 @@
.PHONY: all vet lint build test clean
all: build test vet lint
vet:
go vet ./...
lint:
golint .
build:
go build
test:
go test
clean:
go clean

View File

@@ -1,92 +0,0 @@
package main
import (
"fmt"
"math/rand"
"net"
"strconv"
"time"
"github.com/weaveworks/scope/report"
)
// Seed the global PRNG so each run produces a different demo report.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// DemoReport makes up a report.
func DemoReport(nodeCount int) report.Report {
r := report.MakeReport()
// Make up some plausible IPv4 numbers
hosts := []string{}
ip := [4]int{192, 168, 1, 1}
for range make([]struct{}, nodeCount) {
hosts = append(hosts, fmt.Sprintf("%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3]))
ip[3]++
if ip[3] > 200 {
ip[2]++
ip[3] = 1
}
}
// Some non-local ones.
hosts = append(hosts, []string{"1.2.3.4", "2.3.4.5"}...)
_, localNet, err := net.ParseCIDR("192.168.0.0/16")
if err != nil {
panic(err)
}
type conn struct {
srcProc, dstProc string
dstPort int
}
procPool := []conn{
{srcProc: "curl", dstPort: 80, dstProc: "apache"},
{srcProc: "wget", dstPort: 80, dstProc: "apache"},
{srcProc: "curl", dstPort: 80, dstProc: "nginx"},
{srcProc: "curl", dstPort: 8080, dstProc: "app1"},
{srcProc: "nginx", dstPort: 8080, dstProc: "app1"},
{srcProc: "nginx", dstPort: 8080, dstProc: "app2"},
{srcProc: "nginx", dstPort: 8080, dstProc: "app3"},
}
connectionCount := nodeCount * 8
for i := 0; i < connectionCount; i++ {
var (
c = procPool[rand.Intn(len(procPool))]
src = hosts[rand.Intn(len(hosts))]
dst = hosts[rand.Intn(len(hosts))]
srcPort = rand.Intn(50000) + 10000
srcPortID = report.MakeEndpointNodeID("", src, strconv.Itoa(srcPort))
dstPortID = report.MakeEndpointNodeID("", dst, strconv.Itoa(c.dstPort))
)
// Endpoint topology
r.Endpoint = r.Endpoint.AddNode(report.MakeNodeWith(srcPortID, map[string]string{
"pid": "4000",
"name": c.srcProc,
"domain": "node-" + src,
}).
WithEdge(dstPortID, report.EdgeMetadata{}))
r.Endpoint = r.Endpoint.AddNode(report.MakeNodeWith(dstPortID, map[string]string{
"pid": "4000",
"name": c.dstProc,
"domain": "node-" + dst,
}).
WithEdge(srcPortID, report.EdgeMetadata{}))
// Host data
r.Host = r.Host.AddNode(report.MakeNodeWith("hostX", map[string]string{
"ts": time.Now().UTC().Format(time.RFC3339Nano),
"host_name": "host-x",
"local_networks": localNet.String(),
"os": "linux",
}))
}
return r
}
// newu64 copies value onto the heap and returns its address.
func newu64(value uint64) *uint64 {
	out := new(uint64)
	*out = value
	return out
}

View File

@@ -1,16 +0,0 @@
package main
import (
"encoding/json"
"flag"
"log"
"os"
)
// main generates a single fabricated report (of -nodes hosts) and writes
// it as JSON to stdout.
func main() {
	nodes := flag.Int("nodes", 10, "node count")
	flag.Parse()
	if err := json.NewEncoder(os.Stdout).Encode(DemoReport(*nodes)); err != nil {
		log.Print(err)
	}
}

View File

@@ -1,122 +0,0 @@
package main
import (
"bytes"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"github.com/weaveworks/scope/render/detailed"
"github.com/weaveworks/scope/report"
)
// dot writes t to w as a Graphviz digraph: one node per summary (labels
// truncated to 20 characters) and one directed edge per adjacency entry.
func dot(w io.Writer, t detailed.NodeSummaries) {
	fmt.Fprintf(w, "digraph G {\n")
	fmt.Fprintf(w, "\toutputorder=edgesfirst;\n")
	fmt.Fprintf(w, "\toverlap=scale;\n")
	fmt.Fprintf(w, "\tnode [style=filled];\n")
	fmt.Fprintf(w, "\t\n")
	for id, rn := range t {
		label := rn.Label
		if len(label) > 20 {
			label = label[:20] + "..."
		}
		fmt.Fprintf(w, "\t%q [label=%q];\n", id, label)
		for _, other := range rn.Adjacency {
			fmt.Fprintf(w, "\t%q -> %q;\n", id, other)
		}
		fmt.Fprintf(w, "\t\n")
	}
	fmt.Fprintf(w, "}\n")
}
// handleHTML serves a trivial page that embeds the /svg endpoint,
// forwarding the request's query string to it.
// NOTE(review): rpt is unused here; presumably kept for signature
// symmetry with handleDot/handleSVG — confirm.
func handleHTML(rpt report.Report) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		logRequest("HTML", r)
		format := `<html><head></head><body><center><img src="/svg?%s" style="margin:5%;"/></center></body></html>`
		fmt.Fprintf(w, format, r.URL.RawQuery)
	}
}
// handleDot renders the requested topology of rpt and serves it as a
// Graphviz dot document. Query params: engine (default twopi, unused
// here beyond logging) and topology (default containers).
func handleDot(rpt report.Report) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		logRequest("Dot", r)
		r.ParseForm()
		var (
			engine   = getDefault(r.Form, "engine", "twopi")
			topology = getDefault(r.Form, "topology", "containers")
		)
		log.Printf("engine=%s topology=%s", engine, topology)
		t, err := renderTo(rpt, topology)
		if err != nil {
			log.Print(err)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("render %s to %d node(s)", topology, len(t))
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		dot(w, t)
	}
}
// handleSVG renders the requested topology of rpt to dot, pipes it
// through the chosen Graphviz engine (looked up on PATH), and serves the
// resulting SVG. Query params: engine (default twopi) and topology
// (default containers).
func handleSVG(rpt report.Report) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		logRequest("SVG", r)
		r.ParseForm()
		var (
			engine   = getDefault(r.Form, "engine", "twopi")
			topology = getDefault(r.Form, "topology", "containers")
		)
		log.Printf("engine=%s topology=%s", engine, topology)
		t, err := renderTo(rpt, topology)
		if err != nil {
			log.Print(err)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("render %s to %d node(s)", topology, len(t))
		vizcmd, err := exec.LookPath(engine)
		if err != nil {
			log.Print(err)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Buffer the dot output, then feed it to the Graphviz binary with
		// its stdout streamed straight into the response.
		var buf bytes.Buffer
		dot(&buf, t)
		w.Header().Set("Content-Type", "image/svg+xml")
		if err := (&exec.Cmd{
			Path:   vizcmd,
			Args:   []string{vizcmd, "-Tsvg"},
			Stdin:  &buf,
			Stdout: w,
			Stderr: os.Stderr,
		}).Run(); err != nil {
			log.Print(err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
}
// logRequest writes an access-log style line for an incoming request,
// prefixed with the handler name.
func logRequest(what string, r *http.Request) {
	log.Printf("> %s %s %s %s", what, r.RemoteAddr, r.Method, r.URL)
}
func getDefault(v url.Values, key, def string) string {
if val := v.Get(key); val != "" {
return val
}
return def
}

File diff suppressed because one or more lines are too long

View File

@@ -1,31 +0,0 @@
package main
import (
"encoding/json"
"flag"
"log"
"net/http"
"os"
"github.com/weaveworks/scope/report"
)
// main reads a JSON report from stdin and serves HTML/dot/SVG renderings
// of it over HTTP on the -listen address.
func main() {
	var (
		listen = flag.String("listen", ":8080", "HTTP listen address")
	)
	flag.Parse()
	log.Printf("reading /api/report from stdin...")
	var rpt report.Report
	if err := json.NewDecoder(os.Stdin).Decode(&rpt); err != nil {
		log.Fatal(err)
	}
	http.HandleFunc("/", handleHTML(rpt))
	http.HandleFunc("/dot", handleDot(rpt))
	http.HandleFunc("/svg", handleSVG(rpt))
	http.HandleFunc("/favicon.ico", func(w http.ResponseWriter, _ *http.Request) { http.Error(w, "Stop it", http.StatusTeapot) })
	log.Printf("listening on %s", *listen)
	// ListenAndServe always returns a non-nil error; the original
	// discarded it, exiting silently on e.g. a busy port.
	log.Fatal(http.ListenAndServe(*listen, nil))
}

View File

@@ -1,23 +0,0 @@
package main
import (
"fmt"
"github.com/weaveworks/scope/render"
"github.com/weaveworks/scope/render/detailed"
"github.com/weaveworks/scope/report"
)
// renderTo renders rpt with the renderer registered for the named
// topology and summarizes the result, or returns an error for an unknown
// topology name.
func renderTo(rpt report.Report, topology string) (detailed.NodeSummaries, error) {
	renderer, ok := map[string]render.Renderer{
		"processes":           render.FilterUnconnected(render.ProcessWithContainerNameRenderer),
		"processes-by-name":   render.FilterUnconnected(render.ProcessNameRenderer),
		"containers":          render.ContainerWithImageNameRenderer,
		"containers-by-image": render.ContainerImageRenderer,
		"hosts":               render.HostRenderer,
	}[topology]
	if !ok {
		return detailed.NodeSummaries{}, fmt.Errorf("unknown topology %v", topology)
	}
	return detailed.Summaries(rpt, renderer.Render(rpt, nil)), nil
}

View File

@@ -1,20 +0,0 @@
#!/bin/bash
export SSH_AUTH_SOCK=
remote() {
host=$1
shift
echo "[$host] Running: $@" >&2
gcloud compute ssh $host --command "$@"
}
for h in $(gcloud compute instances list | grep -v NAME | awk '{print $1}') ; do
cat ~/work/weave/repos/scope/scope.tar | remote $h "sudo docker load"
cat ~/work/weave/repos/scope/scope | remote $h "sudo tee /usr/local/bin/scope >/dev/null; sudo chmod a+x /usr/local/bin/scope"
if $(echo $h | grep -q "master") ; then
remote $h "sudo scope stop ; sudo DOCKER_BRIDGE=cbr0 scope launch --probe.docker.bridge cbr0 --probe.kubernetes true"
else
remote $h "sudo scope stop ; sudo DOCKER_BRIDGE=cbr0 scope launch --no-app --probe.docker.bridge cbr0 kubernetes-master"
fi
done

View File

@@ -1,4 +0,0 @@
collection/collection
query/query
control/control
static/static

View File

@@ -1,11 +0,0 @@
BUILD_IN_CONTAINER=true

# Rebuild the frontend image whenever anything under frontend/ changes;
# the .uptodate stamp file records the last successful build.
all: .frontend.uptodate

.frontend.uptodate: frontend/*
	docker build -t weaveworks/scope-frontend frontend/
	touch $@

clean:
	# `./...` is the recursive package wildcard; the original `./..`
	# pointed at the parent directory only.
	go clean ./...
	rm -f .*.uptodate

View File

@@ -1,12 +0,0 @@
# Nginx-based reverse proxy that fronts the Scope micro-services.
FROM ubuntu
MAINTAINER Weaveworks Inc <help@weave.works>
# Install nginx and trim the apt cache to keep the image small.
RUN apt-get update && \
	apt-get install -y nginx && \
	rm -rf /var/lib/apt/lists/*
# Drop the stock site and send nginx logs to the container's stdout/stderr.
RUN rm /etc/nginx/sites-available/default && \
	ln -sf /dev/stdout /var/log/nginx/access.log && \
	ln -sf /dev/stderr /var/log/nginx/error.log
COPY default.conf /etc/nginx/conf.d/
COPY api.json /home/weave/
EXPOSE 80
# Run in the foreground so docker can supervise the process.
CMD ["nginx", "-g", "daemon off;"]

View File

@@ -1 +0,0 @@
{"id": "sccopeservice", "version": "Multimagic"}

View File

@@ -1,51 +0,0 @@
# Reverse proxy that stitches the Scope micro-services (collection,
# query, controls, pipes) back into the single API the UI expects.
server {
    # Listen on port 80 insecurely until the probes all support https
    listen 80;

    # Resolve the *.weave.local service names below via weaveDNS.
    resolver dns.weave.local;

    # topology (from UI) websockets
    location ~ ^/api/topology/[^/]+/ws {
        proxy_pass http://query.weave.local$request_uri;
        # Pass the Upgrade handshake through so websockets survive the proxy.
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # control (from probe) websockets
    location = /api/control/ws {
        proxy_pass http://controls.weave.local$request_uri;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # control (from probe & UI) websockets
    location ~ ^/api/pipe/[^/]+(/probe)? {
        proxy_pass http://pipes.weave.local$request_uri;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    location = /api/report {
        proxy_pass http://collection.weave.local$request_uri;
    }

    location /api/topology {
        proxy_pass http://query.weave.local$request_uri;
    }

    location /api/control {
        proxy_pass http://controls.weave.local$request_uri;
    }

    # Static version file
    location = /api {
        alias /home/weave/api.json;
    }

    # The rest will be served by the collection server (any one can do it)
    location / {
        proxy_pass http://collection.weave.local$request_uri;
    }
}

View File

@@ -1,58 +0,0 @@
#!/bin/bash
# Launch the micro-service flavour of Scope on the local weave network.
set -eux
eval $(weave env)
# Must be exported: `docker run -e CHECKPOINT_DISABLE` (below) only copies
# variables from the *exported* environment. The original unexported
# assignment left the variable empty inside every container.
export CHECKPOINT_DISABLE=true

# start_container <replicas> <image> <basename> [docker args...] -- [container args...]
# Runs <replicas> copies of <image> named <basename>1..N, all registered
# under the DNS name <basename>.weave.local. Any same-named container is
# removed first.
start_container() {
    local replicas=$1
    local image=$2
    local basename=$3
    shift 3
    local hostname=${basename}.weave.local

    # Everything before "--" goes to docker, the rest to the container.
    local docker_args=
    while [ "$#" -gt 0 ]; do
        case "$1" in
            --)
                shift
                break
                ;;
            *)
                docker_args="${docker_args} $1"
                shift
                ;;
        esac
    done
    local container_args="$@"

    for i in $(seq ${replicas}); do
        if docker inspect ${basename}${i} >/dev/null 2>&1; then
            docker rm -f ${basename}${i}
        fi
        docker run -d -e CHECKPOINT_DISABLE --name=${basename}${i} --hostname=${hostname} \
            ${docker_args} ${image} ${container_args}
    done
}

# These are the infrastructure bits - do not use these containers in production
start_container 1 deangiberson/aws-dynamodb-local dynamodb
start_container 1 pakohan/elasticmq sqs
start_container 1 progrium/consul consul -p 8400:8400 -p 8500:8500 -p 8600:53/udp -- -server -bootstrap -ui-dir /ui

# These are the micro services
common_args="--no-probe --app.weave.addr= --app.http.address=:80"
start_container 2 weaveworks/scope collection -- ${common_args} \
    --app.collector=dynamodb://abc:123@dynamodb.weave.local:8000 \
    --app.aws.create.tables=true
start_container 2 weaveworks/scope query -- ${common_args} \
    --app.collector=dynamodb://abc:123@dynamodb.weave.local:8000
start_container 2 weaveworks/scope controls -- ${common_args} \
    --app.control.router=sqs://abc:123@sqs.weave.local:9324
start_container 2 weaveworks/scope pipes -- ${common_args} \
    --app.pipe.router=consul://consul.weave.local:8500/pipes/ \
    --app.consul.inf=ethwe

# And we bring it all together with a reverse proxy
start_container 1 weaveworks/scope-frontend frontend --add-host=dns.weave.local:$(weave docker-bridge-ip) --publish=4040:80

View File

@@ -1 +0,0 @@
pipeclient

View File

@@ -1,77 +0,0 @@
package main
import (
"log"
"os"
"github.com/gorilla/websocket"
//"golang.org/x/crypto/ssh/terminal"
"github.com/davecgh/go-spew/spew"
)
// main connects to the websocket pipe URL given as the sole CLI argument
// and bridges it to the terminal: remote messages are spew-dumped and
// copied to stdout, while stdin is forwarded as binary messages.
func main() {
	if len(os.Args) < 2 {
		log.Fatal("must specify url")
	}
	url := os.Args[1]

	log.Printf("Connecting to %s", url)

	// Raw-terminal mode is disabled for now; kept for future use.
	//oldState, err := terminal.MakeRaw(0)
	//if err != nil {
	//	panic(err)
	//}
	//defer terminal.Restore(0, oldState)

	dialer := websocket.Dialer{}
	conn, _, err := dialer.Dial(url, nil)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Each loop closes its quit channel on exit, so main can tell when
	// either direction of the bridge has failed.
	readQuit := make(chan struct{})
	writeQuit := make(chan struct{})

	// Read-from-UI loop
	go func() {
		defer close(readQuit)
		for {
			_, buf, err := conn.ReadMessage() // TODO type should be binary message
			if err != nil {
				log.Printf("Error reading websocket: %v", err)
				return
			}
			spew.Dump(buf)
			if _, err := os.Stdout.Write(buf); err != nil {
				log.Printf("Error writing stdout: %v", err)
				return
			}
		}
	}()

	// Write-to-UI loop
	go func() {
		defer close(writeQuit)
		buf := make([]byte, 1024)
		for {
			n, err := os.Stdin.Read(buf)
			if err != nil {
				log.Printf("Error reading stdin: %v", err)
				return
			}
			if err := conn.WriteMessage(websocket.BinaryMessage, buf[:n]); err != nil {
				log.Printf("Error writing websocket: %v", err)
				return
			}
		}
	}()

	// block until one of the goroutines exits
	// this convoluted mechanism is to ensure we only close the websocket once.
	select {
	case <-readQuit:
	case <-writeQuit:
	}
}

File diff suppressed because one or more lines are too long

View File

@@ -1,332 +0,0 @@
package sniff
import (
"io"
"log"
"net"
"strconv"
"sync/atomic"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/weaveworks/scope/report"
)
/*
It's important to adjust GOMAXPROCS when using sniffer:
if *captureEnabled {
var sniffers int
for _, iface := range strings.Split(*captureInterfaces, ",") {
source, err := sniff.NewSource(iface)
if err != nil {
log.Printf("warning: %v", err)
continue
}
defer source.Close()
log.Printf("capturing packets on %s", iface)
reporters = append(reporters, sniff.New(hostID, localNets, source, *captureOn, *captureOff))
sniffers++
}
// Packet capture can block OS threads on Linux, so we need to provide
// sufficient overhead in GOMAXPROCS.
if have, want := runtime.GOMAXPROCS(-1), (sniffers + 1); have < want {
runtime.GOMAXPROCS(want)
}
}
func interfaces() string {
ifaces, err := net.Interfaces()
if err != nil {
log.Print(err)
return ""
}
a := make([]string, 0, len(ifaces))
for _, iface := range ifaces {
a = append(a, iface.Name)
}
return strings.Join(a, ",")
}
Also, the capture on/off sampling methodology is probably not worth keeping.
*/
// Sniffer is a packet-sniffing reporter.
type Sniffer struct {
	hostID    string
	localNets report.Networks         // networks considered "local" when building edges
	reports   chan chan report.Report // report requests; the reply is sent on the inner chan
	parser    *gopacket.DecodingLayerParser
	decoded   []gopacket.LayerType // scratch: layer types decoded from the last packet
	// Per-layer decoder state, reused for every packet (see New).
	eth   layers.Ethernet
	ip4   layers.IPv4
	ip6   layers.IPv6
	tcp   layers.TCP
	udp   layers.UDP
	icmp4 layers.ICMPv4
	icmp6 layers.ICMPv6
}
// New returns a new sniffing reporter that samples traffic by turning its
// packet capture facilities on and off. Note that the on and off durations
// represent a way to bound CPU burn. Effective sample rate needs to be
// calculated as (packets decoded / packets observed).
func New(hostID string, localNets report.Networks, src gopacket.ZeroCopyPacketDataSource, on, off time.Duration) *Sniffer {
	s := &Sniffer{
		hostID:    hostID,
		localNets: localNets,
		reports:   make(chan chan report.Report),
	}
	// The parser decodes into the Sniffer's own layer structs, so packet
	// decoding allocates nothing per packet.
	s.parser = gopacket.NewDecodingLayerParser(
		layers.LayerTypeEthernet,
		&s.eth, &s.ip4, &s.ip6, &s.tcp, &s.udp, &s.icmp4, &s.icmp6,
	)
	// The background loop owns all mutable state from here on.
	go s.loop(src, on, off)
	return s
}
// Report implements the Reporter interface. It asks the background loop
// for the current report and blocks until the loop replies.
func (s *Sniffer) Report() (report.Report, error) {
	reply := make(chan report.Report)
	s.reports <- reply
	rpt := <-reply
	return rpt, nil
}
// loop owns the in-progress report. It multiplexes four events: decoded
// packets to merge, the on/off duty-cycle timers, requests for the
// current report, and termination of the read goroutine (which signals
// the packet source is exhausted).
func (s *Sniffer) loop(src gopacket.ZeroCopyPacketDataSource, on, off time.Duration) {
	var (
		process = uint64(1)               // initially enabled
		total   = uint64(0)               // total packets seen
		count   = uint64(0)               // count of packets captured
		packets = make(chan Packet, 1024) // decoded packets
		rpt     = report.MakeReport()     // the report we build
		turnOn  = (<-chan time.Time)(nil) // signal to start capture (initially enabled)
		turnOff = time.After(on)          // signal to stop capture
		done    = make(chan struct{})     // when src is finished, we're done too
	)

	// As a special case, if our off duty cycle is zero, i.e. 100% sample
	// rate, we simply disable the turn-off signal channel.
	if off == 0 {
		turnOff = nil
	}

	// The read goroutine shares process/total/count with this loop via
	// atomics; it owns all blocking reads from the packet source.
	go func() {
		s.read(src, packets, &process, &total, &count)
		close(done)
	}()

	for {
		select {
		case p := <-packets:
			s.Merge(p, &rpt)

		case <-turnOn:
			atomic.StoreUint64(&process, 1) // enable packet capture
			turnOn = nil                    // disable the on switch
			turnOff = time.After(on)        // enable the off switch

		case <-turnOff:
			atomic.StoreUint64(&process, 0) // disable packet capture
			turnOn = time.After(off)        // enable the on switch
			turnOff = nil                   // disable the off switch

		case c := <-s.reports:
			// Snapshot sampling stats, inflate counts to compensate for
			// sampling, hand the report out, then start a fresh one.
			rpt.Sampling.Count = atomic.LoadUint64(&count)
			rpt.Sampling.Total = atomic.LoadUint64(&total)
			interpolateCounts(rpt)
			c <- rpt
			atomic.StoreUint64(&count, 0)
			atomic.StoreUint64(&total, 0)
			rpt = report.MakeReport()

		case <-done:
			return
		}
	}
}
// interpolateCounts compensates for sampling by artificially inflating counts
// throughout the report. It should be run once for each report, within the
// probe, before it gets emitted into the rest of the system.
// Counts are scaled in place through the *uint64 fields of each
// EdgeMetadata; a rate >= 1.0 (no sampling) leaves the report untouched.
func interpolateCounts(r report.Report) {
	rate := r.Sampling.Rate()
	if rate >= 1.0 {
		return
	}
	factor := 1.0 / rate
	for _, topology := range r.Topologies() {
		for _, nmd := range topology.Nodes {
			nmd.Edges.ForEach(func(_ string, emd report.EdgeMetadata) {
				if emd.EgressPacketCount != nil {
					*emd.EgressPacketCount = uint64(float64(*emd.EgressPacketCount) * factor)
				}
				if emd.IngressPacketCount != nil {
					*emd.IngressPacketCount = uint64(float64(*emd.IngressPacketCount) * factor)
				}
				if emd.EgressByteCount != nil {
					*emd.EgressByteCount = uint64(float64(*emd.EgressByteCount) * factor)
				}
				if emd.IngressByteCount != nil {
					*emd.IngressByteCount = uint64(float64(*emd.IngressByteCount) * factor)
				}
			})
		}
	}
}
// Packet is an intermediate, decoded form of a packet, with the information
// that the Scope data model cares about. Designed to decouple the packet data
// source loop, which should be as fast as possible, and the process of
// merging the packet information to a report, which may take some time and
// allocations.
type Packet struct {
	SrcIP, DstIP     string
	SrcPort, DstPort string // empty unless a TCP/UDP layer was decoded
	Network, Transport int  // byte counts
}
// read is the capture hot path: it pulls raw packets from src, honours
// the process on/off flag, decodes the layers Scope cares about into a
// Packet, and hands it to dst. When dst is full the packet is dropped
// (with a log line) rather than blocking capture. total counts every
// packet observed; count only those successfully queued.
func (s *Sniffer) read(src gopacket.ZeroCopyPacketDataSource, dst chan Packet, process, total, count *uint64) {
	var (
		data []byte
		err  error
	)
	for {
		data, _, err = src.ZeroCopyReadPacketData()
		if err == io.EOF {
			return // done
		}
		if err != nil {
			log.Printf("sniffer: read: %v", err)
			continue
		}
		atomic.AddUint64(total, 1)
		// Capture switched off by the duty cycle: observe, don't decode.
		if atomic.LoadUint64(process) == 0 {
			continue
		}
		if err := s.parser.DecodeLayers(data, &s.decoded); err != nil {
			// We'll always get an error when we encounter a layer type for
			// which we haven't configured a decoder.
		}
		var p Packet
		for _, t := range s.decoded {
			switch t {
			case layers.LayerTypeEthernet:
				//
			case layers.LayerTypeICMPv4:
				p.Network += len(s.icmp4.Payload)
			case layers.LayerTypeICMPv6:
				p.Network += len(s.icmp6.Payload)
			case layers.LayerTypeIPv4:
				p.SrcIP = s.ip4.SrcIP.String()
				p.DstIP = s.ip4.DstIP.String()
				p.Network += len(s.ip4.Payload)
			case layers.LayerTypeIPv6:
				p.SrcIP = s.ip6.SrcIP.String()
				p.DstIP = s.ip6.DstIP.String()
				p.Network += len(s.ip6.Payload)
			case layers.LayerTypeTCP:
				p.SrcPort = strconv.Itoa(int(s.tcp.SrcPort))
				p.DstPort = strconv.Itoa(int(s.tcp.DstPort))
				p.Transport += len(s.tcp.Payload)
			case layers.LayerTypeUDP:
				p.SrcPort = strconv.Itoa(int(s.udp.SrcPort))
				p.DstPort = strconv.Itoa(int(s.udp.DstPort))
				p.Transport += len(s.udp.Payload)
			}
		}
		// Non-blocking send: dropping is preferable to stalling capture.
		select {
		case dst <- p:
			atomic.AddUint64(count, 1)
		default:
			log.Printf("sniffer dropped packet")
		}
	}
}
// Merge puts the packet into the report.
//
// Note that, for the moment, we encode bidirectional traffic as ingress and
// egress traffic on a single edge whose src is local and dst is remote. That
// is, if we see a packet from the remote addr 9.8.7.6 to the local addr
// 1.2.3.4, we apply it as *ingress* on the edge (1.2.3.4 -> 9.8.7.6).
//
// NOTE(review): only p.Transport contributes to the byte counters here;
// p.Network is unused by this version — confirm that is intended.
func (s *Sniffer) Merge(p Packet, rpt *report.Report) {
	// Without both IPs we cannot name either endpoint.
	if p.SrcIP == "" || p.DstIP == "" {
		return
	}

	// One end of the traffic has to be local. Otherwise, we don't know how to
	// construct the edge.
	//
	// If we need to get around this limitation, we may be able to change the
	// semantics of the report, and allow the src side of edges to be from
	// anywhere. But that will have ramifications throughout Scope (read: it
	// may violate implicit invariants) and needs to be thought through.
	var (
		srcLocal   = s.localNets.Contains(net.ParseIP(p.SrcIP))
		dstLocal   = s.localNets.Contains(net.ParseIP(p.DstIP))
		localIP    string
		remoteIP   string
		localPort  string
		remotePort string
		egress     bool
	)
	switch {
	case srcLocal && !dstLocal:
		localIP, localPort, remoteIP, remotePort, egress = p.SrcIP, p.SrcPort, p.DstIP, p.DstPort, true
	case !srcLocal && dstLocal:
		localIP, localPort, remoteIP, remotePort, egress = p.DstIP, p.DstPort, p.SrcIP, p.SrcPort, false
	case srcLocal && dstLocal:
		localIP, localPort, remoteIP, remotePort, egress = p.SrcIP, p.SrcPort, p.DstIP, p.DstPort, true // loopback
	case !srcLocal && !dstLocal:
		log.Printf("sniffer ignoring remote-to-remote (%s -> %s) traffic", p.SrcIP, p.DstIP)
		return
	}

	// addAdjacency ensures both nodes exist and src is adjacent to dst.
	addAdjacency := func(t report.Topology, srcNodeID, dstNodeID string) report.Topology {
		result := t.AddNode(report.MakeNode(srcNodeID).WithAdjacent(dstNodeID))
		result = result.AddNode(report.MakeNode(dstNodeID))
		return result
	}

	// If we have ports, we can add to the endpoint topology, too.
	if p.SrcPort != "" && p.DstPort != "" {
		var (
			srcNodeID = report.MakeEndpointNodeID(s.hostID, localIP, localPort)
			dstNodeID = report.MakeEndpointNodeID(s.hostID, remoteIP, remotePort)
		)
		rpt.Endpoint = addAdjacency(rpt.Endpoint, srcNodeID, dstNodeID)

		// Bump the edge's counters, lazily allocating each *uint64 the
		// first time its direction is seen.
		node := rpt.Endpoint.Nodes[srcNodeID]
		emd, _ := node.Edges.Lookup(dstNodeID)
		if egress {
			if emd.EgressPacketCount == nil {
				emd.EgressPacketCount = new(uint64)
			}
			*emd.EgressPacketCount++
			if emd.EgressByteCount == nil {
				emd.EgressByteCount = new(uint64)
			}
			*emd.EgressByteCount += uint64(p.Transport)
		} else {
			if emd.IngressPacketCount == nil {
				emd.IngressPacketCount = new(uint64)
			}
			*emd.IngressPacketCount++
			if emd.IngressByteCount == nil {
				emd.IngressByteCount = new(uint64)
			}
			*emd.IngressByteCount += uint64(p.Transport)
		}
		rpt.Endpoint.Nodes[srcNodeID] = node.WithEdge(dstNodeID, emd)
	}
}

View File

@@ -1,52 +0,0 @@
package sniff
import (
"testing"
"github.com/weaveworks/scope/report"
)
// TestInterpolateCounts checks that interpolateCounts scales every
// packet/byte counter on an edge by the inverse sampling rate
// (total/count), truncated to integers.
func TestInterpolateCounts(t *testing.T) {
	var (
		hostID         = "macbook-air"
		srcNodeID      = report.MakeEndpointNodeID(hostID, "1.2.3.4", "5678")
		dstNodeID      = report.MakeEndpointNodeID(hostID, "5.6.7.8", "9012")
		samplingCount  = uint64(200)
		samplingTotal  = uint64(2345)
		packetCount    = uint64(123)
		byteCount      = uint64(4096)
	)
	r := report.MakeReport()
	r.Sampling.Count = samplingCount
	r.Sampling.Total = samplingTotal
	r.Endpoint.AddNode(report.MakeNode(srcNodeID).WithEdge(dstNodeID, report.EdgeMetadata{
		EgressPacketCount:  newu64(packetCount),
		IngressPacketCount: newu64(packetCount),
		EgressByteCount:    newu64(byteCount),
		IngressByteCount:   newu64(byteCount),
	}))
	interpolateCounts(r)
	// apply mirrors the inflation interpolateCounts is expected to do.
	var (
		rate   = float64(samplingCount) / float64(samplingTotal)
		factor = 1.0 / rate
		apply  = func(v uint64) uint64 { return uint64(factor * float64(v)) }
		emd    = r.Endpoint.Nodes[srcNodeID].Edges[dstNodeID]
	)
	if want, have := apply(packetCount), (*emd.EgressPacketCount); want != have {
		t.Errorf("want %d packets, have %d", want, have)
	}
	if want, have := apply(packetCount), (*emd.IngressPacketCount); want != have {
		t.Errorf("want %d packets, have %d", want, have)
	}
	if want, have := apply(byteCount), (*emd.EgressByteCount); want != have {
		t.Errorf("want %d bytes, have %d", want, have)
	}
	if want, have := apply(byteCount), (*emd.IngressByteCount); want != have {
		t.Errorf("want %d bytes, have %d", want, have)
	}
}

// newu64 returns a pointer to value, for building EdgeMetadata literals.
func newu64(value uint64) *uint64 { return &value }

View File

@@ -1,125 +0,0 @@
package sniff_test
import (
"io"
"net"
"reflect"
"sync"
"testing"
"time"
"github.com/google/gopacket"
"github.com/weaveworks/scope/experimental/sniff"
"github.com/weaveworks/scope/report"
"github.com/weaveworks/scope/test"
)
// TestSnifferShutdown verifies that closing the packet source terminates
// the sniffer's internal loop: a later Report call must block forever
// rather than be answered.
func TestSnifferShutdown(t *testing.T) {
	var (
		hostID = "abcd"
		src    = newMockSource([]byte{}, nil)
		on     = time.Millisecond
		off    = time.Millisecond
		s      = sniff.New(hostID, report.Networks{}, src, on, off)
	)

	// Stopping the source should terminate the sniffer.
	src.Close()
	time.Sleep(10 * time.Millisecond)

	// Try to get a report from the sniffer. It should block forever, as the
	// loop goroutine should have exited.
	report := make(chan struct{})
	go func() { _, _ = s.Report(); close(report) }()
	select {
	case <-time.After(time.Millisecond):
	case <-report:
		t.Errorf("shouldn't get report after Close")
	}
}
// TestMerge feeds a single decoded packet (whose source is inside the
// "local" network) into Merge and asserts the resulting endpoint and
// address topologies: one egress edge from local to remote, with byte
// counts taken from the transport and network layers respectively.
//
// NOTE(review): the second half asserts on rpt.Address, but the Merge in
// this package only populates rpt.Endpoint — confirm this test matches
// the Merge version it was written against.
func TestMerge(t *testing.T) {
	var (
		hostID = "xyz"
		src    = newMockSource([]byte{}, nil)
		on     = time.Millisecond
		off    = time.Millisecond
		rpt    = report.MakeReport()
		p      = sniff.Packet{
			SrcIP:     "1.0.0.0",
			SrcPort:   "1000",
			DstIP:     "2.0.0.0",
			DstPort:   "2000",
			Network:   512,
			Transport: 256,
		}
		_, ipnet, _ = net.ParseCIDR(p.SrcIP + "/24") // ;)
		localNets   = report.Networks([]*net.IPNet{ipnet})
	)
	sniff.New(hostID, localNets, src, on, off).Merge(p, &rpt)

	var (
		srcEndpointNodeID = report.MakeEndpointNodeID(hostID, p.SrcIP, p.SrcPort)
		dstEndpointNodeID = report.MakeEndpointNodeID(hostID, p.DstIP, p.DstPort)
	)
	if want, have := (report.Topology{
		Nodes: report.Nodes{
			srcEndpointNodeID: report.MakeNode(srcEndpointNodeID).WithEdge(dstEndpointNodeID, report.EdgeMetadata{
				EgressPacketCount: newu64(1),
				EgressByteCount:   newu64(256),
			}),
			dstEndpointNodeID: report.MakeNode(dstEndpointNodeID),
		},
	}), rpt.Endpoint; !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}

	var (
		srcAddressNodeID = report.MakeAddressNodeID(hostID, p.SrcIP)
		dstAddressNodeID = report.MakeAddressNodeID(hostID, p.DstIP)
	)
	if want, have := (report.Topology{
		Nodes: report.Nodes{
			srcAddressNodeID: report.MakeNode(srcAddressNodeID).WithEdge(dstAddressNodeID, report.EdgeMetadata{
				EgressPacketCount: newu64(1),
				EgressByteCount:   newu64(512),
			}),
			dstAddressNodeID: report.MakeNode(dstAddressNodeID),
		},
	}), rpt.Address; !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
// mockSource is an in-memory gopacket data source for tests: every read
// yields the same payload (and err) until Close flips it to io.EOF.
type mockSource struct {
	mtx  sync.RWMutex
	data []byte
	err  error
}

// newMockSource returns a source yielding data and err on each read.
func newMockSource(data []byte, err error) *mockSource {
	return &mockSource{data: data, err: err}
}

// ZeroCopyReadPacketData implements gopacket.ZeroCopyPacketDataSource.
func (s *mockSource) ZeroCopyReadPacketData() ([]byte, gopacket.CaptureInfo, error) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	info := gopacket.CaptureInfo{
		Timestamp:     time.Now(),
		CaptureLength: len(s.data),
		Length:        len(s.data),
	}
	return s.data, info, s.err
}

// Close makes all subsequent reads report io.EOF.
func (s *mockSource) Close() {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.err = io.EOF
}

// newu64 returns a pointer to value, for building EdgeMetadata literals.
func newu64(value uint64) *uint64 { return &value }

View File

@@ -1,24 +0,0 @@
package sniff
import (
"github.com/google/gopacket"
"github.com/google/gopacket/pcap"
)
// Source describes a packet data source that can be terminated.
type Source interface {
	gopacket.ZeroCopyPacketDataSource
	Close()
}

// Live-capture settings.
const (
	snaplen = 65535             // capture whole packets
	promisc = true              // also capture traffic not addressed to us
	timeout = pcap.BlockForever // reads block until a packet arrives
)

// NewSource returns a live packet data source via the passed device
// (interface).
func NewSource(device string) (Source, error) {
	return pcap.OpenLive(device, snaplen, promisc, timeout)
}

View File

@@ -1,91 +0,0 @@
package main
import (
"flag"
"io"
"log"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcap"
)
// main opens a live capture on -device, arranges for the handle to be
// closed after 5 seconds, and logs every decoded layer of every packet
// until the read loop sees EOF from the closed handle.
func main() {
	var (
		device = flag.String("device", "eth0", "device to sniff")
	)
	flag.Parse()

	const (
		snaplen = 1024 * 1024 // per-packet capture size
		promisc = true        // also capture traffic not addressed to us
		timeout = pcap.BlockForever
	)
	handle, err := pcap.OpenLive(*device, snaplen, promisc, timeout)
	if err != nil {
		log.Fatal(err)
	}

	// Bound the run: closing the handle makes the reader below get EOF.
	go func() {
		time.Sleep(5 * time.Second)
		handle.Close()
	}()

	// Reusable decoder state, one struct per layer type we care about.
	var (
		eth   layers.Ethernet
		ip4   layers.IPv4
		ip6   layers.IPv6
		tcp   layers.TCP
		udp   layers.UDP
		icmp4 layers.ICMPv4
		icmp6 layers.ICMPv6
	)
	parser := gopacket.NewDecodingLayerParser(
		layers.LayerTypeEthernet,
		&eth, &ip4, &ip6, &tcp, &udp, &icmp4, &icmp6,
	)
	decoded := []gopacket.LayerType{}

	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			data, ci, err := handle.ZeroCopyReadPacketData()
			if err == io.EOF {
				log.Print("read: EOF")
				return
			}
			if err != nil {
				log.Printf("read: %v", err)
				continue
			}
			log.Println(ci.Timestamp.String())
			err = parser.DecodeLayers(data, &decoded)
			if err != nil {
				log.Printf("Error in DecodeLayers: %v", err)
				continue
			}
			// One log line per decoded layer, with its salient fields.
			for _, t := range decoded {
				switch t {
				case layers.LayerTypeEthernet:
					log.Println(" Ethernet", eth.EthernetType, eth.SrcMAC, eth.DstMAC, eth.Length)
				case layers.LayerTypeIPv6:
					log.Println(" IP6", ip6.Version, ip6.SrcIP, ip6.DstIP, ip6.Length, ip6.TrafficClass)
				case layers.LayerTypeIPv4:
					log.Println(" IP4", ip4.Version, ip4.SrcIP, ip4.DstIP, ip4.Length, ip4.TTL, ip4.TOS)
				case layers.LayerTypeTCP:
					log.Println(" TCP", tcp.SrcPort, tcp.DstPort, tcp.Seq, tcp.Ack, tcp.Window)
				case layers.LayerTypeUDP:
					log.Println(" UDP", udp.SrcPort, udp.DstPort, udp.Length)
				case layers.LayerTypeICMPv4:
					log.Println(" ICMP4", icmp4.Id, icmp4.Seq)
				case layers.LayerTypeICMPv6:
					log.Println(" ICMP6")
				}
			}
			log.Println()
		}
	}()
	<-done
}

View File

@@ -1,3 +0,0 @@
main/main
tracer.tar
main/static.go

View File

@@ -1,31 +0,0 @@
BUILD_IN_CONTAINER=true

# Package the tracer image as a tarball for distribution.
tracer.tar: main/main main/Dockerfile
	docker build -t tomwilkie/tracer main/
	docker save tomwilkie/tracer:latest >$@

main/main: main/*.go main/static.go ptrace/*.go

ifeq ($(BUILD_IN_CONTAINER),true)

# Delegate the build to the scope backend-build container.
main/main:
	docker run -ti \
		-v $(shell pwd)/../../:/go/src/github.com/weaveworks/scope \
		-e GOARCH -e GOOS -e CIRCLECI -e CIRCLE_BUILD_NUM -e CIRCLE_NODE_TOTAL \
		-e CIRCLE_NODE_INDEX -e COVERDIR -e SLOW \
		weaveworks/scope-backend-build SCOPE_VERSION=$(SCOPE_VERSION) GO_BUILD_INSTALL_DEPS=$(GO_BUILD_INSTALL_DEPS) -C experimental/tracer $@

else

# Native static build, for use inside the build container.
main/main:
	#go get -tags netgo ./$(@D)
	go build -ldflags "-extldflags \"-static\"" -tags netgo -o $@ ./$(@D)

endif

# Embed the UI assets into a Go source file.
main/static.go: ui/*
	esc -o main/static.go -prefix ui ui

clean:
	# `./...` is the recursive package wildcard; the original `./..`
	# pointed at the parent directory only.
	go clean ./...
	rm -f main/static.go tracer.tar main/main

View File

@@ -1,15 +0,0 @@
Tracer is a prototype for doing container-centric distributed request tracing without application modifications.
It is very early. Ask Tom for a demo.
Run tracer:
- make
- ./tracer.sh start
TODO:
- <s>need to stitch traces together</s>
- deal with persistent connections
- make it work for goroutines
- test with jvm based app
- find way to get local ip address for an incoming connection
- make the container/process trace start/stop more reliable

View File

@@ -1,6 +0,0 @@
# Minimal runtime image for the tracer binary (statically linked, so a
# bare alpine base is enough).
FROM gliderlabs/alpine
MAINTAINER Weaveworks Inc <help@weave.works>
WORKDIR /home/weave
# The binary is built by the Makefile before `docker build` runs.
COPY ./main /home/weave/
# NOTE(review): EXPOSE says 4050 but main.go launches the HTTP API on
# 6060 — confirm which port is correct.
EXPOSE 4050
ENTRYPOINT ["/home/weave/main"]

View File

@@ -1,150 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"log"
"mime"
"net/http"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/weaveworks/scope/probe/docker"
"github.com/weaveworks/scope/probe/process"
)
func respondWith(w http.ResponseWriter, code int, response interface{}) {
w.Header().Set("Content-Type", "application/json")
w.Header().Add("Cache-Control", "no-cache")
w.WriteHeader(code)
if err := json.NewEncoder(w).Encode(response); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
log.Printf("Error handling http request: %v", err.Error())
}
}
// pidsForContainer returns the host PIDs of every process inside the
// container with the given ID, by walking the /proc process tree down
// from the container's init PID. Errors if the container is unknown or
// /proc cannot be read.
func (t *tracer) pidsForContainer(id string) ([]int, error) {
	var container docker.Container
	t.docker.WalkContainers(func(c docker.Container) {
		if c.ID() == id {
			container = c
		}
	})
	if container == nil {
		return []int{}, fmt.Errorf("Not Found")
	}
	pidTree, err := process.NewTree(process.NewWalker("/proc"))
	if err != nil {
		return []int{}, err
	}
	return pidTree.GetChildren(container.PID())
}
// Container is the type exported by the HTTP API.
type Container struct {
	ID   string // docker container ID
	Name string // docker name, without the leading "/"
	PIDs []int  // host PIDs of every process in the container
}
// http serves the tracer's REST API and static UI on the given port.
// Routes: GET /container lists containers with their PIDs; POST/DELETE
// /container/{id} start/stop tracing every process in a container;
// GET /pid lists traced PIDs; POST/DELETE /pid/{pid} trace/untrace one
// PID; GET /traces dumps collected traces; everything else is served
// from the embedded static filesystem.
func (t *tracer) http(port int) {
	router := mux.NewRouter()
	router.Methods("GET").Path("/container").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Snapshot the process tree once, then fill in each container's PIDs.
		pidTree, err := process.NewTree(process.NewWalker("/proc"))
		if err != nil {
			respondWith(w, http.StatusBadRequest, err.Error())
			return
		}
		containers := []Container{}
		t.docker.WalkContainers(func(container docker.Container) {
			children, _ := pidTree.GetChildren(container.PID())
			out := Container{
				Name: strings.TrimPrefix(container.Container().Name, "/"),
				ID:   container.ID(),
				PIDs: children,
			}
			containers = append(containers, out)
		})
		respondWith(w, http.StatusOK, containers)
	})
	router.Methods("POST").Path("/container/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		id := mux.Vars(r)["id"]
		children, err := t.pidsForContainer(id)
		if err != nil {
			respondWith(w, http.StatusBadRequest, err.Error())
			return
		}
		for _, pid := range children {
			t.ptrace.TraceProcess(pid)
		}
		w.WriteHeader(204)
	})
	router.Methods("DELETE").Path("/container/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		id := mux.Vars(r)["id"]
		children, err := t.pidsForContainer(id)
		if err != nil {
			respondWith(w, http.StatusBadRequest, err.Error())
			return
		}
		for _, pid := range children {
			t.ptrace.StopTracing(pid)
		}
		w.WriteHeader(204)
	})
	router.Methods("GET").Path("/pid").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		respondWith(w, http.StatusOK, t.ptrace.AttachedPIDs())
	})
	router.Methods("POST").Path("/pid/{pid:\\d+}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		pid, err := strconv.Atoi(mux.Vars(r)["pid"])
		if err != nil {
			respondWith(w, http.StatusBadRequest, err.Error())
			return
		}
		t.ptrace.TraceProcess(pid)
		w.WriteHeader(204)
	})
	router.Methods("DELETE").Path("/pid/{pid:\\d+}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		pid, err := strconv.Atoi(mux.Vars(r)["pid"])
		if err != nil {
			respondWith(w, http.StatusBadRequest, err.Error())
			return
		}
		t.ptrace.StopTracing(pid)
		w.WriteHeader(204)
	})
	router.Methods("GET").Path("/traces").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		respondWith(w, http.StatusOK, t.store.Traces())
	})
	// Make sure .svg assets get the right content type.
	mime.AddExtensionType(".svg", "image/svg+xml")
	router.Methods("GET").PathPrefix("/").Handler(http.FileServer(FS(false))) // everything else is static
	log.Printf("Launching HTTP API on port %d", port)
	srv := &http.Server{
		Addr:    fmt.Sprintf(":%d", port),
		Handler: router,
	}
	if err := srv.ListenAndServe(); err != nil {
		log.Printf("Unable to create http listener: %v", err)
	}
}

View File

@@ -1,70 +0,0 @@
package main
import (
"log"
_ "net/http/pprof"
"os"
"os/signal"
"runtime"
"syscall"
"time"
"github.com/weaveworks/scope/experimental/tracer/ptrace"
"github.com/weaveworks/scope/probe/docker"
)
const (
	procRoot     = "/proc"          // root of the host's proc filesystem
	pollInterval = 10 * time.Second // docker registry re-poll interval
)

// tracer ties together the ptrace engine, the trace store it feeds, and
// the docker registry used to map containers to PIDs.
type tracer struct {
	ptrace ptrace.PTracer
	store  *store
	docker docker.Registry
}
// Stop shuts down the ptrace engine and then the docker watcher,
// logging either side so a hang during shutdown is visible.
func (t *tracer) Stop() {
	log.Printf("Shutting down...")
	t.ptrace.Stop()
	t.docker.Stop()
	log.Printf("Done.")
}
// main wires the docker registry, trace store and ptrace engine
// together, serves the HTTP API on 6060, and runs until SIGINT/SIGTERM.
func main() {
	dockerRegistry, err := docker.NewRegistry(pollInterval, nil, false, "")
	if err != nil {
		// Message fixed: the original read "Could start docker watcher".
		log.Fatalf("Could not start docker watcher: %v", err)
	}
	store := newStore()
	tracer := tracer{
		store:  store,
		ptrace: ptrace.NewPTracer(store),
		docker: dockerRegistry,
	}
	defer tracer.Stop()
	go tracer.http(6060)
	<-handleSignals()
}
// handleSignals returns a channel that receives a value when SIGINT or
// SIGTERM arrives. SIGQUIT does not quit: it dumps all goroutine stacks
// to the log, for debugging a live tracer.
func handleSignals() chan struct{} {
	quit := make(chan struct{}, 10)
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGQUIT, syscall.SIGINT, syscall.SIGTERM)
		buf := make([]byte, 1<<20) // scratch for the goroutine dump
		for {
			sig := <-sigs
			switch sig {
			case syscall.SIGINT, syscall.SIGTERM:
				quit <- struct{}{}
			case syscall.SIGQUIT:
				stacklen := runtime.Stack(buf, true)
				log.Printf("=== received SIGQUIT ===\n*** goroutine dump...\n%s\n*** end\n", buf[:stacklen])
			}
		}
	}()
	return quit
}

View File

@@ -1,180 +0,0 @@
package main
import (
"fmt"
"math/rand"
"sort"
"sync"
"github.com/msackman/skiplist"
"github.com/weaveworks/scope/experimental/tracer/ptrace"
)
// epsilon is the window, in milliseconds, within which two start times
// are considered the same connection when matching traces.
const epsilon = int64(5) * 1000 // milliseconds

// Traces are indexed by from addr, from port, and start time.
type key struct {
	fromAddr  uint32
	fromPort  uint16
	startTime int64
}

// MarshalJSON renders the key as a quoted "addr.port.start" hex triple.
func (k key) MarshalJSON() ([]byte, error) {
	encoded := fmt.Sprintf("\"%x.%x.%x\"", k.fromAddr, k.fromPort, k.startTime)
	return []byte(encoded), nil
}
// trace is one observed connection plus the connections it caused.
type trace struct {
	PID           int
	Key           key
	ServerDetails *ptrace.ConnectionDetails // incoming side, when known
	ClientDetails *ptrace.ConnectionDetails // outgoing side, when known
	Children      []*trace                  // connections caused by this one
	Level         int                       // depth in the trace tree
}

// byKey sorts traces by start time.
type byKey []*trace

func (a byKey) Len() int           { return len(a) }
func (a byKey) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key.startTime < a[j].Key.startTime }
// store holds all known traces in a skiplist ordered by key; the
// embedded RWMutex guards every access.
type store struct {
	sync.RWMutex
	traces *skiplist.SkipList
}

// newKey builds a key from a connection's IPv4 from-address, from-port
// and start time. The address bytes are packed big-endian into a uint32.
func newKey(fd *ptrace.Fd) key {
	var fromAddr uint32
	for _, b := range fd.FromAddr.To4() {
		fromAddr <<= 8
		fromAddr |= uint32(b)
	}
	return key{fromAddr, fd.FromPort, fd.Start}
}
// LessThan orders keys for the skiplist: by fromAddr, then fromPort,
// then startTime, treating keys within epsilon of each other as equal.
// NOTE(review): fromAddr compares with > (descending) while the other
// fields ascend — confirm whether that asymmetry is intentional.
func (k key) LessThan(other skiplist.Comparable) bool {
	r := other.(key)
	if k.fromAddr != r.fromAddr {
		return k.fromAddr > r.fromAddr
	}
	if k.fromPort != r.fromPort {
		return k.fromPort < r.fromPort
	}
	if k.Equal(other) {
		return false
	}
	return k.startTime < r.startTime
}

// Equal reports whether two keys denote the same connection: identical
// addr/port with start times within epsilon milliseconds. Note this
// fuzzy match is deliberately not exact (and is not transitive).
func (k key) Equal(other skiplist.Comparable) bool {
	r := other.(key)
	if k.fromAddr != r.fromAddr || k.fromPort != r.fromPort {
		return false
	}
	diff := k.startTime - r.startTime
	return -epsilon < diff && diff < epsilon
}
// newStore returns an empty trace store. The skiplist uses a fixed seed,
// so its internal shape is deterministic across runs.
func newStore() *store {
	return &store{traces: skiplist.New(rand.New(rand.NewSource(0)))}
}

// addChild replaces the placeholder child of t whose key matches child
// with the fully-populated child, keeping the placeholder's client
// details and fixing up tree levels. If no placeholder matches, the
// child is silently dropped.
func (t *trace) addChild(child *trace) {
	// find the child we're supposed to be replacing
	for i, candidate := range t.Children {
		if !candidate.Key.Equal(skiplist.Comparable(child.Key)) {
			continue
		}

		// Fix up some fields
		child.ClientDetails = candidate.ClientDetails
		IncrementLevel(child, t.Level+1)

		// Overwrite old record
		t.Children[i] = child
		return
	}
}
// RecordConnection stores a newly-observed server-side connection and
// its causally-linked outgoing children, stitching it into any traces
// recorded earlier.
func (s *store) RecordConnection(pid int, connection *ptrace.Fd) {
	s.Lock()
	defer s.Unlock()

	newTrace := &trace{
		PID:           pid,
		Key:           newKey(connection),
		ServerDetails: &connection.ConnectionDetails,
	}
	// Placeholder children at level 1; these may be replaced once the
	// child connections are themselves recorded.
	for _, child := range connection.Children {
		newTrace.Children = append(newTrace.Children, &trace{
			Level:         1,
			Key:           newKey(child),
			ClientDetails: &child.ConnectionDetails,
		})
	}

	// First, see if this new connection is a child of an existing connection.
	// This indicates we have a parent connection to attach to.
	// If not, insert this connection.
	if parentNode := s.traces.Get(newTrace.Key); parentNode != nil {
		parentTrace := parentNode.Value.(*trace)
		parentTrace.addChild(newTrace)
		parentNode.Remove()
	} else {
		s.traces.Insert(newTrace.Key, newTrace)
	}

	// Next, see if we already know about the child connections
	// If not, insert each of our children.
	for _, child := range newTrace.Children {
		if childNode := s.traces.Get(child.Key); childNode != nil {
			childTrace := childNode.Value.(*trace)
			newTrace.addChild(childTrace)
			childNode.Remove()
		} else {
			// Index the *parent* under the child's key, so the child's
			// own RecordConnection can find and attach to it later.
			s.traces.Insert(child.Key, newTrace)
		}
	}
}
// IncrementLevel adds increment to the Level of trace and of every
// descendant in its Children tree.
func IncrementLevel(trace *trace, increment int) {
	pending := []*trace{trace}
	for len(pending) > 0 {
		t := pending[len(pending)-1]
		pending = pending[:len(pending)-1]
		t.Level += increment
		pending = append(pending, t.Children...)
	}
}
// Traces returns (at most) the 15 most recently started root traces, in
// start-time order. Skiplist entries indexed under a child's key (where
// trace.Key != key) are pointers to parents, not roots, and are skipped.
func (s *store) Traces() []*trace {
	s.RLock()
	defer s.RUnlock()

	traces := []*trace{}
	var cur = s.traces.First()
	for cur != nil {
		key := cur.Key.(key)
		trace := cur.Value.(*trace)
		if trace.Key == key {
			traces = append(traces, trace)
		}
		cur = cur.Next()
	}

	sort.Sort(byKey(traces))

	// only return last 15 traces
	start := 0
	if len(traces) > 15 {
		start = len(traces) - 15
	}
	traces = traces[start:]
	return traces
}

View File

@@ -1,198 +0,0 @@
package ptrace
import (
"bufio"
"encoding/hex"
"fmt"
"net"
"os"
"regexp"
"strconv"
"time"
)
// Connection directions.
const (
	listening = iota // socket observed in listen state
	incoming
	outgoing
)

const (
	// socketPattern matches the target of a /proc/<pid>/fd symlink that
	// points at a socket, capturing the inode.
	socketPattern = `^socket:\[(\d+)\]$`
	// tcpPattern matches one row of /proc/<pid>/net/tcp, capturing the
	// local address/port and the inode (submatches 2, 3 and 6).
	tcpPattern = `^\s*(?P<fd>\d+): (?P<localaddr>[A-F0-9]{8}):(?P<localport>[A-F0-9]{4}) ` +
		`(?P<remoteaddr>[A-F0-9]{8}):(?P<remoteport>[A-F0-9]{4}) (?:[A-F0-9]{2}) (?:[A-F0-9]{8}):(?:[A-F0-9]{8}) ` +
		`(?:[A-F0-9]{2}):(?:[A-F0-9]{8}) (?:[A-F0-9]{8}) \s+(?:\d+) \s+(?:\d+) (?P<inode>\d+)`
)

// Compiled once at package init; both patterns are hot in getLocalAddr.
var (
	socketRegex = regexp.MustCompile(socketPattern)
	tcpRegexp   = regexp.MustCompile(tcpPattern)
)
// ConnectionDetails describes one observed connection: its direction,
// start/stop timestamps (milliseconds, see now()), byte counts and the
// two endpoints.
type ConnectionDetails struct {
	direction int   // listening, incoming or outgoing
	Start     int64
	Stop      int64
	sent      int64
	received  int64
	FromAddr  net.IP
	FromPort  uint16
	ToAddr    net.IP
	ToPort    uint16
}

// Fd represents a connect and subsequent connections caused by it.
type Fd struct {
	fd     int
	closed bool
	ConnectionDetails

	// Fds are connections, and can have a causal-link to other Fds
	Children []*Fd
}
// getLocalAddr returns the local IPv4 address and port of the socket
// behind file descriptor fd of process pid. It resolves the fd to a
// socket inode via the /proc/<pid>/fd/<fd> symlink, then scans
// /proc/<pid>/net/tcp for the row carrying that inode.
func getLocalAddr(pid, fd int) (addr net.IP, port uint16, err error) {
	var (
		socket    string
		match     []string
		inode     int
		tcpFile   *os.File
		scanner   *bufio.Scanner
		candidate int
		port64    int64
	)
	socket, err = os.Readlink(fmt.Sprintf("/proc/%d/fd/%d", pid, fd))
	if err != nil {
		return
	}
	match = socketRegex.FindStringSubmatch(socket)
	if match == nil {
		err = fmt.Errorf("Fd %d not a socket", fd)
		return
	}
	inode, err = strconv.Atoi(match[1])
	if err != nil {
		return
	}
	tcpFile, err = os.Open(fmt.Sprintf("/proc/%d/net/tcp", pid))
	if err != nil {
		return
	}
	defer tcpFile.Close()
	scanner = bufio.NewScanner(tcpFile)
	for scanner.Scan() {
		match = tcpRegexp.FindStringSubmatch(scanner.Text())
		if match == nil {
			// Header line, or a row the pattern doesn't cover.
			continue
		}
		candidate, err = strconv.Atoi(match[6])
		if err != nil {
			return
		}
		if candidate != inode {
			continue
		}
		// Decode the 8-hex-digit address and reverse its bytes to get
		// the conventional dotted-quad ordering.
		addr = make([]byte, 4)
		if _, err = hex.Decode(addr, []byte(match[2])); err != nil {
			return
		}
		addr[0], addr[1], addr[2], addr[3] = addr[3], addr[2], addr[1], addr[0]
		// use a 32 bit int for the target, as the result is a uint16
		port64, err = strconv.ParseInt(match[3], 16, 32)
		if err != nil {
			return
		}
		port = uint16(port64)
		return
	}
	if err = scanner.Err(); err != nil {
		return
	}
	err = fmt.Errorf("Fd %d not found for proc %d", fd, pid)
	return
}
// now returns the current wall-clock time as milliseconds since the
// Unix epoch.
func now() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}
// newListeningFd records a listening socket. The local address read
// from /proc becomes ToAddr/ToPort: the destination that incoming
// connections accepted on this fd will have connected to.
func newListeningFd(pid, fd int) (*Fd, error) {
	localAddr, localPort, err := getLocalAddr(pid, fd)
	if err != nil {
		return nil, err
	}
	return &Fd{
		fd: fd,
		ConnectionDetails: ConnectionDetails{
			direction: listening,
			Start:     now(),
			ToAddr:    localAddr,
			ToPort:    uint16(localPort),
		},
	}, nil
}
// newConnectionFd records an outgoing connection observed via an
// intercepted connect(2): the local endpoint is read from /proc, the
// remote endpoint comes from the syscall's sockaddr argument.
func newConnectionFd(pid, fd int, remoteAddr net.IP, remotePort uint16) (*Fd, error) {
	localAddr, localPort, err := getLocalAddr(pid, fd)
	if err != nil {
		return nil, err
	}
	return &Fd{
		fd: fd,
		ConnectionDetails: ConnectionDetails{
			direction: outgoing,
			Start:     now(),
			FromAddr:  localAddr,
			FromPort:  uint16(localPort),
			ToAddr:    remoteAddr,
			ToPort:    remotePort,
		},
	}, nil
}
// newConnection records a connection accepted on this (listening) fd.
// The listener's address becomes the destination; addr/port is the
// remote peer; newFd is the descriptor accept(2) returned.
func (fd *Fd) newConnection(addr net.IP, port uint16, newFd int) (*Fd, error) {
	if fd.direction != listening {
		return nil, fmt.Errorf("New connection on non-listening fd!")
	}
	conn := &Fd{
		fd: newFd,
		ConnectionDetails: ConnectionDetails{
			direction: incoming,
			Start:     now(),
			FromAddr:  addr,
			FromPort:  port,
			ToAddr:    fd.ToAddr,
			ToPort:    fd.ToPort,
		},
	}
	return conn, nil
}
// close marks the connection as finished and stamps its stop time.
func (fd *Fd) close() {
	fd.closed = true
	fd.Stop = now()
}

View File

@@ -1,99 +0,0 @@
package ptrace
import (
"fmt"
"io/ioutil"
"log"
"strconv"
"sync"
)
// process tracks one traced process: the threads we have attached to
// and the connection fds we have observed. The embedded mutex guards
// the threads map, which is written from both the attach loop and the
// tracer. detached is closed once the last thread has been detached.
type process struct {
	sync.Mutex
	pid       int
	detaching bool // set by StopTracing; threads detach at next stop
	detached  chan struct{}
	tracer    *PTracer
	threads   map[int]*thread
	fds       map[int]*Fd
}
// newProcess allocates the bookkeeping for one traced process.
func newProcess(pid int, tracer *PTracer) *process {
	p := &process{
		pid:      pid,
		tracer:   tracer,
		detached: make(chan struct{}),
	}
	p.threads = make(map[int]*thread)
	p.fds = make(map[int]*Fd)
	return p
}
// trace starts attaching to the process's threads in the background.
func (p *process) trace() {
	go p.loop()
}
// loop repeatedly lists /proc/<pid>/task and attaches to every thread
// it finds, until one full pass sees all of them already attached.
// NOTE: this doesn't actually guarantee we follow all the threads — a
// thread created and exiting between passes can be missed.
func (p *process) loop() {
	var (
		attached int
	)
	log.Printf("Tracing process %d", p.pid)
	for {
		ps, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/task", p.pid))
		if err != nil {
			log.Printf("ReadDir failed, pid=%d, err=%v", p.pid, err)
			return
		}
		attached = 0
		for _, file := range ps {
			pid, err := strconv.Atoi(file.Name())
			if err != nil {
				// Non-numeric entry; count it so the == len(ps)
				// termination check below still lines up.
				log.Printf("'%s' is not a pid: %v", file.Name(), err)
				attached++
				continue
			}
			p.Lock()
			t, ok := p.threads[pid]
			if !ok {
				t = p.tracer.traceThread(pid, p)
				p.threads[pid] = t
			}
			p.Unlock()
			if !t.attached {
				continue
			}
			attached++
		}
		// When we successfully attach to all threads
		// we can be sure to catch new clones, so we
		// can quit.
		if attached == len(ps) {
			break
		}
	}
	log.Printf("Successfully attached to %d threads", attached)
}
// newThread registers a thread with its owning process (under lock —
// the attach loop reads the same map).
func (p *process) newThread(thread *thread) {
	p.Lock()
	defer p.Unlock()
	p.threads[thread.tid] = thread
}
// newFd registers a newly observed connection fd with the process.
// It returns an error if the descriptor number is already tracked,
// i.e. we never saw the previous fd with this number close.
func (p *process) newFd(fd *Fd) error {
	// Single-lookup existence check (was a separate lookup + test).
	if _, ok := p.fds[fd.fd]; ok {
		// Fixed typo in the error message ("alread" -> "already").
		return fmt.Errorf("New fd %d, already exists!", fd.fd)
	}
	p.fds[fd.fd] = fd
	return nil
}

View File

@@ -1,337 +0,0 @@
package ptrace
import (
"bufio"
"fmt"
"log"
"os"
"runtime"
"strconv"
"strings"
"syscall"
)
const (
	// Mark syscall stops with SIGTRAP|0x80 and report clone events.
	ptraceOptions         = syscall.PTRACE_O_TRACESYSGOOD | syscall.PTRACE_O_TRACECLONE
	ptraceTracesysgoodBit = 0x80
)

// Store receives each completed incoming connection (with its causally
// related outgoing connections attached) for a given pid.
type Store interface {
	RecordConnection(int, *Fd)
}

// PTracer traces processes and their threads via ptrace.
type PTracer struct {
	// All ptrace calls must come from the
	// same thread. So we wait on a separate
	// thread.
	ops           chan func() // operations serialized onto the ptrace thread
	stopped       chan stopped
	quit          chan struct{}
	childAttached chan struct{} //used to signal the wait loop
	threads       map[int]*thread
	processes     map[int]*process
	store         Store
}

// stopped is one wait4 result: the pid that stopped and its status.
type stopped struct {
	pid    int
	status syscall.WaitStatus
}
// NewPTracer creates a new ptracer and starts its wait and dispatch
// loops; completed connections are delivered to store.
func NewPTracer(store Store) PTracer {
	tracer := PTracer{
		store:         store,
		ops:           make(chan func()),
		stopped:       make(chan stopped),
		quit:          make(chan struct{}),
		childAttached: make(chan struct{}),
		threads:       map[int]*thread{},
		processes:     map[int]*process{},
	}
	go tracer.waitLoop()
	go tracer.loop()
	return tracer
}
// Stop detaches from every traced process, then shuts down the
// tracer's dispatch loop.
func (t *PTracer) Stop() {
	out := make(chan []int)
	t.ops <- func() {
		pids := []int{}
		for pid := range t.processes {
			pids = append(pids, pid)
		}
		out <- pids
	}
	// Detach outside the op so StopTracing can submit its own ops.
	for _, pid := range <-out {
		t.StopTracing(pid)
	}
	t.quit <- struct{}{}
}
// TraceProcess registers pid with the tracer and starts attaching to
// all of its threads. It blocks until the tracer loop has accepted
// the process (attachment itself continues in the background).
func (t *PTracer) TraceProcess(pid int) {
	done := make(chan struct{})
	t.ops <- func() {
		process := newProcess(pid, t)
		t.processes[pid] = process
		process.trace()
		done <- struct{}{}
	}
	<-done
	// (Removed a redundant bare `return` at the end of this
	// void function — staticcheck S1023.)
}
// StopTracing detaches from all threads of pid: it marks the process
// as detaching, SIGSTOPs every thread so each reports a stop (where
// handleStopped performs the actual detach), and waits until the last
// thread has gone (process.detached is closed by detachThread).
func (t *PTracer) StopTracing(pid int) error {
	log.Printf("Detaching from %d", pid)
	errors := make(chan error)
	processes := make(chan *process)
	t.ops <- func() {
		// send sigstop to all threads
		process, ok := t.processes[pid]
		if !ok {
			errors <- fmt.Errorf("PID %d not found", pid)
			return
		}
		// This flag tells the thread to detach when it next stops
		process.detaching = true
		// Now send sigstop to all threads.
		for _, thread := range process.threads {
			log.Printf("sending SIGSTOP to %d", thread.tid)
			if err := syscall.Tgkill(pid, thread.tid, syscall.SIGSTOP); err != nil {
				errors <- err
				return
			}
		}
		processes <- process
	}
	select {
	case err := <-errors:
		return err
	case process := <-processes:
		<-process.detached
		return nil
	}
}
// AttachedPIDs returns the pids of all currently traced processes.
func (t *PTracer) AttachedPIDs() []int {
	out := make(chan []int)
	t.ops <- func() {
		// Snapshot the pid set on the tracer thread to avoid racing
		// with concurrent map mutation.
		var pids []int
		for pid := range t.processes {
			pids = append(pids, pid)
		}
		out <- pids
	}
	return <-out
}
// traceThread registers tid and issues PTRACE_ATTACH from the tracer
// thread, returning the new thread record.
func (t *PTracer) traceThread(pid int, process *process) *thread {
	result := make(chan *thread)
	t.ops <- func() {
		thread := newThread(pid, process, t)
		t.threads[pid] = thread
		err := syscall.PtraceAttach(pid)
		if err != nil {
			log.Printf("Attach %d failed: %v", pid, err)
			// NOTE(review): returning here without sending on result
			// leaves the caller blocked on <-result forever — confirm
			// and fix (e.g. always send the thread, attached=false).
			return
		}
		result <- thread
		// Wake waitLoop (non-blocking) in case it is parked on
		// childAttached with no children left to wait for.
		select {
		case t.childAttached <- struct{}{}:
		default:
		}
	}
	return <-result
}
// waitLoop is the single goroutine that reaps stop notifications with
// wait4 and forwards them to the dispatch loop. When there are no
// traced children left (ECHILD) it parks until traceThread signals a
// fresh attachment via childAttached.
func (t *PTracer) waitLoop() {
	var (
		status syscall.WaitStatus
		pid    int
		err    error
	)
	for {
		//log.Printf("Waiting...")
		pid, err = syscall.Wait4(-1, &status, syscall.WALL, nil)
		if err != nil && err.(syscall.Errno) == syscall.ECHILD {
			// No children to wait4 right now; park until one appears.
			<-t.childAttached
			continue
		}
		if err != nil {
			log.Printf(" Wait failed: %v %d", err, err.(syscall.Errno))
			return
		}
		//log.Printf("  PID %d stopped with signal %#x", pid, status)
		t.stopped <- stopped{pid, status}
	}
}
// loop serializes everything onto one locked OS thread: API operations
// (ops), stop notifications from waitLoop, and shutdown. The lock is
// required because all ptrace calls must come from the same thread
// (see the PTracer struct comment).
func (t *PTracer) loop() {
	runtime.LockOSThread()
	for {
		select {
		case op := <-t.ops:
			op()
		case stopped := <-t.stopped:
			t.handleStopped(stopped.pid, stopped.status)
		case <-t.quit:
			return
		}
	}
}
// handleStopped dispatches one wait4 stop for pid — first stop after
// attach (set options), syscall-enter/exit stop, PTRACE_EVENT stop
// (clone), thread exit, or a plain delivered signal — and then
// restarts the thread in syscall-trace mode.
func (t *PTracer) handleStopped(pid int, status syscall.WaitStatus) {
	signal := syscall.Signal(0)
	target, err := t.thread(pid)
	if err != nil {
		log.Printf("thread failed: %v", err)
		return
	}
	if !target.attached {
		// First stop after PTRACE_ATTACH: request syscall-stop
		// marking (SIGTRAP|0x80) and clone events.
		target.attached = true
		err = syscall.PtraceSetOptions(pid, ptraceOptions)
		if err != nil {
			log.Printf("SetOptions failed, pid=%d, err=%v", pid, err)
			return
		}
	} else if status.Stopped() && status.StopSignal() == syscall.SIGTRAP|ptraceTracesysgoodBit {
		// pid entered Syscall-enter-stop or syscall-exit-stop
		target.syscallStopped()
	} else if status.Stopped() && status.StopSignal() == syscall.SIGTRAP {
		// pid entered PTRACE_EVENT stop
		switch status.TrapCause() {
		case syscall.PTRACE_EVENT_CLONE:
			err := target.handleClone(pid)
			if err != nil {
				log.Printf("clone failed: %v", err)
				return
			}
		default:
			log.Printf("Unknown PTRACE_EVENT %d for pid %d", status.TrapCause(), pid)
		}
	} else if status.Exited() || status.Signaled() {
		// "tracer can safely assume pid will exit"
		t.threadExited(target)
		return
	} else if status.Stopped() {
		// tracee received a non-trace related signal: re-deliver it on
		// restart — unless it's our own SIGSTOP sent while detaching.
		signal = status.StopSignal()
		if signal == syscall.SIGSTOP && target.process.detaching {
			t.detachThread(target)
			return
		}
	} else {
		// unknown stop - shouldn't happen!
		log.Printf("Pid %d random stop with status %x", pid, status)
	}
	// Restart stopped caller in syscall trap mode.
	// log.Printf("Restarting pid %d with signal %d", pid, int(signal))
	err = syscall.PtraceSyscall(pid, int(signal))
	if err != nil {
		log.Printf("PtraceSyscall failed, pid=%d, err=%v", pid, err)
	}
}
// detachThread detaches from one stopped thread and removes it from
// the bookkeeping. When the process's last thread goes, the process is
// forgotten and its detached channel is closed, releasing StopTracing.
func (t *PTracer) detachThread(thread *thread) {
	syscall.PtraceDetach(thread.tid)
	process := thread.process
	delete(process.threads, thread.tid)
	delete(t.threads, thread.tid)
	if len(process.threads) == 0 {
		delete(t.processes, process.pid)
		close(process.detached)
		log.Printf("Process %d detached", process.pid)
	}
}
// pidForTid resolves a thread id to its owning process id by reading
// the Tgid field of /proc/<tid>/status.
func pidForTid(tid int) (pid int, err error) {
	f, err := os.Open(fmt.Sprintf("/proc/%d/status", tid))
	if err != nil {
		return 0, err
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.SplitN(scanner.Text(), ":", 2)
		if fields[0] != "Tgid" {
			continue
		}
		pid, err = strconv.Atoi(strings.TrimSpace(fields[1]))
		return pid, err
	}
	if err := scanner.Err(); err != nil {
		return 0, err
	}
	return 0, fmt.Errorf("Pid not found for proc %d", tid)
}
// thread looks up — or lazily registers — the thread with the given
// tid. Unfortunately we can't propagate fd affinity, as we can't
// reliably determine who clone'd new threads: the pids returned by the
// ptrace calls are in the process' namespace, not ours. So an unknown
// tid is resolved to its owning process via /proc and registered.
func (t *PTracer) thread(tid int) (*thread, error) {
	thread, ok := t.threads[tid]
	if !ok {
		pid, err := pidForTid(tid)
		if err != nil {
			return nil, err
		}
		proc, ok := t.processes[pid]
		if !ok {
			return nil, fmt.Errorf("Got new thread %d for unknown process", tid)
		}
		thread = newThread(tid, proc, t)
		t.threads[tid] = thread
		log.Printf("New thread reported, tid=%d, pid=%d", tid, pid)
	}
	return thread, nil
}
// threadExited forgets a thread that wait4 reported as gone, removing
// it from both the tracer's and its process's thread tables.
func (t *PTracer) threadExited(thread *thread) {
	thread.handleExit()
	delete(t.threads, thread.tid)
	if thread.process != nil {
		delete(thread.process.threads, thread.tid)
	}
}

View File

@@ -1,291 +0,0 @@
package ptrace
import (
"fmt"
"log"
"net"
"syscall"
"unsafe"
)
// Syscall numbers
const (
READ = 0
WRITE = 1
OPEN = 2
CLOSE = 3
STAT = 4
MMAP = 9
MPROTECT = 10
MUNMAP = 11
SELECT = 23
MADVISE = 28
SOCKET = 41
CONNECT = 42
ACCEPT = 43
SENDTO = 44
RECVFROM = 45
SHUTDOWN = 48
CLONE = 56
GETTIMEOFDAY = 96
GETID = 186
FUTEX = 202
SETROBUSTLIST = 273
ACCEPT4 = 288
)
// States for a given thread
const (
NORMAL = iota
INSYSCALL
)
type thread struct {
tid int
attached bool
process *process // might be nil!
tracer *PTracer
state int
callRegs syscall.PtraceRegs
resultRegs syscall.PtraceRegs
currentIncoming map[int]*Fd
currentOutgoing map[int]*Fd
closedOutgoing []*Fd
}
// newThread allocates the tracking state for one thread.
func newThread(pid int, process *process, tracer *PTracer) *thread {
	return &thread{
		tid:             pid,
		process:         process,
		tracer:          tracer,
		currentIncoming: make(map[int]*Fd),
		currentOutgoing: make(map[int]*Fd),
	}
}
// syscallStopped is called by the tracer loop on every syscall stop.
// The first stop of a pair (syscall-enter) records the argument
// registers; the second (syscall-exit) reads the result registers and
// dispatches to the per-syscall handler.
func (t *thread) syscallStopped() {
	var err error
	if t.state == NORMAL {
		// Syscall-enter: capture the arguments only.
		if err = syscall.PtraceGetRegs(t.tid, &t.callRegs); err != nil {
			t.logf("GetRegs failed, pid=%d, err=%v", t.tid, err)
		}
		t.state = INSYSCALL
		return
	}
	// Syscall-exit.
	t.state = NORMAL
	if err = syscall.PtraceGetRegs(t.tid, &t.resultRegs); err != nil {
		t.logf("GetRegs failed, pid=%d, err=%v", t.tid, err)
		return
	}
	if t.process == nil {
		t.logf("Got syscall, but don't know parent process!")
		return
	}
	switch t.callRegs.Orig_rax {
	case ACCEPT, ACCEPT4:
		t.handleAccept(&t.callRegs, &t.resultRegs)
	case CLOSE:
		t.handleClose(&t.callRegs, &t.resultRegs)
	case CONNECT:
		t.handleConnect(&t.callRegs, &t.resultRegs)
	case READ, WRITE, RECVFROM, SENDTO:
		t.handleIO(&t.callRegs, &t.resultRegs)
	// we can ignore these syscalls
	case SETROBUSTLIST, GETID, MMAP, MPROTECT, MADVISE, SOCKET, CLONE, STAT, SELECT:
		return
	case OPEN, FUTEX, SHUTDOWN, GETTIMEOFDAY, MUNMAP:
		return
	default:
		// Anything else is just logged for visibility.
		t.logf("syscall(%d)", t.callRegs.Orig_rax)
	}
}
// ntohl swaps the two bytes of a 16-bit value between network and host
// order. NOTE(review): despite the name these are ntohs (16-bit)
// semantics; renaming would require updating callers.
func ntohl(b uint16) uint16 {
	return b<<8 | b>>8
}
// getSocketAddress reads a sockaddr out of the tracee's memory at ptr.
// Only AF_INET is decoded; for other families it returns zero values
// with a nil error. The port is byte-swapped to host order.
func (t *thread) getSocketAddress(ptr uintptr) (addr net.IP, port uint16, err error) {
	var (
		buf  = make([]byte, syscall.SizeofSockaddrAny)
		read int
	)
	if ptr == 0 {
		err = fmt.Errorf("Null ptr")
		return
	}
	read, err = syscall.PtracePeekData(t.tid, ptr, buf)
	if read != syscall.SizeofSockaddrAny || err != nil {
		return
	}
	var sockaddr4 = (*syscall.RawSockaddrInet4)(unsafe.Pointer(&buf[0]))
	if sockaddr4.Family != syscall.AF_INET {
		return
	}
	addr = net.IP(sockaddr4.Addr[0:])
	port = ntohl(sockaddr4.Port)
	return
}
// handleAccept records an accepted incoming connection: it lazily
// registers the listening fd, then creates a connection Fd for the new
// descriptor returned by accept(2).
// NOTE(review): this reads the fd and sockaddr pointer from the exit
// registers (result.Rdi/Rsi) while handleClose uses the entry
// registers — presumably equivalent if argument registers survive the
// syscall, but worth confirming and making consistent.
func (t *thread) handleAccept(call, result *syscall.PtraceRegs) {
	var (
		err             error
		ok              bool
		listeningFdNum  int
		connectionFdNum int
		addrPtr         uintptr
		addr            net.IP
		port            uint16
		listeningFd     *Fd
		connection      *Fd
	)
	listeningFdNum = int(result.Rdi)
	connectionFdNum = int(result.Rax)
	addrPtr = uintptr(result.Rsi)
	addr, port, err = t.getSocketAddress(addrPtr)
	if err != nil {
		t.logf("failed to read sockaddr: %v", err)
		return
	}
	listeningFd, ok = t.process.fds[listeningFdNum]
	if !ok {
		// First accept on this listener: learn its local address.
		listeningFd, err = newListeningFd(t.process.pid, listeningFdNum)
		if err != nil {
			t.logf("Failed to read listening port: %v", err)
			return
		}
		t.process.fds[listeningFdNum] = listeningFd
	}
	connection, err = listeningFd.newConnection(addr, port, connectionFdNum)
	if err != nil {
		t.logf("Failed to create connection fd: %v", err)
		return
	}
	t.process.newFd(connection)
	t.logf("Accepted connection from %s:%d -> %s:%d on fd %d, new fd %d",
		addr, port, connection.ToAddr, connection.ToPort, listeningFdNum, connectionFdNum)
}
// handleConnect records an outgoing connection made via connect(2),
// reading the remote sockaddr from the tracee's memory and the local
// endpoint from /proc.
func (t *thread) handleConnect(call, result *syscall.PtraceRegs) {
	fd := int(result.Rdi)
	ptr := result.Rsi
	addr, port, err := t.getSocketAddress(uintptr(ptr))
	if err != nil {
		t.logf("failed to read sockaddr: %v", err)
		return
	}
	connection, err := newConnectionFd(t.process.pid, fd, addr, port)
	if err != nil {
		t.logf("Failed to create connection fd: %v", err)
		return
	}
	t.process.newFd(connection)
	t.logf("Made connection from %s:%d -> %s:%d on fd %d",
		connection.FromAddr, connection.FromPort,
		connection.ToAddr, connection.ToPort, fd)
}
// handleClose finalizes a connection fd on close(2). An incoming
// connection adopts this thread's outgoing connections (both still
// open and already closed) as causal children and is handed to the
// store; finally the fd number is scrubbed from every thread's state.
func (t *thread) handleClose(call, result *syscall.PtraceRegs) {
	fdNum := int(call.Rdi)
	fd, ok := t.process.fds[fdNum]
	if !ok {
		t.logf("Got close unknown fd %d", fdNum)
		return
	}
	//t.logf("Closing fd %d", fdNum)
	fd.close()
	// if this connection was incoming, add it to 'the registry'
	if fd.direction == incoming {
		// collect all the outgoing connections this thread has made
		// and treat them as caused by this incoming connection
		for _, outgoing := range t.currentOutgoing {
			//t.logf("Fd %d caused %d", fdNum, outgoing.fd)
			fd.Children = append(fd.Children, outgoing)
		}
		t.currentOutgoing = map[int]*Fd{}
		for _, outgoing := range t.closedOutgoing {
			//t.logf("Fd %d caused %d", fdNum, outgoing.fd)
			fd.Children = append(fd.Children, outgoing)
		}
		t.closedOutgoing = []*Fd{}
		t.tracer.store.RecordConnection(t.process.pid, fd)
	}
	// now make sure we've removed it from everywhere
	delete(t.process.fds, fdNum)
	for _, thread := range t.process.threads {
		delete(thread.currentIncoming, fdNum)
		// An outgoing fd closed elsewhere is kept so its eventual
		// parent (the incoming connection) can still adopt it.
		if _, ok := thread.currentOutgoing[fdNum]; ok {
			thread.closedOutgoing = append(thread.closedOutgoing, fd)
		}
		delete(thread.currentOutgoing, fdNum)
	}
}
// handleIO notes read/write activity on a known connection fd so the
// connection can later be attributed to this thread: incoming fds it
// served, outgoing fds it used.
func (t *thread) handleIO(call, result *syscall.PtraceRegs) {
	fdNum := int(call.Rdi)
	fd, ok := t.process.fds[fdNum]
	if !ok {
		//t.logf("IO on unknown fd %d", fdNum)
		return
	}
	if fd.direction == incoming {
		//t.logf("IO on incoming connection %d; setting affinity", fdNum)
		t.currentIncoming[fdNum] = fd
	} else {
		//t.logf("IO on outgoing connection %d; setting affinity", fdNum)
		t.currentOutgoing[fdNum] = fd
	}
}
// handleClone is called on a PTRACE_EVENT_CLONE stop. The new thread
// is only logged here; it is registered later when its first stop is
// reported to the tracer.
func (t *thread) handleClone(pid int) error {
	// We can't use the pid in the process, as it may be in a namespace
	newPid, err := syscall.PtraceGetEventMsg(pid)
	if err != nil {
		log.Printf("PtraceGetEventMsg failed: %v", err)
		return err
	}
	t.logf("New thread clone'd, pid=%d", newPid)
	return nil
}
// handleExit is invoked when wait4 reports this thread as gone.
func (t *thread) handleExit() {
	t.logf("Exiting")
}
// logf logs with a "[thread <tid>]" prefix.
func (t *thread) logf(fmt string, args ...interface{}) {
	log.Printf("[thread %d] "+fmt, append([]interface{}{t.tid}, args...)...)
}

View File

@@ -1,49 +0,0 @@
#!/bin/bash
# Control script for the weavetracer container: launch/stop the
# container, attach/detach the tracer to a process, and dump traces.
set -eu

usage() {
    echo "Usage: $0 launch|stop|attach <pid|name>|detach <pid|name>|traces" >&2
    # Bug fix: usage previously did not exit, so with no arguments the
    # script fell through to "$1" and died with an unbound-variable
    # error under `set -u`.
    exit 1
}

PORT=6060
CONTAINER_NAME=weavetracer
IMAGE_NAME=tomwilkie/tracer

[ $# -gt 0 ] || usage
COMMAND=$1
shift 1

case "$COMMAND" in
    launch)
        docker rm -f $CONTAINER_NAME || true
        docker run --privileged --net=host --pid=host -ti -v /var/run/docker.sock:/var/run/docker.sock \
            --name $CONTAINER_NAME $IMAGE_NAME
        ;;
    stop)
        docker stop $CONTAINER_NAME || true
        docker rm $CONTAINER_NAME >/dev/null || true
        ;;
    attach)
        [ $# -gt 0 ] || usage
        PID=$1
        # Accept a process name as well as a numeric pid.
        if [ -z "${PID##*[!0-9]*}" ]; then
            PID=$(pgrep $PID)
        fi
        curl -X POST http://localhost:$PORT/pid/$PID
        ;;
    detach)
        [ $# -gt 0 ] || usage
        PID=$1
        if [ -z "${PID##*[!0-9]*}" ]; then
            PID=$(pgrep $PID)
        fi
        curl -X DELETE http://localhost:$PORT/pid/$PID
        ;;
    traces)
        curl http://localhost:$PORT/trace
        ;;
    *)
        usage
        ;;
esac

View File

@@ -1,4 +0,0 @@
container.json
traces.json
node_modules

View File

@@ -1,10 +0,0 @@
Develop UI
##########
* npm install
* npm start
`server.js` serves a fake backend. You can add `container.json` and
`traces.json` to this directory to serve them as /traces and /container
respectively.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 741 B

View File

@@ -1,393 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Weave Tracer</title>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://cask.scotch.io/bootstrap-4.0-flex.css">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/font-awesome.min.css">
<link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet' type='text/css'>
<!-- Latest compiled and minified JavaScript -->
<script src="http://code.jquery.com/jquery-2.1.4.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/handlebars.js/3.0.3/handlebars.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.10.6/moment.min.js"></script>
<script src="sprintf.min.js"></script>
<meta name="viewport" content="width=device-width, initial-scale=1">
<script>
$(function () {
var currentContainer = null;
var expandedTrace = null;
var containersByID = {};
var containersByPID = {};
var containers = [];
var traces = [];
Handlebars.registerHelper('isSelected', function(input) {
if (currentContainer && input === currentContainer.ID) {
return 'class=selected';
}
});
Handlebars.registerHelper('isExpanded', function(options) {
if (expandedTrace && this.Key === expandedTrace) {
return options.fn(this)
}
});
// containerName renders a label for a trace: "<container> (<pid>)"
// when the pid maps to a known container, otherwise the remote
// endpoint "addr:port".
function containerName(trace) {
    var container = containersByPID[trace.PID]
    if (!container) {
        return sprintf("%s:%d", trace.ToAddr, trace.ToPort)
    }
    return sprintf("%s (%d)", container.Name, trace.PID)
}
Handlebars.registerHelper('containerName', containerName);
Handlebars.registerHelper('spaces', function(input) {
return new Array(input + 1).join("> ");
});
Handlebars.registerHelper('ts', function(input) {
var ts = moment(input).format("LTS")
return new Handlebars.SafeString(ts);
});
Handlebars.registerHelper('duration', function(input) {
var durationText = formatDuration(input);
return new Handlebars.SafeString(durationText);
});
// numChildren returns the total number of descendants of a trace
// (children, grandchildren, ...). A null Children list counts as 0.
function numChildren(input) {
    if (input.Children === null) {
        return 0
    }
    var total = input.Children.length;
    for (var i = 0; i < input.Children.length; i++) {
        total += numChildren(input.Children[i]);
    }
    return total;
}
Handlebars.registerHelper('count', function(input) {
return sprintf("%d", numChildren(input));
});
Handlebars.registerHelper('childTitle', function() {
var duration = formatDuration(this);
return '[' + duration + '] ' + containerName(this);
});
Handlebars.registerHelper('childWrapperStyle', function() {
var parentSpan = this.ParentStop - this.ParentStart; // = 100%
var span = (this.Stop - this.Start) / parentSpan * 100;
var offset = (this.Start - this.ParentStart) / parentSpan * 100;
return 'width:' + span + '%; left:' + offset + '%;';
});
Handlebars.registerHelper('childStyle', function() {
var color = shadeColor(weaveRed, this.Level / 5);
return 'width: 100%; background-color:' + color;
});
Handlebars.registerPartial('traces', $("#traces").html());
Handlebars.registerPartial('children', $("#children").html());
Handlebars.registerPartial('childrenDetails', $("#childrenDetails").html());
// render compiles the main Handlebars template and replaces the page
// body with the current containers, selection, and traces.
function render() {
    var template = $('script#process-template').text();
    template = Handlebars.compile(template);
    var rendered = template({
        containers: containers,
        container: currentContainer,
        traces: traces
    });
    $('body').html(rendered);
}
// updateContainers polls /container every 5 seconds, sorts the list by
// name, rebuilds the by-ID and by-PID indexes, auto-selects the first
// container on first load, and re-renders the page.
function updateContainers() {
    $.get("/container").done(function (data) {
        data.sort(function (a, b) {
            if (a.Name > b.Name) {
                return 1;
            }
            if (a.Name < b.Name) {
                return -1;
            }
            // a must be equal to b
            return 0;
        });
        containers = data;
        containersByID = {};
        containersByPID = {};
        $.each(data, function(i, container) {
            containersByID[container.ID] = container
            $.each(container.PIDs, function(i, pid) {
                containersByPID[pid] = container
            });
        });
        // auto-select first container
        if (containers.length && currentContainer === null) {
            currentContainer = containersByID[containers[0].ID];
        }
        render();
        window.setTimeout(updateContainers, 5 * 1000);
    });
}
updateContainers()
var weaveRed = '#FF4B19';
// shadeColor interpolates each RGB channel of a "#rrggbb" color toward
// white (percent > 0) or black (percent < 0) by |percent| (0..1),
// returning a new "#rrggbb" string.
function shadeColor(color, percent) {
    var f=parseInt(color.slice(1),16),t=percent<0?0:255,p=percent<0?percent*-1:percent,R=f>>16,G=f>>8&0x00FF,B=f&0x0000FF;
    return "#"+(0x1000000+(Math.round((t-R)*p)+R)*0x10000+(Math.round((t-G)*p)+G)*0x100+(Math.round((t-B)*p)+B)).toString(16).slice(1);
}
// formatDuration renders the Start→Stop span of a trace: seconds with
// two decimals under a minute, otherwise moment's humanized duration.
function formatDuration(input) {
    var ms = input.Stop - input.Start
    if (ms < 60000) {
        return sprintf("%0.2fs", ms / 1000);
    }
    var ds = moment.duration(ms).humanize();
    return ds;
}
// addParentTimeToTrace copies each trace's start/stop onto its
// children (recursively) as ParentStart/ParentStop, so the waterfall
// template can compute relative offsets and widths.
function addParentTimeToTrace(trace) {
    var details = trace.ServerDetails || trace.ClientDetails;
    trace.Children && $.each(trace.Children, function(i, childTrace) {
        childTrace.ParentStart = details.Start;
        childTrace.ParentStop = details.Stop;
        addParentTimeToTrace(childTrace);
    });
}
// fetchTraces polls /traces every 2 seconds, annotates children with
// their parent's time span, and re-renders.
function fetchTraces() {
    $.get("/traces").done(function (data) {
        traces = data;
        traces && $.each(traces, function(i, trace) {
            addParentTimeToTrace(trace);
        });
        render();
        window.setTimeout(fetchTraces, 2 * 1000);
    });
}
fetchTraces();
$("body").on("click", "ul.containers li", function() {
var container = containersByID[$(this).attr("id")]
currentContainer = container
render()
})
$("body").on("click", "div.mainview button.start", function() {
var id = $(this).parent().data("containerId")
var container = containersByID[id]
$.post(sprintf("/container/%s", container.ID))
})
$("body").on("click", "div.mainview button.stop", function() {
var id = $(this).parent().data("containerId")
var container = containersByID[id]
$.ajax({
url: sprintf("/container/%s", container.ID),
type: 'DELETE',
});
})
$("body").on("click", "table tr.trace", function() {
var key = $(this).data("key");
if (expandedTrace === key) {
expandedTrace = null;
} else {
expandedTrace = key;
}
render()
})
})
</script>
<style>
body {
height: 100%;
width: 100%;
color: #46466a;
font-family: "Roboto", sans-serif;
}
.logo {
margin-bottom: 30px;
}
.container-fluid {
background: linear-gradient(30deg, #e2e2ec 0%, #fafafc 100%);
padding: 30px 45px;
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
overflow-y: scroll;
}
ul.containers li {
cursor: pointer;
list-style: none;
font-size: 85%;
margin-bottom: 0.5rem;
opacity: 0.7;
}
ul.containers li.selected {
opacity: 1;
}
.heading {
margin: 10px 40px;
opacity: 0.4;
text-transform: uppercase;
}
.btn-default {
text-transform: uppercase;
margin-top: 3px;
opacity: 0.8;
}
h2 {
font-weight: normal;
margin-left: 1.5rem;
margin-right: 2rem;
margin-bottom: 1rem;
}
table {
width: 100%;
}
th {
text-transform: uppercase;
opacity: 0.4;
font-weight: normal;
}
tr.trace {
cursor: pointer;
}
.mainview {
margin-top: 2rem;
}
.table td {
font-size: 80%;
font-family: monospace;
padding-left: 0.5rem;
}
.table-striped tbody tr:nth-of-type(odd) {
background-color: #e2e2ec;
}
.childBox {
padding: 0.25rem;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.childBoxWrapper {
position: absolute;
}
.childRow {
position: relative;
height: 100px;
}
</style>
<script type="text/x-handlebars-template" id="traces">
{{#.}}
<tr class="trace" data-key="{{Key}}">
{{#if ClientDetails}}
{{#with ClientDetails}}
<td>{{spaces ../Level}}{{ts Start}}</td>
<td>{{containerName . PID=../PID}}</td>
<td>{{duration .}}</td>
<td>{{FromAddr}}:{{FromPort}}</td>
<td>{{ToAddr}}:{{ToPort}}</td>
<td>{{count ../.}}</td>
{{/with}}
{{else}}
{{#with ServerDetails}}
<td>{{spaces ../Level}}{{ts Start}}</td>
<td>{{containerName ../.}}</td>
<td>{{duration .}}</td>
<td>{{FromAddr}}:{{FromPort}}</td>
<td>{{ToAddr}}:{{ToPort}}</td>
<td>{{count ../.}}</td>
{{/with}}
{{/if}}
</tr>
{{#isExpanded}}
<tr>
<td colspan="6" style="padding: 0 0.5rem;">
<div style="width: 100%; background-color: #FF4B19; height: 4px;"></div>
{{>children Children}}
</td>
</tr>
{{/isExpanded}}
{{/.}}
</script>
<script type="text/x-handlebars-template" id="childrenDetails">
<div style="{{childWrapperStyle}}" class="childBoxWrapper">
<div title="{{childTitle}}" style="{{childStyle}}" class="childBox">
{{childTitle}}
</div>
<div class="childRow">
{{>children Children}}
</div>
</div>
</script>
<script type="text/x-handlebars-template" id="children">
<div class="childRow">
{{#.}}
{{#if ClientDetails}}
{{>childrenDetails ClientDetails Level=../Level PID=../PID Children=../Children ParentStart=../ParentStart ParentStop=../ParentStop}}
{{else}}
{{>childrenDetails ServerDetails Level=../Level PID=../PID Children=../Children ParentStart=../ParentStart ParentStop=../ParentStop}}
{{/if}}
{{/.}}
</div>
</script>
<script type="text/x-handlebars-template" id="process-template">
<div class="container-fluid">
<div class="row">
<div class="col-md-4">
<div class="logo"><img src="logo.svg" width="300"/></div>
<div class="heading">Containers</div>
<ul class="containers">
{{#containers}}
<li {{isSelected ID}} id={{ID}}>{{Name}}</li>
{{/containers}}
</ul>
</div>
<div class="col-md-8 mainview">
{{#if container}}
<h2 class="pull-left">{{container.Name}}</h2>
<div class="btn-group btn-group-sm" role="group" data-container-id="{{container.ID}}">
<button type="button" class="btn btn-default start">
<span class="fa fa-play" aria-hidden="true"></span> Start</button>
<button type="button" class="btn btn-default stop">
<span class="fa fa-stop" aria-hidden="true"></span> Stop</button>
</div>
<table class="table table-sm">
<thead><tr>
<th width="15%">Start time</th>
<th width="25%">Container</th>
<th width="15%">Duration</th>
<th width="15%">From</th>
<th width="15%">To</th>
<th width="15%">Sub-traces</th>
</tr></thead>
<tbody>
{{>traces traces}}
</tbody>
</table>
{{/if}}
</div>
</div>
</div>
</script>
</head>
<body>
</body>
</html>

View File

@@ -1,54 +0,0 @@
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="960px" height="173px" viewBox="0 0 960 173" enable-background="new 0 0 960 173" xml:space="preserve">
<path fill="#32324B" d="M51.937,95.165l75.419-67.366c-5.989-4.707-12.71-8.52-19.981-11.211l-55.438,49.52V95.165z"/>
<path fill="#32324B" d="M30.265,85.465l-20.431,18.25c1.86,7.57,4.88,14.683,8.87,21.135l11.561-10.326V85.465z"/>
<path fill="#00D2FF" d="M92.276,30.074V12.768C88.815,12.27,85.282,12,81.685,12c-3.766,0-7.465,0.286-11.079,0.828v36.604
L92.276,30.074z"/>
<path fill="#00D2FF" d="M92.276,131.874V59.133L70.605,78.49v80.682c3.614,0.543,7.313,0.828,11.079,0.828
c4.41,0,8.723-0.407,12.921-1.147l58.033-51.838c1.971-6.664,3.046-13.712,3.046-21.015c0-3.439-0.254-6.817-0.708-10.132
L92.276,131.874z"/>
<path fill="#FF4B19" d="M92.276,110.518l58.14-51.933c-2.77-6.938-6.551-13.358-11.175-19.076L92.276,81.46V110.518z"/>
<path fill="#FF4B19" d="M70.605,100.817l-18.668,16.676V18.242c-8.086,3.555-15.409,8.513-21.672,14.567V139.19
c4.885,4.724,10.409,8.787,16.444,12.03l23.896-21.345V100.817z"/>
<polygon fill="#32324B" points="262.563,101.099 276.389,49.22 294.955,49.22 274.414,121.377 252.556,121.377 240.311,72.79
228.065,121.377 206.207,121.377 185.666,49.22 204.232,49.22 218.058,101.099 231.752,49.22 248.869,49.22 "/>
<path fill="#32324B" d="M363.429,97.676c-2.106,14.352-13.167,24.623-32.128,24.623c-20.146,0-35.025-12.114-35.025-36.605
c0-24.622,15.406-37.395,35.025-37.395c21.726,0,33.182,15.933,33.182,37.263v3.819h-49.772c0,8.031,3.291,18.17,16.327,18.17
c7.242,0,12.904-3.555,14.353-10.27L363.429,97.676z M345.654,76.608c-0.659-10.008-7.11-13.694-14.484-13.694
c-8.427,0-14.879,5.135-15.801,13.694H345.654z"/>
<path fill="#32324B" d="M417.628,74.634v-2.502c0-5.662-2.37-9.351-13.036-9.351c-13.298,0-13.694,7.375-13.694,9.877h-17.117
c0-10.666,4.477-24.359,31.338-24.359c25.676,0,30.285,12.771,30.285,23.174v39.766c0,2.897,0.131,5.267,0.395,7.11l0.527,3.028
h-18.172v-7.241c-5.134,5.134-12.245,8.163-22.384,8.163c-14.221,0-25.018-8.296-25.018-22.648c0-16.59,15.67-20.146,21.99-21.199
L417.628,74.634z M417.628,88.195l-6.979,1.054c-3.819,0.658-8.427,1.315-11.192,1.843c-3.029,0.527-5.662,1.186-7.637,2.765
c-1.844,1.449-2.765,3.425-2.765,5.926c0,2.107,0.79,8.69,10.666,8.69c5.793,0,10.928-2.105,13.693-4.872
c3.556-3.555,4.214-8.032,4.214-11.587V88.195z"/>
<polygon fill="#32324B" points="486.495,121.377 462.399,121.377 438.698,49.221 458.186,49.221 474.775,104.392 491.499,49.221
510.459,49.221 "/>
<path fill="#32324B" d="M578.273,97.676c-2.106,14.352-13.167,24.623-32.128,24.623c-20.146,0-35.025-12.114-35.025-36.605
c0-24.622,15.406-37.395,35.025-37.395c21.726,0,33.182,15.933,33.182,37.263v3.819h-49.772c0,8.031,3.291,18.17,16.327,18.17
c7.242,0,12.904-3.555,14.354-10.27L578.273,97.676z M560.498,76.608c-0.659-10.008-7.109-13.694-14.483-13.694
c-8.428,0-14.88,5.135-15.802,13.694H560.498z"/>
<g>
<path fill="#32324B" d="M615.355,120.575c-11.88,0-15.841-5.017-15.841-16.765V58.797h-11.616v-9.108h11.616V27.776h10.825v21.912
h18.216v9.108H610.34v42.241c0,8.977,1.979,10.164,9.107,10.164c1.98,0,7.524-0.396,8.185-0.396v8.976
C627.632,119.782,622.22,120.575,615.355,120.575z"/>
<path fill="#32324B" d="M674.095,60.777c-12.672,0-21.517,4.355-21.517,20.988v38.413h-10.824v-70.49h10.692v12.145
c4.62-8.977,13.992-12.145,21.385-12.145h5.016v11.089H674.095z"/>
<path fill="#32324B" d="M732.703,75.957v-3.828c0-9.768-4.224-14.388-16.633-14.388c-13.332,0-17.028,6.072-17.556,13.2H687.69
c0-9.108,5.147-22.177,28.116-22.177c24.157,0,27.721,12.54,27.721,23.761v36.301c0,3.301,0.132,8.58,0.924,11.353h-10.956
c0,0-0.264-3.828-0.264-9.504c-3.433,4.487-10.297,10.428-24.685,10.428c-13.597,0-23.761-7.92-23.761-21.12
c0-14.652,11.748-18.613,18.876-20.064C710.395,78.598,732.703,75.957,732.703,75.957z M732.703,84.67
c0,0-15.312,1.979-23.101,3.168c-7.524,1.056-13.597,3.564-13.597,11.88c0,6.996,4.753,12.276,14.389,12.276
c13.332,0,22.309-7.656,22.309-19.272V84.67z"/>
<path fill="#32324B" d="M790.649,111.994c10.429,0,17.953-5.939,19.009-16.632h10.957c-1.98,17.028-13.597,25.74-29.966,25.74
c-18.744,0-32.076-12.012-32.076-35.905c0-23.76,13.464-36.433,32.209-36.433c16.104,0,27.721,8.712,29.568,25.213h-10.956
c-1.452-11.353-9.24-16.104-18.877-16.104c-12.012,0-20.856,8.448-20.856,27.324C769.661,104.471,778.901,111.994,790.649,111.994z
"/>
<path fill="#32324B" d="M895.062,96.022c-1.452,12.408-10.032,25.08-30.229,25.08c-18.745,0-32.341-12.804-32.341-36.037
c0-21.912,13.464-36.301,32.209-36.301c19.8,0,30.757,14.784,30.757,38.018h-51.878c0.265,13.332,5.809,25.212,21.385,25.212
c11.484,0,18.217-7.128,19.141-16.104L895.062,96.022z M883.71,78.201c-1.056-14.916-9.636-20.328-19.272-20.328
c-10.824,0-19.141,7.26-20.46,20.328H883.71z"/>
<path fill="#32324B" d="M942.976,60.777c-12.672,0-21.517,4.355-21.517,20.988v38.413h-10.824v-70.49h10.692v12.145
c4.62-8.977,13.992-12.145,21.385-12.145h5.016v11.089H942.976z"/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 4.9 KiB

View File

@@ -1,15 +0,0 @@
{
"name": "scope-tracer",
"version": "1.0.0",
"description": "",
"main": "server.js",
"scripts": {
"start": "node server.js",
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "David Kaltschmidt <david.kaltschmidt@gmail.com>",
"license": "UNLICENSED",
"devDependencies": {
"express": "^4.13.3"
}
}

View File

@@ -1,25 +0,0 @@
// Minimal Express server for the Scope Tracer demo UI: serves the UI
// entry point, two canned JSON fixtures, and the rest of this
// directory as static files.
var express = require('express')
var app = express();
// UI entry point.
app.get('/', function(req, res) {
    res.sendFile(__dirname + '/index.html');
});
// Canned fixture data consumed by the UI.
app.get('/container', function(req, res) {
    res.sendFile(__dirname + '/container.json');
});
app.get('/traces', function(req, res) {
    res.sendFile(__dirname + '/traces.json');
});
app.use(express.static('./'));
// Port is overridable via the environment; 4050 by default.
var port = process.env.PORT || 4050;
var server = app.listen(port, function () {
    var host = server.address().address;
    var port = server.address().port;
    console.log('Scope Tracer UI listening at http://%s:%s', host, port);
});

View File

@@ -1,4 +0,0 @@
/*! sprintf-js | Alexandru Marasteanu <hello@alexei.ro> (http://alexei.ro/) | BSD-3-Clause */
!function(a){function b(){var a=arguments[0],c=b.cache;return c[a]&&c.hasOwnProperty(a)||(c[a]=b.parse(a)),b.format.call(null,c[a],arguments)}function c(a){return Object.prototype.toString.call(a).slice(8,-1).toLowerCase()}function d(a,b){return Array(b+1).join(a)}var e={not_string:/[^s]/,number:/[dief]/,json:/[j]/,not_json:/[^j]/,text:/^[^\x25]+/,modulo:/^\x25{2}/,placeholder:/^\x25(?:([1-9]\d*)\$|\(([^\)]+)\))?(\+)?(0|'[^$])?(-)?(\d+)?(?:\.(\d+))?([b-fijosuxX])/,key:/^([a-z_][a-z_\d]*)/i,key_access:/^\.([a-z_][a-z_\d]*)/i,index_access:/^\[(\d+)\]/,sign:/^[\+\-]/};b.format=function(a,f){var g,h,i,j,k,l,m,n=1,o=a.length,p="",q=[],r=!0,s="";for(h=0;o>h;h++)if(p=c(a[h]),"string"===p)q[q.length]=a[h];else if("array"===p){if(j=a[h],j[2])for(g=f[n],i=0;i<j[2].length;i++){if(!g.hasOwnProperty(j[2][i]))throw new Error(b("[sprintf] property '%s' does not exist",j[2][i]));g=g[j[2][i]]}else g=j[1]?f[j[1]]:f[n++];if("function"==c(g)&&(g=g()),e.not_string.test(j[8])&&e.not_json.test(j[8])&&"number"!=c(g)&&isNaN(g))throw new TypeError(b("[sprintf] expecting number but found %s",c(g)));switch(e.number.test(j[8])&&(r=g>=0),j[8]){case"b":g=g.toString(2);break;case"c":g=String.fromCharCode(g);break;case"d":case"i":g=parseInt(g,10);break;case"j":g=JSON.stringify(g,null,j[6]?parseInt(j[6]):0);break;case"e":g=j[7]?g.toExponential(j[7]):g.toExponential();break;case"f":g=j[7]?parseFloat(g).toFixed(j[7]):parseFloat(g);break;case"o":g=g.toString(8);break;case"s":g=(g=String(g))&&j[7]?g.substring(0,j[7]):g;break;case"u":g>>>=0;break;case"x":g=g.toString(16);break;case"X":g=g.toString(16).toUpperCase()}e.json.test(j[8])?q[q.length]=g:(!e.number.test(j[8])||r&&!j[3]?s="":(s=r?"+":"-",g=g.toString().replace(e.sign,"")),l=j[4]?"0"===j[4]?"0":j[4].charAt(1):" ",m=j[6]-(s+g).length,k=j[6]&&m>0?d(l,m):"",q[q.length]=j[5]?s+g+k:"0"===l?s+k+g:k+s+g)}return q.join("")},b.cache={},b.parse=function(a){for(var b=a,c=[],d=[],f=0;b;){if(null!==(c=e.text.exec(b)))d[d.length]=c[0];else 
if(null!==(c=e.modulo.exec(b)))d[d.length]="%";else{if(null===(c=e.placeholder.exec(b)))throw new SyntaxError("[sprintf] unexpected placeholder");if(c[2]){f|=1;var g=[],h=c[2],i=[];if(null===(i=e.key.exec(h)))throw new SyntaxError("[sprintf] failed to parse named argument key");for(g[g.length]=i[1];""!==(h=h.substring(i[0].length));)if(null!==(i=e.key_access.exec(h)))g[g.length]=i[1];else{if(null===(i=e.index_access.exec(h)))throw new SyntaxError("[sprintf] failed to parse named argument key");g[g.length]=i[1]}c[2]=g}else f|=2;if(3===f)throw new Error("[sprintf] mixing positional and named placeholders is not (yet) supported");d[d.length]=c}b=b.substring(c[0].length)}return d};var f=function(a,c,d){return d=(c||[]).slice(0),d.splice(0,0,a),b.apply(null,d)};"undefined"!=typeof exports?(exports.sprintf=b,exports.vsprintf=f):(a.sprintf=b,a.vsprintf=f,"function"==typeof define&&define.amd&&define(function(){return{sprintf:b,vsprintf:f}}))}("undefined"==typeof window?this:window);
//# sourceMappingURL=sprintf.min.js.map

View File

@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Matthew Sackman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,2 +0,0 @@
Skiplist implementation for Go. Comes with tests. Currently no docs,
but it's pretty obvious stuff.

View File

@@ -1,429 +0,0 @@
package skiplist
import (
"fmt"
"math"
"math/rand"
"strings"
)
const (
	// p is the probability that a node present at level i also appears
	// at level i+1; it drives expected node height and capacity growth.
	p            = 0.3
	// defaultDepth is the number of levels a freshly created list has.
	defaultDepth = 2
)

// Comparable is the ordering contract that skiplist keys must satisfy.
type Comparable interface {
	LessThan(Comparable) bool
	Equal(Comparable) bool
}

// SkipList is a probabilistic ordered map from Comparable keys to
// arbitrary values.
type SkipList struct {
	length             uint       // number of real (non-terminus) nodes
	terminus           *Node      // sentinel: head and tail of every level's chain
	levelProbabilities []float32  // promotion threshold per extra level (index 0 = level 2)
	curCapacity        uint       // length at which another level is added (see ensureCapacity)
	curDepth           uint       // current number of levels
	nodes              []Node     // slab of preallocated nodes handed out by getNode
	localRand          *rand.Rand // source for node-height draws
}

// Node is a single key/value entry in a SkipList.
type Node struct {
	Key        Comparable
	Value      interface{}
	heightRand float32 // random draw that determined this node's height
	prev       *Node   // level-0 predecessor (terminus wraps around)
	nexts      []*Node // forward pointers, one per level; len is the node's height
	skiplist   *SkipList
}
// New returns an empty SkipList whose node heights are drawn from rng.
func New(rng *rand.Rand) *SkipList {
	depth := defaultDepth
	// The terminus is a sentinel acting as both head and tail: every
	// level's pointer chain starts and ends at it.
	terminus := &Node{
		heightRand: 0,
		nexts:      make([]*Node, depth),
	}
	terminus.prev = terminus
	for idx := 0; idx < len(terminus.nexts); idx++ {
		terminus.nexts[idx] = terminus
	}
	s := &SkipList{
		length:    0,
		terminus:  terminus,
		curDepth:  uint(depth),
		localRand: rng,
	}
	s.levelProbabilities = []float32{p}
	terminus.skiplist = s
	s.determineCapacity()
	// s.validate()
	return s
}
// determineCapacity recomputes the length threshold at which the list
// should gain another level: (1/p)^curDepth.
func (s *SkipList) determineCapacity() {
	capacity := math.Pow(float64(1.0)/p, float64(s.curDepth))
	s.curCapacity = uint(math.Floor(capacity))
}
// chooseNumLevels draws a random height for a new node, returning both
// the raw draw (stored for later promotion decisions) and the number of
// levels (1-based) it maps to.
func (s *SkipList) chooseNumLevels() (float32, int) {
	r := s.localRand.Float32()
	for idx, threshold := range s.levelProbabilities {
		if r > threshold {
			return r, idx + 1
		}
	}
	return r, len(s.levelProbabilities) + 1
}
// ensureCapacity adds one level to the list once length reaches the
// current capacity, then promotes qualifying existing nodes into it.
func (s *SkipList) ensureCapacity() {
	// defer s.validate()
	if s.length < s.curCapacity {
		return
	}
	// A node is promoted into the new top level when its stored random
	// draw falls at or below threshold (p times the previous threshold).
	threshold := p * s.levelProbabilities[s.curDepth-2]
	s.curDepth++
	s.levelProbabilities = append(s.levelProbabilities, threshold)
	s.determineCapacity()
	// cur and next are just used to walk through the list at lvl. prev
	// records the last node that made it up to the new level.
	cur := s.terminus
	lvl := len(cur.nexts) - 1
	prev := cur
	for {
		next := cur.nexts[lvl]
		if cur.heightRand <= threshold {
			// Promote cur: grow its height and link it in at the new level.
			cur.nexts = append(cur.nexts, s.terminus)
			prev.nexts[lvl+1] = cur
			prev = cur
		}
		if next == s.terminus {
			break
		} else {
			cur = next
		}
	}
}
// getNode hands out a node from the preallocated slab, refilling the
// slab with curCapacity fresh nodes when it runs dry.
func (s *SkipList) getNode() *Node {
	if len(s.nodes) == 0 {
		s.nodes = make([]Node, int(s.curCapacity))
	}
	last := len(s.nodes) - 1
	n := &s.nodes[last]
	s.nodes = s.nodes[:last]
	return n
}
// getEqOrLessThan returns the node with key k if present, otherwise the
// node with the greatest key less than k (the terminus when k sorts
// before everything). When captureDescent is true it also returns, per
// level, the last node visited at that level — the predecessors a
// caller needs in order to splice a new node in.
func (s *SkipList) getEqOrLessThan(cur *Node, k Comparable, captureDescent bool) (*Node, []*Node) {
	// defer s.validate()
	if s.length == 0 {
		return s.terminus, nil
	}
	if cur != s.terminus {
		if k.Equal(cur.Key) {
			return cur, nil
		}
		// k sorts before the supplied start node: restart from the head.
		if k.LessThan(cur.Key) {
			return s.getEqOrLessThan(s.terminus, k, captureDescent)
		}
	}
	// 1. Travel, not-descending, as far as possible
	lvl := len(cur.nexts) - 1
	for {
		n := cur.nexts[lvl]
		if n == s.terminus {
			break
		}
		if n.Key.LessThan(k) {
			cur = n
			lvl = len(cur.nexts) - 1
		} else if n.Key.Equal(k) {
			return n, nil
		} else {
			break
		}
	}
	// 2. Now descend as needed
	var descent []*Node
	if captureDescent {
		descent = make([]*Node, lvl+1)
		descent[lvl] = cur
	}
	for lvl--; lvl >= 0; lvl-- {
		for {
			n := cur.nexts[lvl]
			if n == s.terminus {
				break
			}
			if n.Key.LessThan(k) {
				cur = n
			} else if n.Key.Equal(k) {
				return n, descent
			} else {
				break
			}
		}
		if captureDescent {
			descent[lvl] = cur
		}
	}
	return cur, descent
}
// insert places k -> v into the list, starting the search at cur. An
// existing key has its value overwritten. When n is non-nil it is
// reused as storage for the new entry (see Merge); otherwise a node is
// taken from the slab. Returns the node holding the pair.
func (s *SkipList) insert(cur *Node, k Comparable, v interface{}, n *Node) *Node {
	// defer s.validate()
	// do this first even though we may not need to - if we do it after
	// the getEqOrLessThan call, we may break descent.
	s.ensureCapacity()
	cur, descent := s.getEqOrLessThan(cur, k, true)
	if cur != s.terminus && cur.Key.Equal(k) {
		// Key already present: just replace the value.
		cur.Value = v
		return cur
	}
	// We didn't find k, so cur will be the node immediately prior to
	// where k should go.
	heightRand, height := s.chooseNumLevels()
	if n == nil {
		n = s.getNode()
	}
	n.Key = k
	n.Value = v
	n.heightRand = heightRand
	n.nexts = make([]*Node, height)
	n.prev = cur
	n.skiplist = s
	if len(cur.nexts) >= len(n.nexts) {
		// Predecessor is at least as tall as n: splice in directly.
		for idx := 0; idx < len(n.nexts); idx++ {
			n.nexts[idx] = cur.nexts[idx]
			cur.nexts[idx] = n
		}
	} else {
		// Descent may capture only part of the path: it may be shorter
		// than levels (in the case where the original cur is !=
		// s.terminus) and we reached the correct location without
		// travelling up very far. However, because we didn't find k, we
		// know that all the "lower" levels of descent will be populated
		// (where "lower" is "closer to [0]"), so we just need to fill in
		// the "top".
		if len(n.nexts) > len(descent) {
			_, extra := s.getEqOrLessThan(s.terminus, descent[len(descent)-1].Key, true)
			// Aside: because we know we'll find that Key, all the lower
			// indices of extra will be nil.
			descent = append(descent, extra[len(descent):]...)
		}
		for idx := 0; idx < len(n.nexts); idx++ {
			n.nexts[idx] = descent[idx].nexts[idx]
			descent[idx].nexts[idx] = n
		}
	}
	n._next().prev = n
	s.length++
	return n
}
// remove deletes the node with key k (searching from cur) and returns
// its value, or nil when k is not present.
func (s *SkipList) remove(cur *Node, k Comparable) interface{} {
	// defer s.validate()
	found, _ := s.getEqOrLessThan(cur, k, false)
	if found == s.terminus {
		return nil
	}
	if !found.Key.Equal(k) {
		return nil
	}
	s.removeNode(found)
	found.nullify()
	return found.Value
}
// removeNode unlinks n from every level it appears in. n itself is left
// intact; the caller decides whether to nullify or reuse it.
func (s *SkipList) removeNode(n *Node) {
	// defer s.validate()
	p := n.prev
	n._next().prev = p
	s.length--
	// Splice n out of the levels its predecessor reaches directly.
	for idx := 0; idx < len(p.nexts) && idx < len(n.nexts); idx++ {
		p.nexts[idx] = n.nexts[idx]
	}
	if len(p.nexts) < len(n.nexts) {
		_, descent := s.getEqOrLessThan(s.terminus, p.Key, true)
		// because we know we're going to find Key, the lower indices
		// of descent will be nil. But we know p == n.prev, so all of
		// those pointers will be to n anyway, which we've already
		// dealt with in the previous loop.
		for idx := len(p.nexts); idx < len(n.nexts); idx++ {
			descent[idx].nexts[idx] = n.nexts[idx]
		}
	}
}
// reposition gives cur the new key k, physically relocating the node
// only when k breaks the ordering against its current neighbours.
func (s *SkipList) reposition(cur *Node, k Comparable) {
	// defer s.validate()
	needsMove := false
	if cur != s.terminus {
		if cur.prev != s.terminus && !cur.prev.Key.LessThan(k) {
			needsMove = true
		} else if n := cur._next(); n != s.terminus && !k.LessThan(n.Key) {
			needsMove = true
		}
	}
	if needsMove {
		s.removeNode(cur)
		cur.Key = k
		s.insert(cur.prev, cur.Key, cur.Value, cur)
	} else if cur != s.terminus {
		// Bug fix: even when no relink is needed (the new key still sits
		// between the same neighbours) the node must carry its new key —
		// previously Key was only updated on the needsMove path, so a
		// subsequent Get(k) would miss the node.
		cur.Key = k
	}
}
// First returns the smallest-keyed node, or nil when the list is empty.
func (s *SkipList) First() *Node {
	return s.terminus.Next()
}

// Last returns the largest-keyed node, or nil when the list is empty.
func (s *SkipList) Last() *Node {
	return s.terminus.Prev()
}

// Insert adds (or overwrites) k -> v and returns the node holding it.
func (s *SkipList) Insert(k Comparable, v interface{}) *Node {
	return s.insert(s.terminus, k, v, nil)
}

// Get returns the node with key k, or nil when k is absent.
func (s *SkipList) Get(k Comparable) *Node {
	return s.terminus.Get(k)
}

// Remove deletes key k and returns its value, or nil when k is absent.
func (s *SkipList) Remove(k Comparable) interface{} {
	return s.remove(s.terminus, k)
}

// Len reports the number of elements in the list.
func (s *SkipList) Len() uint {
	return s.length
}
// Merge moves every element of t into s, reusing t's nodes in place.
// NB: this destroys t. Do not use t after this.
func (s *SkipList) Merge(t *SkipList) {
	// defer s.validate()
	// cur tracks the last insertion point so successive (sorted) keys
	// from t don't each restart the search from the head.
	cur := s.terminus
	for n := t.First(); n != nil; {
		m := n.Next() // need to save this out before we destroy it in the insert
		cur = s.insert(cur, n.Key, n.Value, n)
		n = m
	}
}
// validate walks the whole list and panics on any broken invariant:
// prev/next symmetry, level pointers reaching above a target's height,
// key ordering, cycles, and length mismatches. It is referenced
// (commented out) from the mutating methods as a debugging aid.
func (s *SkipList) validate() {
	visited := make(map[*Node]bool, int(s.length))
	cur := s.terminus
	visited[cur] = true
	l := uint(0)
	for {
		if cur != s.terminus {
			l++
		}
		if cur._next().prev != cur {
			panic(fmt.Sprintf("Node (%v) has next pointer to %v, which has prev pointer to %v", cur, cur._next(), cur._next().prev))
		}
		if cur.prev._next() != cur {
			panic(fmt.Sprintf("Node (%v) has prev pointer to %v, which has next pointer to %v", cur, cur.prev, cur.prev._next()))
		}
		for h, n := range cur.nexts {
			if h >= len(n.nexts) {
				panic(fmt.Sprintf("Node (%v) has next pointer at level %v pointing down to node (%v) which has %v height", cur, h, n, len(n.nexts)))
			}
		}
		n := cur._next()
		if n == s.terminus {
			break
		}
		if visited[n] {
			panic(fmt.Sprintf("Node (%v) has next as %v which is already visited!", cur, n))
		}
		if cur != s.terminus && !cur.Key.LessThan(n.Key) {
			panic(fmt.Sprintf("Node keys in wrong order: expecting %v < %v", cur.Key, n.Key))
		}
		if n.prev != cur {
			panic(fmt.Sprintf("Node (%v) has next (%v) which does not point back correctly", cur, n))
		}
		cur = n
	}
	if l != s.length {
		panic(fmt.Sprintf("length is wrong: counted %v but length is %v", l, s.length))
	}
}
// Get searches for k starting from n and returns the node holding it,
// or nil when k is not present.
func (n *Node) Get(k Comparable) *Node {
	m, _ := n.skiplist.getEqOrLessThan(n, k, false)
	// Idiom fix: drop the else after a terminating return (golint
	// indent-error-flow); behavior is unchanged.
	if m != n.skiplist.terminus && m.Key.Equal(k) {
		return m
	}
	return nil
}
// Insert adds (or overwrites) k -> v, starting the search at n.
func (n *Node) Insert(k Comparable, v interface{}) *Node {
	return n.skiplist.insert(n, k, v, nil)
}

// Remove deletes n's own key from the list and returns its value.
func (n *Node) Remove() interface{} {
	return n.skiplist.remove(n, n.Key)
}

// _next is the raw level-0 successor; unlike Next it may return the
// terminus sentinel.
func (n *Node) _next() *Node {
	return n.nexts[0]
}
// Next returns n's successor, or nil at the end of the list.
func (n *Node) Next() *Node {
	// Idiom fix: early return instead of else-after-return.
	if m := n.nexts[0]; m != n.skiplist.terminus {
		return m
	}
	return nil
}
// Prev returns n's predecessor, or nil at the start of the list.
func (n *Node) Prev() *Node {
	// Idiom fix: early return instead of else-after-return.
	if m := n.prev; m != n.skiplist.terminus {
		return m
	}
	return nil
}
// Reposition changes n's key to k, moving the node if required to keep
// the list ordered.
func (n *Node) Reposition(k Comparable) {
	n.skiplist.reposition(n, k)
}

func (n *Node) nullify() {
	// this is called when n is removed from the skiplist. It's really
	// just to ensure that if someone has a reference to n lying
	// around, they can't use it.
	n.prev = nil
	n.nexts = nil
	n.skiplist = nil
}
// String renders the whole list (terminus first, then every node in
// key order) for debugging.
func (s *SkipList) String() string {
	strs := []string{fmt.Sprint(s.terminus)}
	for n := s.terminus._next(); n != s.terminus; n = n._next() {
		strs = append(strs, fmt.Sprint(n))
	}
	return fmt.Sprintf("Skiplist of length %v (counted: %v), levelProbabilities %v, and nodes:\n\t[%v]",
		s.length, len(strs)-1, s.levelProbabilities, strings.Join(strs, ",\n\t "))
}
// String renders the node's key, value, and the key of each forward
// pointer, for debugging.
func (n *Node) String() string {
	nextKeys := make([]string, 0, len(n.nexts))
	for _, m := range n.nexts {
		nextKeys = append(nextKeys, fmt.Sprint(m.Key))
	}
	return fmt.Sprintf("%v -> %v (nexts: [%v])", n.Key, n.Value, strings.Join(nextKeys, ", "))
}

View File

@@ -1,315 +0,0 @@
package skiplist
import (
"math/rand"
"os"
"strconv"
"testing"
)
// intKey adapts int to the skiplist's Comparable interface for tests.
type intKey int

func (sk intKey) LessThan(b Comparable) bool {
	return sk < b.(intKey)
}

func (sk intKey) Equal(b Comparable) bool {
	return sk == b.(intKey)
}

const (
	// keyValsLen is the size of the shared random key fixture.
	keyValsLen = 1000000
)

var (
	rng = rand.New(rand.NewSource(0))
	// keyVals maps each key to its fixture value (filled in TestMain).
	keyVals = make(map[intKey]string)
	// indices is a random permutation of all keys (filled in TestMain).
	indices = make([]intKey, keyValsLen)
)
// TestAddFromEmptyRoot inserts via the list's own Insert and checks
// each pair is stored unchanged, retrievable, and counted.
func TestAddFromEmptyRoot(t *testing.T) {
	for idx := 0; idx < 10; idx++ {
		s := New(rng)
		for idy := 0; idy < idx; idy++ {
			k := indices[idy]
			v := keyVals[k]
			n := s.Insert(k, v)
			if n.Key != k || n.Value != v {
				t.Fatal("Key or Value of inserted pair changed!", k, v, n)
			}
			if m := s.Get(k); n != m {
				t.Fatal("Node changed for just inserted value:", n, m)
			}
			if s.Len() != uint(idy+1) {
				t.Fatal("Incorrect length")
			}
		}
	}
}

// TestAddFromEmptyRel inserts relative to the previously returned node
// (Node.Insert) and checks lookups from both the node and its
// predecessor agree.
func TestAddFromEmptyRel(t *testing.T) {
	for idx := 0; idx < 10; idx++ {
		s := New(rng)
		var n *Node
		for idy := 0; idy < idx; idy++ {
			k := indices[idy]
			v := keyVals[k]
			if n == nil {
				n = s.Insert(k, v)
			} else {
				n = n.Insert(k, v)
			}
			if n.Key != k || n.Value != v {
				t.Fatal("Key or Value of inserted pair changed!", k, v, n)
			}
			if m := n.Get(k); n != m {
				t.Fatal("Node changed for just inserted value:", n, m)
			}
			if p := n.Prev(); p != nil {
				if m := p.Get(k); n != m {
					t.Fatal("Node changed for just inserted value:", n, m)
				}
			}
			if s.Len() != uint(idy+1) {
				t.Fatal("Incorrect length")
			}
		}
	}
}
// TestDupInsert re-inserts the same keys repeatedly and checks the
// returned node still carries the expected pair.
func TestDupInsert(t *testing.T) {
	s := New(rng)
	for idx := 0; idx < 10; idx++ {
		for idy := 0; idy < idx; idy++ {
			k := indices[idy]
			v := keyVals[k]
			if n := s.Insert(k, v); n.Key != k || n.Value != v {
				t.Fatal("Key or Value of inserted pair changed!", k, v, n)
			}
		}
	}
}

// TestGetMissingEmpty checks lookups on an empty list return nil.
func TestGetMissingEmpty(t *testing.T) {
	s := New(rng)
	for idx := 0; idx < 10; idx++ {
		k := indices[idx]
		if n := s.Get(k); n != nil {
			t.Fatal("Expected not to find elem")
		}
	}
}

// TestGetMissingNonEmpty checks lookups of never-inserted keys return
// nil on a list that does contain other keys.
func TestGetMissingNonEmpty(t *testing.T) {
	s := New(rng)
	for idx := 0; idx < 20; idx++ {
		k := indices[idx]
		v := keyVals[k]
		if idx%2 == 0 {
			s.Insert(k, v)
		} else {
			if n := s.Get(k); n != nil {
				t.Fatal("Expected not to find elem")
			}
		}
	}
}
// TestRemoveRoot removes via SkipList.Remove and checks the returned
// value and the shrinking length.
func TestRemoveRoot(t *testing.T) {
	s := New(rng)
	for idx := 0; idx < 20; idx++ {
		k := indices[idx]
		v := keyVals[k]
		s.Insert(k, v)
	}
	for idx := 0; idx < 20; idx++ {
		k := indices[idx]
		v := keyVals[k]
		if u := s.Remove(k); u != v {
			t.Fatal("Wrong value returned:", u, v)
		}
		if int(s.Len()) != 19-idx {
			t.Fatal("Wrong length")
		}
	}
}

// TestRemoveSelf removes each node via its own Remove method.
func TestRemoveSelf(t *testing.T) {
	s := New(rng)
	ns := []*Node{}
	for idx := 0; idx < 20; idx++ {
		k := indices[idx]
		v := keyVals[k]
		ns = append(ns, s.Insert(k, v))
	}
	for idx, n := range ns {
		k := indices[idx]
		v := keyVals[k]
		if u := n.Remove(); u != v {
			t.Fatal("Wrong value returned:", u, v)
		}
		if int(s.Len()) != len(ns)-idx-1 {
			t.Fatal("Wrong length")
		}
	}
}

// TestRemoveMissing checks that removing an already-removed key returns
// nil while removing a still-present key returns its value.
func TestRemoveMissing(t *testing.T) {
	s := New(rng)
	for idx := 0; idx < 20; idx++ {
		k := indices[idx]
		v := keyVals[k]
		s.Insert(k, v)
	}
	for idx := 0; idx < 20; idx++ {
		for idy := 0; idy < idx; idy++ {
			k := indices[idy]
			v := keyVals[k]
			u := s.Remove(k)
			// Only the most recently reached key is still present; all
			// earlier ones were removed in previous passes.
			if idy+1 == idx {
				if u != v {
					t.Fatal("Wrong value returned:", u, v)
				}
			} else {
				if u != nil {
					t.Fatal("Wrong value returned - expected nil:", u)
				}
			}
		}
	}
	if s.Len() != 1 {
		t.Fatal("Wrong length")
	}
}
// TestFirstLast checks First/Last return nil on an empty list and the
// minimum/maximum keys on a populated one.
func TestFirstLast(t *testing.T) {
	s := New(rng)
	if s.First() != nil || s.Last() != nil {
		t.Fatal("Expected nil for First and Last on empty list")
	}
	var min, max intKey
	for idx := 0; idx < 200; idx++ {
		k := indices[idx]
		v := keyVals[k]
		s.Insert(k, v)
		if idx == 0 {
			min, max = k, k
		} else {
			if k < min {
				min = k
			}
			if k > max {
				max = k
			}
		}
	}
	if f := s.First(); f.Key != min {
		t.Fatal("Did not get minimum key back for first", min, f)
	}
	if l := s.Last(); l.Key != max {
		t.Fatal("Did not get maximum key back for last", max, l)
	}
}

// TestMerge interleaves evens and odds across two lists, merges them,
// and checks the result is the full ordered sequence.
func TestMerge(t *testing.T) {
	s0 := New(rng)
	s1 := New(rng)
	lim := 1000
	for idx := 0; idx < lim; idx++ {
		if idx%2 == 0 {
			s0.Insert(intKey(idx), idx)
		} else {
			s1.Insert(intKey(idx), idx)
		}
	}
	s0.Merge(s1)
	if int(s0.Len()) != lim {
		t.Fatal("Wrong len after merge", s0.Len())
	}
	cur := s0.First()
	for idx := 0; idx < lim; idx++ {
		if cur.Value.(int) != idx {
			t.Fatal("Wrong value: ", cur.Value)
		}
		if cur != s0.Get(cur.Key) {
			t.Fatal("Internal failure: ", cur)
		}
		cur = cur.Next()
	}
}
// TestReposition shifts every key down by 11 (which always forces a
// physical move) and checks the values still iterate in order.
func TestReposition(t *testing.T) {
	s := New(rng)
	lim := 200
	for idx := 0; idx < lim; idx++ {
		s.Insert(intKey(idx*5), idx)
	}
	for idx := lim - 1; idx >= 0; idx-- {
		n := s.Get(intKey(idx * 5))
		if n == nil {
			t.Fatal("Unable to find node")
		}
		n.Reposition(intKey((idx * 5) - 11))
	}
	n := s.First()
	for idx := 0; idx < lim; idx++ {
		if n.Value != idx {
			t.Fatal("Wrong value", idx, n)
		}
		n = n.Next()
	}
	if n != nil {
		t.Fatal("Too many values!")
	}
}
// BenchmarkAdd* time Insert on lists pre-seeded to various sizes.
func BenchmarkAdd08192(b *testing.B) { benchmarkAdd(8192, b) }
func BenchmarkAdd16384(b *testing.B) { benchmarkAdd(16384, b) }
func BenchmarkAdd32768(b *testing.B) { benchmarkAdd(32768, b) }
func BenchmarkAdd65536(b *testing.B) { benchmarkAdd(65536, b) }

// benchmarkAdd inserts b.N sequential keys into a pre-seeded list.
func benchmarkAdd(initial int, b *testing.B) {
	s := populate(New(rng), 0, initial)
	b.ResetTimer()
	populateFast(s, initial, b.N)
}

// BenchmarkGet* time lookups against lists of various sizes.
func BenchmarkGet08192(b *testing.B) { benchmarkGet(8192, b) }
func BenchmarkGet16384(b *testing.B) { benchmarkGet(16384, b) }
func BenchmarkGet32768(b *testing.B) { benchmarkGet(32768, b) }
func BenchmarkGet65536(b *testing.B) { benchmarkGet(65536, b) }

// benchmarkGet performs b.N Gets of known-present keys.
func benchmarkGet(initial int, b *testing.B) {
	s := populate(New(rng), 0, initial)
	b.ResetTimer()
	for idx := 0; idx < b.N; idx++ {
		s.Get(indices[idx%initial])
	}
}
// populate inserts lim key/value pairs into s, drawing keys from the
// shared indices fixture starting at offset (wrapping around).
func populate(s *SkipList, offset, lim int) *SkipList {
	for i := 0; i < lim; i++ {
		key := indices[(offset+i)%len(indices)]
		s.Insert(key, keyVals[key])
	}
	return s
}
// populateFast inserts lim sequential integer keys starting at offset.
func populateFast(s *SkipList, offset, lim int) *SkipList {
	for i := 0; i < lim; i++ {
		key := offset + i
		s.Insert(intKey(key), key)
	}
	return s
}
// TestMain builds the shared key/value fixture and the random key
// permutation once, before running the suite.
func TestMain(m *testing.M) {
	for idx := 0; idx < keyValsLen; idx++ {
		// Base-3 rendering just gives each key a distinctive string value.
		keyVals[intKey(idx)] = strconv.FormatInt(int64(idx), 3)
	}
	for idx, k := range rand.Perm(keyValsLen) {
		indices[idx] = intKey(k)
	}
	os.Exit(m.Run())
}

View File

@@ -1,11 +0,0 @@
{
"version": 0,
"dependencies": [
{
"importpath": "github.com/msackman/skiplist",
"repository": "https://github.com/msackman/skiplist",
"revision": "57733164b18444c51f63e9a80f1693961dde8036",
"branch": "master"
}
]
}

View File

@@ -10,7 +10,7 @@ all: $(TARGETS)
ifeq ($(BUILD_IN_CONTAINER),true)
$(TARGETS):
$(SUDO) docker run -ti $(RM) -v $(shell pwd)/../:/go/src/github.com/weaveworks/scope -e GOARCH -e GOOS \
weaveworks/scope-backend-build -C experimental $@
weaveworks/scope-backend-build -C extras $@
else
$(TARGETS):
go build -ldflags "-extldflags \"-static\"" -tags netgo -o $@ ./$(@D)

View File

@@ -1,4 +1,6 @@
# Experimental Code
# Extra code
Useful things that go well with scope.
- Code in this directory must always compile (enforced by CircleCI)
- This directory is ignored for testing but should lint cleanly

View File

@@ -11,7 +11,7 @@ qotd/qotd: qotd/qotd.o
ifeq ($(BUILD_IN_CONTAINER),true)
qotd/qotd qotd/qotd.o searchapp/searchapp shout/shout:
$(SUDO) docker run -ti --rm -v $(shell pwd)/../../:/go/src/github.com/weaveworks/scope -e GOARCH -e GOOS \
weaveworks/scope-backend-build -C experimental/example $@
weaveworks/scope-backend-build -C extras/example $@
else
qotd/qotd:
gcc -o $@ $< $(CFLAGS)

Some files were not shown because too many files have changed in this diff Show More