mirror of
https://github.com/weaveworks/scope.git
synced 2026-03-03 02:00:43 +00:00
Merge pull request #6 from weaveworks/add-runner
Copy test runner from weave repo, add some more details to README.
This commit is contained in:
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,3 +1,4 @@
|
|||||||
cover/cover
|
cover/cover
|
||||||
socks/proxy
|
socks/proxy
|
||||||
socks/image.tar
|
socks/image.tar
|
||||||
|
runner/runner
|
||||||
|
|||||||
42
README.md
42
README.md
@@ -1,12 +1,36 @@
|
|||||||
# Weaveworks Tools
|
# Weaveworks Build Tools
|
||||||
|
|
||||||
Included in this repo are tools shared by weave.git and scope.git. They include
|
Included in this repo are tools shared by weave.git and scope.git. They include
|
||||||
|
|
||||||
- ```cover```: a tool which merges overlapping coverage reports generated by go test
|
- ```cover```: a tool which merges overlapping coverage reports generated by go
|
||||||
- ```lint```: a script to lint Go project; runs various tools like golint, go vet, errcheck etc
|
test
|
||||||
- ```rebuild-image```: a script to rebuild docker images when their input files change; useful
|
- ```lint```: a script to lint Go project; runs various tools like golint, go
|
||||||
when you are using docker images to build your software, but you don't want to build the
|
vet, errcheck etc
|
||||||
image every time.
|
- ```rebuild-image```: a script to rebuild docker images when their input files
|
||||||
- ```socks```: a simple, dockerised SOCKS proxy for getting your laptop onto the Weave network
|
change; useful when you are using docker images to build your software, but you
|
||||||
- ```test```: a script to run all go unit tests in subdirectories, gather the coverage results,
|
don't want to build the image every time.
|
||||||
and merge them into a single report.
|
- ```socks```: a simple, dockerised SOCKS proxy for getting your laptop onto
|
||||||
|
the Weave network
|
||||||
|
- ```test```: a script to run all go unit tests in subdirectories, gather the
|
||||||
|
coverage results, and merge them into a single report.
|
||||||
|
- ```runner```: a tool for running tests in parallel; given each test is
|
||||||
|
suffixed with the number of hosts it requires, and the hosts available are
|
||||||
|
contained in the environment variable HOSTS, the tool will run tests in
|
||||||
|
parallel, on different hosts.
|
||||||
|
|
||||||
|
## Using build-tools.git
|
||||||
|
|
||||||
|
To allow you to tie your code to a specific version of build-tools.git, such
|
||||||
|
that future changes don't break you, we recommend that you [`git subtree`]
|
||||||
|
this repository into your own repository:
|
||||||
|
|
||||||
|
[`git subtree`]: http://blogs.atlassian.com/2013/05/alternatives-to-git-submodule-git-subtree/
|
||||||
|
|
||||||
|
```
|
||||||
|
git subtree add --prefix tools https://github.com/weaveworks/build-tools.git master --squash
|
||||||
|
```
|
||||||
|
|
||||||
|
To update the code in build-tools.git, the process is therefore:
|
||||||
|
- PR into build-tools.git, go through normal review process etc.
|
||||||
|
- Do `git subtree pull --prefix tools https://github.com/weaveworks/build-tools.git master --squash`
|
||||||
|
in your repo, and PR that.
|
||||||
|
|||||||
@@ -19,4 +19,5 @@ test:
|
|||||||
- cd $SRCDIR; ./lint .
|
- cd $SRCDIR; ./lint .
|
||||||
- cd $SRCDIR/cover; make
|
- cd $SRCDIR/cover; make
|
||||||
- cd $SRCDIR/socks; make
|
- cd $SRCDIR/socks; make
|
||||||
|
- cd $SRCDIR/runner; make
|
||||||
|
|
||||||
|
|||||||
11
runner/Makefile
Normal file
11
runner/Makefile
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# Build the `runner` test-driver binary from the Go sources in this
# directory.
.PHONY: all clean

all: runner

# `-tags netgo` uses the pure-Go resolver; the static-link ldflags make
# the resulting binary self-contained (no libc dependency).
runner: *.go
	go get -tags netgo ./$(@D)
	go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D)

clean:
	rm -rf runner
	go clean ./...
|
||||||
275
runner/runner.go
Normal file
275
runner/runner.go
Normal file
@@ -0,0 +1,275 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/docker/pkg/mflag"
|
||||||
|
"github.com/mgutz/ansi"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// defaultSchedulerHost is the App Engine service that records test
	// durations and hands out per-shard schedules; overridable with the
	// -scheduler-host flag.
	defaultSchedulerHost = "positive-cocoa-90213.appspot.com"
	jsonContentType      = "application/json"
)
|
||||||
|
|
||||||
|
var (
	// ANSI colour codes used to highlight console status lines.
	start = ansi.ColorCode("black+ub")
	fail  = ansi.ColorCode("red+b")
	succ  = ansi.ColorCode("green+b")
	reset = ansi.ColorCode("reset")

	// Settings populated from command-line flags in main.
	schedulerHost = defaultSchedulerHost
	useScheduler  = false
	runParallel   = false
	verbose       = false

	// consoleLock serialises writes to stdout so output from
	// concurrently running tests does not interleave.
	consoleLock = sync.Mutex{}
)
|
||||||
|
|
||||||
|
// test is one integration test script plus the number of hosts it needs
// (parsed from a "_<n>_test.sh" filename suffix; see getTests).
type test struct {
	name  string
	hosts int
}

// schedule is the JSON body exchanged with the scheduler service: a
// plain list of test names.
type schedule struct {
	Tests []string `json:"tests"`
}

// result reports the outcome of one test run: the test itself, whether
// it errored, and the hosts it occupied (so parallel can return them to
// the free pool).
type result struct {
	test
	errored bool
	hosts   []string
}

// tests is a sortable queue of tests; see Len/Swap/Less below.
type tests []test
|
||||||
|
|
||||||
|
func (ts tests) Len() int { return len(ts) }
|
||||||
|
func (ts tests) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
|
||||||
|
func (ts tests) Less(i, j int) bool {
|
||||||
|
if ts[i].hosts != ts[j].hosts {
|
||||||
|
return ts[i].hosts < ts[j].hosts
|
||||||
|
}
|
||||||
|
return ts[i].name < ts[j].name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *tests) pick(availible int) (test, bool) {
|
||||||
|
// pick the first test that fits in the availible hosts
|
||||||
|
for i, test := range *ts {
|
||||||
|
if test.hosts <= availible {
|
||||||
|
*ts = append((*ts)[:i], (*ts)[i+1:]...)
|
||||||
|
return test, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return test{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t test) run(hosts []string) bool {
|
||||||
|
consoleLock.Lock()
|
||||||
|
fmt.Printf("%s>>> Running %s on %s%s\n", start, t.name, hosts, reset)
|
||||||
|
consoleLock.Unlock()
|
||||||
|
|
||||||
|
var out bytes.Buffer
|
||||||
|
|
||||||
|
cmd := exec.Command(t.name)
|
||||||
|
cmd.Env = os.Environ()
|
||||||
|
cmd.Stdout = &out
|
||||||
|
cmd.Stderr = &out
|
||||||
|
|
||||||
|
// replace HOSTS in env
|
||||||
|
for i, env := range cmd.Env {
|
||||||
|
if strings.HasPrefix(env, "HOSTS") {
|
||||||
|
cmd.Env[i] = fmt.Sprintf("HOSTS=%s", strings.Join(hosts, " "))
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
err := cmd.Run()
|
||||||
|
duration := float64(time.Now().Sub(start)) / float64(time.Second)
|
||||||
|
|
||||||
|
consoleLock.Lock()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("%s>>> Test %s finished after %0.1f secs with error: %v%s\n", fail, t.name, duration, err, reset)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("%s>>> Test %s finished with success after %0.1f secs%s\n", succ, t.name, duration, reset)
|
||||||
|
}
|
||||||
|
if err != nil || verbose {
|
||||||
|
fmt.Print(out.String())
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
consoleLock.Unlock()
|
||||||
|
|
||||||
|
if err != nil && useScheduler {
|
||||||
|
updateScheduler(t.name, duration)
|
||||||
|
}
|
||||||
|
|
||||||
|
return err != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateScheduler(test string, duration float64) {
|
||||||
|
req := &http.Request{
|
||||||
|
Method: "POST",
|
||||||
|
Host: schedulerHost,
|
||||||
|
URL: &url.URL{
|
||||||
|
Opaque: fmt.Sprintf("/record/%s/%0.2f", url.QueryEscape(test), duration),
|
||||||
|
Scheme: "http",
|
||||||
|
Host: schedulerHost,
|
||||||
|
},
|
||||||
|
Close: true,
|
||||||
|
}
|
||||||
|
if resp, err := http.DefaultClient.Do(req); err != nil {
|
||||||
|
fmt.Printf("Error updating scheduler: %v\n", err)
|
||||||
|
} else {
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getSchedule(tests []string) ([]string, error) {
|
||||||
|
var (
|
||||||
|
testRun = "integration-" + os.Getenv("CIRCLE_BUILD_NUM")
|
||||||
|
shardCount = os.Getenv("CIRCLE_NODE_TOTAL")
|
||||||
|
shardID = os.Getenv("CIRCLE_NODE_INDEX")
|
||||||
|
requestBody = &bytes.Buffer{}
|
||||||
|
)
|
||||||
|
if err := json.NewEncoder(requestBody).Encode(schedule{tests}); err != nil {
|
||||||
|
return []string{}, err
|
||||||
|
}
|
||||||
|
url := fmt.Sprintf("http://%s/schedule/%s/%s/%s", schedulerHost, testRun, shardCount, shardID)
|
||||||
|
resp, err := http.Post(url, jsonContentType, requestBody)
|
||||||
|
if err != nil {
|
||||||
|
return []string{}, err
|
||||||
|
}
|
||||||
|
var sched schedule
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&sched); err != nil {
|
||||||
|
return []string{}, err
|
||||||
|
}
|
||||||
|
return sched.Tests, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTests(testNames []string) (tests, error) {
|
||||||
|
var err error
|
||||||
|
if useScheduler {
|
||||||
|
testNames, err = getSchedule(testNames)
|
||||||
|
if err != nil {
|
||||||
|
return tests{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tests := tests{}
|
||||||
|
for _, name := range testNames {
|
||||||
|
parts := strings.Split(strings.TrimSuffix(name, "_test.sh"), "_")
|
||||||
|
numHosts, err := strconv.Atoi(parts[len(parts)-1])
|
||||||
|
if err != nil {
|
||||||
|
numHosts = 1
|
||||||
|
}
|
||||||
|
tests = append(tests, test{name, numHosts})
|
||||||
|
fmt.Printf("Test %s needs %d hosts\n", name, numHosts)
|
||||||
|
}
|
||||||
|
return tests, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func summary(tests, failed tests) {
|
||||||
|
if len(failed) > 0 {
|
||||||
|
fmt.Printf("%s>>> Ran %d tests, %d failed%s\n", fail, len(tests), len(failed), reset)
|
||||||
|
for _, test := range failed {
|
||||||
|
fmt.Printf("%s>>> Fail %s%s\n", fail, test.name, reset)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Printf("%s>>> Ran %d tests, all succeeded%s\n", succ, len(tests), reset)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parallel runs the tests concurrently, bin-packing them onto the free
// hosts: each test reserves test.hosts hosts for its duration and new
// tests start as soon as enough hosts are free. Returns true if any
// test failed.
func parallel(ts tests, hosts []string) bool {
	// testsCopy shares ts's backing array (pick below mutates it), but
	// it is only passed to summary, which reads just its length — that
	// is unaffected by pick's in-place shuffling.
	testsCopy := ts
	// Largest (most-hosts) tests first, for better packing.
	sort.Sort(sort.Reverse(ts))
	resultsChan := make(chan result)
	outstanding := 0 // number of tests currently running
	failed := tests{}
	for len(ts) > 0 || outstanding > 0 {
		// While we have some free hosts, try and schedule
		// a test on them
		for len(hosts) > 0 {
			test, ok := ts.pick(len(hosts))
			if !ok {
				break
			}
			// Hand the first test.hosts free hosts to this test and
			// shrink the free pool.
			// NOTE(review): the reslice + later append share a backing
			// array with outstanding testHosts slices; append only
			// writes past the current length so this looks safe, but
			// worth confirming.
			testHosts := hosts[:test.hosts]
			hosts = hosts[test.hosts:]

			// test and testHosts are fresh variables per iteration, so
			// the closure captures the right values.
			go func() {
				errored := test.run(testHosts)
				resultsChan <- result{test, errored, testHosts}
			}()
			outstanding++
		}

		// Otherwise, wait for the test to finish and return
		// the hosts to the pool
		result := <-resultsChan
		hosts = append(hosts, result.hosts...)
		outstanding--
		if result.errored {
			failed = append(failed, result.test)
		}
	}
	summary(testsCopy, failed)
	return len(failed) > 0
}
|
||||||
|
|
||||||
|
func sequential(ts tests, hosts []string) bool {
|
||||||
|
failed := tests{}
|
||||||
|
for _, test := range ts {
|
||||||
|
if test.run(hosts) {
|
||||||
|
failed = append(failed, test)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
summary(ts, failed)
|
||||||
|
return len(failed) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
mflag.BoolVar(&useScheduler, []string{"scheduler"}, false, "Use scheduler to distribute tests across shards")
|
||||||
|
mflag.BoolVar(&runParallel, []string{"parallel"}, false, "Run tests in parallel on hosts where possible")
|
||||||
|
mflag.BoolVar(&verbose, []string{"v"}, false, "Print output from all tests (Also enabled via DEBUG=1)")
|
||||||
|
mflag.StringVar(&schedulerHost, []string{"scheduler-host"}, defaultSchedulerHost, "Hostname of scheduler.")
|
||||||
|
mflag.Parse()
|
||||||
|
|
||||||
|
if len(os.Getenv("DEBUG")) > 0 {
|
||||||
|
verbose = true
|
||||||
|
}
|
||||||
|
|
||||||
|
tests, err := getTests(mflag.Args())
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing tests: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
hosts := strings.Fields(os.Getenv("HOSTS"))
|
||||||
|
maxHosts := len(hosts)
|
||||||
|
if maxHosts == 0 {
|
||||||
|
fmt.Print("No HOSTS specified.\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
var errored bool
|
||||||
|
if runParallel {
|
||||||
|
errored = parallel(tests, hosts)
|
||||||
|
} else {
|
||||||
|
errored = sequential(tests, hosts)
|
||||||
|
}
|
||||||
|
|
||||||
|
if errored {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user