Mirror of https://github.com/weaveworks/scope.git

Merge commit 'd9ce1d58e9cbcd67823ee0abe8dd346a9aa6d120' into iaguis/fix-lint
@@ -8,11 +8,18 @@ machine:
dependencies:
post:
- sudo chmod a+wr --recursive /usr/local/go/pkg
- go clean -i net
- go install -tags netgo std
- mkdir -p $(dirname $SRCDIR)
- cp -r $(pwd)/ $SRCDIR
- go get github.com/golang/lint/golint github.com/fzipp/gocyclo github.com/kisielk/errcheck
- |
cd $SRCDIR;
go get \
github.com/fzipp/gocyclo \
github.com/golang/lint/golint \
github.com/kisielk/errcheck \
gopkg.in/mvdan/sh.v1/cmd/shfmt

test:
override:
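Stripped of CI context, the new multi-line step above installs the same tools as the single go get it replaces, with shfmt added for shell formatting; as a plain shell sketch (module paths exactly as listed in the hunk, a configured GOPATH is assumed):

# Hedged sketch: the tool install performed by the new dependency step.
cd "$SRCDIR"
go get \
    github.com/fzipp/gocyclo \
    github.com/golang/lint/golint \
    github.com/kisielk/errcheck \
    gopkg.in/mvdan/sh.v1/cmd/shfmt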
@@ -4,9 +4,6 @@ set -o errexit
set -o nounset
set -o pipefail

WORKING_SUFFIX=$(if ! git diff --exit-code --quiet HEAD >&2; \
then echo "-WIP"; \
else echo ""; \
fi)
WORKING_SUFFIX=$(if ! git diff --exit-code --quiet HEAD >&2; then echo "-WIP"; else echo ""; fi)
BRANCH_PREFIX=$(git rev-parse --abbrev-ref HEAD)
echo "${BRANCH_PREFIX//\//-}-$(git rev-parse --short HEAD)$WORKING_SUFFIX"
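The collapsed WORKING_SUFFIX one-liner behaves exactly like the wrapped form it replaces. A runnable sketch of the whole tag helper, with the output it produces spelled out (the sample branch and SHA are illustrative):

#!/bin/bash
# Hedged sketch of the image-tag helper above; runnable in any git checkout.
set -o errexit
set -o nounset
set -o pipefail

# "-WIP" is appended only when the working tree differs from HEAD.
WORKING_SUFFIX=$(if ! git diff --exit-code --quiet HEAD >&2; then echo "-WIP"; else echo ""; fi)
BRANCH_PREFIX=$(git rev-parse --abbrev-ref HEAD)
# On branch "iaguis/fix-lint" at short SHA d9ce1d5 with local edits this
# prints "iaguis-fix-lint-d9ce1d5-WIP": slashes in the branch name become dashes.
echo "${BRANCH_PREFIX//\//-}-$(git rev-parse --short HEAD)$WORKING_SUFFIX"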
tools/integration/assert.sh (61, Normal file → Executable file)
@@ -25,14 +25,15 @@ export CONTINUE=${CONTINUE:-}

args="$(getopt -n "$0" -l \
verbose,help,stop,discover,invariant,continue vhxdic "$@")" \
|| exit -1
|| exit -1
for arg in $args; do
case "$arg" in
-h)
echo "$0 [-vxidc]" \
"[--verbose] [--stop] [--invariant] [--discover] [--continue]"
echo "$(sed 's/./ /g' <<< "$0") [-h] [--help]"
exit 0;;
echo "$(sed 's/./ /g' <<<"$0") [-h] [--help]"
exit 0
;;
--help)
cat <<EOF
Usage: $0 [options]
@@ -47,17 +48,23 @@ Options:
-h show brief usage information and exit
--help show this help message and exit
EOF
exit 0;;
-v|--verbose)
DEBUG=1;;
-x|--stop)
STOP=1;;
-i|--invariant)
INVARIANT=1;;
-d|--discover)
DISCOVERONLY=1;;
-c|--continue)
CONTINUE=1;;
exit 0
;;
-v | --verbose)
DEBUG=1
;;
-x | --stop)
STOP=1
;;
-i | --invariant)
INVARIANT=1
;;
-d | --discover)
DISCOVERONLY=1
;;
-c | --continue)
CONTINUE=1
;;
esac
done

@@ -74,9 +81,9 @@ assert_end() {
# assert_end [suite ..]
tests_endtime="$(date +%s%N)"
# required visible decimal place for seconds (leading zeros if needed)
tests_time="$( \
printf "%010d" "$(( ${tests_endtime/%N/000000000}
- ${tests_starttime/%N/000000000} ))")" # in ns
tests_time="$(
printf "%010d" "$((${tests_endtime/%N/000000000} - ${tests_starttime/%N/000000000}))"
)" # in ns
tests="$tests_ran ${*:+$* }tests"
[[ -n "$DISCOVERONLY" ]] && echo "collected $tests." && _assert_reset && return
[[ -n "$DEBUG" ]] && echo
@@ -84,7 +91,8 @@ assert_end() {
# ${tests_time:0:${#tests_time}-9} - seconds
# ${tests_time:${#tests_time}-9:3} - milliseconds
if [[ -z "$INVARIANT" ]]; then
report_time=" in ${tests_time:0:${#tests_time}-9}.${tests_time:${#tests_time}-9:3}s"
idx=$((${#tests_time} - 9))
report_time=" in ${tests_time:0:${idx}}.${tests_time:${idx}:3}s"
else
report_time=
fi
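A worked sketch of the seconds/milliseconds slicing described in the comments above (it assumes date +%s%N produced real nanoseconds; on platforms where %N is unsupported and a literal N is printed, the /%N/000000000 substitution pads it out instead):

#!/bin/bash
# Hedged example: turning the zero-padded nanosecond delta into "s.mmm".
tests_starttime="1700000000123456789"   # illustrative nanosecond timestamps
tests_endtime="1700000003623456789"
tests_time="$(printf "%010d" "$((tests_endtime - tests_starttime))")"   # "3500000000"
idx=$((${#tests_time} - 9))
echo "in ${tests_time:0:${idx}}.${tests_time:${idx}:3}s"   # prints "in 3.500s"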
@@ -100,15 +108,15 @@ assert_end() {

assert() {
# assert <command> <expected stdout> [stdin]
(( tests_ran++ )) || :
((tests_ran++)) || :
[[ -z "$DISCOVERONLY" ]] || return
expected=$(echo -ne "${2:-}")
result="$(eval 2>/dev/null "$1" <<< "${3:-}")" || true
result="$(eval "$1" 2>/dev/null <<<"${3:-}")" || true
if [[ "$result" == "$expected" ]]; then
[[ -z "$DEBUG" ]] || echo -n .
return
fi
result="$(sed -e :a -e '$!N;s/\n/\\n/;ta' <<< "$result")"
result="$(sed -e :a -e '$!N;s/\n/\\n/;ta' <<<"$result")"
[[ -z "$result" ]] && result="nothing" || result="\"$result\""
[[ -z "$2" ]] && expected="nothing" || expected="\"$2\""
_assert_fail "expected $expected${_indent}got $result" "$1" "$3"
@@ -116,10 +124,10 @@ assert() {

assert_raises() {
# assert_raises <command> <expected code> [stdin]
(( tests_ran++ )) || :
((tests_ran++)) || :
[[ -z "$DISCOVERONLY" ]] || return
status=0
(eval "$1" <<< "${3:-}") > /dev/null 2>&1 || status=$?
(eval "$1" <<<"${3:-}") >/dev/null 2>&1 || status=$?
expected=${2:-0}
if [[ "$status" -eq "$expected" ]]; then
[[ -z "$DEBUG" ]] || echo -n .
@@ -138,12 +146,12 @@ _assert_fail() {
exit 1
fi
tests_errors[$tests_failed]="$report"
(( tests_failed++ )) || :
((tests_failed++)) || :
}

skip_if() {
# skip_if <command ..>
(eval "$@") > /dev/null 2>&1 && status=0 || status=$?
(eval "$@") >/dev/null 2>&1 && status=0 || status=$?
[[ "$status" -eq 0 ]] || return
skip
}
@@ -175,9 +183,8 @@ _skip() {
fi
}

_assert_reset
: ${tests_suite_status:=0} # remember if any of the tests failed so far
: ${tests_suite_status:=0} # remember if any of the tests failed so far
_assert_cleanup() {
local status=$?
# modify exit code if it's not already non-zero
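For orientation, a minimal sketch of how a test script drives the helpers shown above (the test file is hypothetical; assert, assert_raises and assert_end are the functions defined in this file):

#!/bin/bash
# example_test.sh - hypothetical consumer of assert.sh, for illustration only.
. ./assert.sh

assert "echo hello" "hello"   # command, expected stdout
assert "cat" "ping" "ping"    # optional third argument is fed to the command's stdin
assert_raises "true" 0        # command, expected exit status
assert_raises "false" 1
assert_end example            # prints the pass/fail summary for this suite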
@@ -1,3 +1,4 @@
#!/bin/bash
# NB only to be sourced

set -e
@@ -16,49 +17,55 @@ N_MACHINES=${N_MACHINES:-3}
IP_PREFIX=${IP_PREFIX:-192.168.48}
IP_SUFFIX_BASE=${IP_SUFFIX_BASE:-10}

if [ -z "$HOSTS" ] ; then
for i in $(seq 1 $N_MACHINES); do
IP="${IP_PREFIX}.$((${IP_SUFFIX_BASE}+$i))"
if [ -z "$HOSTS" ]; then
for i in $(seq 1 "$N_MACHINES"); do
IP="${IP_PREFIX}.$((IP_SUFFIX_BASE + i))"
HOSTS="$HOSTS $IP"
done
fi

# these are used by the tests
HOST1=$(echo $HOSTS | cut -f 1 -d ' ')
HOST2=$(echo $HOSTS | cut -f 2 -d ' ')
HOST3=$(echo $HOSTS | cut -f 3 -d ' ')
# shellcheck disable=SC2034
HOST1=$(echo "$HOSTS" | cut -f 1 -d ' ')
# shellcheck disable=SC2034
HOST2=$(echo "$HOSTS" | cut -f 2 -d ' ')
# shellcheck disable=SC2034
HOST3=$(echo "$HOSTS" | cut -f 3 -d ' ')

# shellcheck disable=SC1090
. "$DIR/assert.sh"

SSH_DIR=${SSH_DIR:-$DIR}
SSH=${SSH:-ssh -l vagrant -i "$SSH_DIR/insecure_private_key" -o "UserKnownHostsFile=$SSH_DIR/.ssh_known_hosts" -o CheckHostIP=no -o StrictHostKeyChecking=no}
SSH=${SSH:-ssh -l vagrant -i \"$SSH_DIR/insecure_private_key\" -o \"UserKnownHostsFile=$SSH_DIR/.ssh_known_hosts\" -o CheckHostIP=no -o StrictHostKeyChecking=no}

SMALL_IMAGE="alpine"
# shellcheck disable=SC2034
TEST_IMAGES="$SMALL_IMAGE"

# shellcheck disable=SC2034
PING="ping -nq -W 1 -c 1"
DOCKER_PORT=2375

remote() {
rem=$1
shift 1
"$@" > >(while read line; do echo -e $'\e[0;34m'"$rem>"$'\e[0m'" $line"; done)
"$@" > >(while read -r line; do echo -e $'\e[0;34m'"$rem>"$'\e[0m'" $line"; done)
}

colourise() {
[ -t 0 ] && echo -ne $'\e['$1'm' || true
([ -t 0 ] && echo -ne $'\e['"$1"'m') || true
shift
# It's important that we don't do this in a subshell, as some
# commands we execute need to modify global state
"$@"
[ -t 0 ] && echo -ne $'\e[0m' || true
([ -t 0 ] && echo -ne $'\e[0m') || true
}

whitely() {
colourise '1;37' "$@"
}

greyly () {
greyly() {
colourise '0;37' "$@"
}

@@ -73,21 +80,21 @@ greenly() {
run_on() {
host=$1
shift 1
[ -z "$DEBUG" ] || greyly echo "Running on $host: $@" >&2
remote $host $SSH $host "$@"
[ -z "$DEBUG" ] || greyly echo "Running on $host:" "$@" >&2
remote "$host" "$SSH" "$host" "$@"
}

docker_on() {
host=$1
shift 1
[ -z "$DEBUG" ] || greyly echo "Docker on $host:$DOCKER_PORT: $@" >&2
docker -H tcp://$host:$DOCKER_PORT "$@"
[ -z "$DEBUG" ] || greyly echo "Docker on $host:$DOCKER_PORT:" "$@" >&2
docker -H "tcp://$host:$DOCKER_PORT" "$@"
}

weave_on() {
host=$1
shift 1
[ -z "$DEBUG" ] || greyly echo "Weave on $host:$DOCKER_PORT: $@" >&2
[ -z "$DEBUG" ] || greyly echo "Weave on $host:$DOCKER_PORT:" "$@" >&2
DOCKER_HOST=tcp://$host:$DOCKER_PORT $WEAVE "$@"
}

@@ -95,24 +102,24 @@ exec_on() {
host=$1
container=$2
shift 2
docker -H tcp://$host:$DOCKER_PORT exec $container "$@"
docker -H "tcp://$host:$DOCKER_PORT" exec "$container" "$@"
}

rm_containers() {
host=$1
shift
[ $# -eq 0 ] || docker_on $host rm -f "$@" >/dev/null
[ $# -eq 0 ] || docker_on "$host" rm -f "$@" >/dev/null
}

start_suite() {
for host in $HOSTS; do
[ -z "$DEBUG" ] || echo "Cleaning up on $host: removing all containers and resetting weave"
PLUGIN_ID=$(docker_on $host ps -aq --filter=name=weaveplugin)
PLUGIN_ID=$(docker_on "$host" ps -aq --filter=name=weaveplugin)
PLUGIN_FILTER="cat"
[ -n "$PLUGIN_ID" ] && PLUGIN_FILTER="grep -v $PLUGIN_ID"
rm_containers $host $(docker_on $host ps -aq 2>/dev/null | $PLUGIN_FILTER)
run_on $host "docker network ls | grep -q ' weave ' && docker network rm weave" || true
weave_on $host reset 2>/dev/null
rm_containers "$host" "$(docker_on "$host" ps -aq 2>/dev/null | "$PLUGIN_FILTER")"
run_on "$host" "docker network ls | grep -q ' weave ' && docker network rm weave" || true
weave_on "$host" reset 2>/dev/null
done
whitely echo "$@"
}
@@ -122,4 +129,3 @@ end_suite() {
}

WEAVE=$DIR/../weave
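The comment in colourise() about not using a subshell is the heart of that change: the bracketed echo calls may fork, but the wrapped command itself must run in the current shell, or any variables it sets would be lost when the subshell exits. A standalone sketch of the difference (function and variable names are hypothetical):

#!/bin/bash
# Hedged sketch: why colourise() runs "$@" directly rather than inside ( ... ).
count=0
bump() { count=$((count + 1)); }

in_subshell() { ("$@"); }   # child process: the parent's variables are untouched
in_place() { "$@"; }        # current shell: assignments persist

in_subshell bump; echo "$count"   # still 0
in_place bump; echo "$count"      # now 1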
@@ -16,68 +16,73 @@ set -e
: "${NUM_HOSTS:=}"

if [ -z "${PROJECT}" ] || [ -z "${NUM_HOSTS}" ] || [ -z "${TEMPLATE_NAME}" ]; then
echo "Must specify PROJECT, NUM_HOSTS and TEMPLATE_NAME"
exit 1
echo "Must specify PROJECT, NUM_HOSTS and TEMPLATE_NAME"
exit 1
fi

SUFFIX=""
if [ -n "$CIRCLECI" ]; then
SUFFIX="-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX"
SUFFIX="-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX"
fi

# Setup authentication
gcloud auth activate-service-account --key-file "$KEY_FILE" 1>/dev/null
gcloud config set project "$PROJECT"

function vm_names {
local names=
for i in $(seq 1 "$NUM_HOSTS"); do
names=( "host$i$SUFFIX" "${names[@]}" )
done
echo "${names[@]}"
function vm_names() {
local names=
for i in $(seq 1 "$NUM_HOSTS"); do
names=("host$i$SUFFIX" "${names[@]}")
done
echo "${names[@]}"
}

# Delete all vms in this account
function destroy {
local names
names="$(vm_names)"
if [ "$(gcloud compute instances list --zone "$ZONE" -q "$names" | wc -l)" -le 1 ] ; then
return 0
fi
for i in {0..10}; do
# gcloud instances delete can sometimes hang.
case $(set +e; timeout 60s /bin/bash -c "gcloud compute instances delete --zone $ZONE -q $names >/dev/null 2>&1"; echo $?) in
0)
return 0
;;
124)
# 124 means it timed out
break
;;
*)
return 1
esac
done
function destroy() {
local names
names="$(vm_names)"
if [ "$(gcloud compute instances list --zone "$ZONE" -q "$names" | wc -l)" -le 1 ]; then
return 0
fi
for i in {0..10}; do
# gcloud instances delete can sometimes hang.
case $(
set +e
timeout 60s /bin/bash -c "gcloud compute instances delete --zone $ZONE -q $names >/dev/null 2>&1"
echo $?
) in
0)
return 0
;;
124)
# 124 means it timed out
break
;;
*)
return 1
;;
esac
done
}

function internal_ip {
jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].networkIP" "$1"
function internal_ip() {
jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].networkIP" "$1"
}

function external_ip {
jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].accessConfigs[0].natIP" "$1"
function external_ip() {
jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].accessConfigs[0].natIP" "$1"
}

function try_connect {
for i in {0..10}; do
ssh -t "$1" true && return
sleep 2
done
function try_connect() {
for i in {0..10}; do
ssh -t "$1" true && return
sleep 2
done
}

function install_docker_on {
name=$1
ssh -t "$name" sudo bash -x -s <<EOF
function install_docker_on() {
name=$1
ssh -t "$name" sudo bash -x -s <<EOF
curl -sSL https://get.docker.com/gpg | sudo apt-key add -
curl -sSL https://get.docker.com/ | sh
apt-get update -qq;
@@ -86,92 +91,93 @@ usermod -a -G docker vagrant;
echo 'DOCKER_OPTS="-H unix:///var/run/docker.sock -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay"' >> /etc/default/docker;
service docker restart
EOF
# It seems we need a short delay for docker to start up, so I put this in
# a separate ssh connection. This installs nsenter.
ssh -t "$name" sudo docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter
# It seems we need a short delay for docker to start up, so I put this in
# a separate ssh connection. This installs nsenter.
ssh -t "$name" sudo docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter
}

function copy_hosts {
hostname=$1
hosts=$2
ssh -t "$hostname" "sudo -- sh -c \"cat >>/etc/hosts\"" < "$hosts"
function copy_hosts() {
hostname=$1
hosts=$2
ssh -t "$hostname" "sudo -- sh -c \"cat >>/etc/hosts\"" <"$hosts"
}

# Create new set of VMs
function setup {
destroy
function setup() {
destroy

names=( $(vm_names) )
gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE"
gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE"
sed -i '/UserKnownHostsFile=\/dev\/null/d' ~/.ssh/config
names=($(vm_names))
gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE"
gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE"
sed -i '/UserKnownHostsFile=\/dev\/null/d' ~/.ssh/config

# build an /etc/hosts file for these vms
hosts=$(mktemp hosts.XXXXXXXXXX)
json=$(mktemp json.XXXXXXXXXX)
gcloud compute instances list --format=json > "$json"
for name in "${names[@]}"; do
echo "$(internal_ip "$json" "$name") $name.$ZONE.$PROJECT" >> "$hosts"
done
# build an /etc/hosts file for these vms
hosts=$(mktemp hosts.XXXXXXXXXX)
json=$(mktemp json.XXXXXXXXXX)
gcloud compute instances list --format=json >"$json"
for name in "${names[@]}"; do
echo "$(internal_ip "$json" "$name") $name.$ZONE.$PROJECT" >>"$hosts"
done

for name in "${names[@]}"; do
hostname="$name.$ZONE.$PROJECT"
for name in "${names[@]}"; do
hostname="$name.$ZONE.$PROJECT"

# Add the remote ip to the local /etc/hosts
sudo sed -i "/$hostname/d" /etc/hosts
sudo sh -c "echo \"$(external_ip "$json" "$name") $hostname\" >>/etc/hosts"
try_connect "$hostname"
# Add the remote ip to the local /etc/hosts
sudo sed -i "/$hostname/d" /etc/hosts
sudo sh -c "echo \"$(external_ip "$json" "$name") $hostname\" >>/etc/hosts"
try_connect "$hostname"

copy_hosts "$hostname" "$hosts" &
done
copy_hosts "$hostname" "$hosts" &
done

wait
wait

rm "$hosts" "$json"
rm "$hosts" "$json"
}

function make_template {
gcloud compute instances create "$TEMPLATE_NAME" --image "$IMAGE" --zone "$ZONE"
gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE"
name="$TEMPLATE_NAME.$ZONE.$PROJECT"
try_connect "$name"
install_docker_on "$name"
gcloud -q compute instances delete "$TEMPLATE_NAME" --keep-disks boot --zone "$ZONE"
gcloud compute images create "$TEMPLATE_NAME" --source-disk "$TEMPLATE_NAME" --source-disk-zone "$ZONE"
function make_template() {
gcloud compute instances create "$TEMPLATE_NAME" --image "$IMAGE" --zone "$ZONE"
gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE"
name="$TEMPLATE_NAME.$ZONE.$PROJECT"
try_connect "$name"
install_docker_on "$name"
gcloud -q compute instances delete "$TEMPLATE_NAME" --keep-disks boot --zone "$ZONE"
gcloud compute images create "$TEMPLATE_NAME" --source-disk "$TEMPLATE_NAME" --source-disk-zone "$ZONE"
}

function hosts {
hosts=
args=
json=$(mktemp json.XXXXXXXXXX)
gcloud compute instances list --format=json > "$json"
for name in $(vm_names); do
hostname="$name.$ZONE.$PROJECT"
hosts=( $hostname "${hosts[@]}" )
args=( "--add-host=$hostname:$(internal_ip "$json" "$name")" "${args[@]}" )
done
echo export SSH=\"ssh -l vagrant\"
echo "export HOSTS=\"${hosts[*]}\""
echo "export ADD_HOST_ARGS=\"${args[*]}\""
rm "$json"
function hosts() {
hosts=
args=
json=$(mktemp json.XXXXXXXXXX)
gcloud compute instances list --format=json >"$json"
for name in $(vm_names); do
hostname="$name.$ZONE.$PROJECT"
hosts=($hostname "${hosts[@]}")
args=("--add-host=$hostname:$(internal_ip "$json" "$name")" "${args[@]}")
done
echo export SSH=\"ssh -l vagrant\"
echo "export HOSTS=\"${hosts[*]}\""
echo "export ADD_HOST_ARGS=\"${args[*]}\""
rm "$json"
}

case "$1" in
setup)
setup
;;
setup)
setup
;;

hosts)
hosts
;;
hosts)
hosts
;;

destroy)
destroy
;;
destroy)
destroy
;;

make_template)
# see if template exists
if ! gcloud compute images list | grep "$PROJECT" | grep "$TEMPLATE_NAME"; then
make_template
fi
make_template)
# see if template exists
if ! gcloud compute images list | grep "$PROJECT" | grep "$TEMPLATE_NAME"; then
make_template
fi
;;
esac
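The retry loop in destroy() relies on the exit status of timeout: GNU timeout exits with 124 when the wrapped command ran out of time, and otherwise passes the command's own status through. A small standalone sketch of that pattern (sleep stands in for the occasionally hanging gcloud call):

#!/bin/bash
# Hedged sketch of the timeout/124 pattern used by destroy() above.
status=$(
    set +e
    timeout 1s /bin/bash -c "sleep 5"   # stand-in for a hanging gcloud delete
    echo $?
)
case "$status" in
    0) echo "delete finished" ;;
    124) echo "timed out; stop retrying" ;;
    *) echo "delete failed with exit code $status" ;;
esac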
@@ -14,17 +14,17 @@ fi
whitely echo ...ok

# shellcheck disable=SC2068
TESTS=( ${@:-$(find . -name '*_test.sh')} )
RUNNER_ARGS=( )
TESTS=(${@:-$(find . -name '*_test.sh')})
RUNNER_ARGS=()

# If running on circle, use the scheduler to work out what tests to run
if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ]; then
RUNNER_ARGS=( "${RUNNER_ARGS[@]}" -scheduler )
RUNNER_ARGS=("${RUNNER_ARGS[@]}" -scheduler)
fi

# If running on circle or PARALLEL is not empty, run tests in parallel
if [ -n "$CIRCLECI" ] || [ -n "$PARALLEL" ]; then
RUNNER_ARGS=( "${RUNNER_ARGS[@]}" -parallel )
RUNNER_ARGS=("${RUNNER_ARGS[@]}" -parallel)
fi

make -C "${DIR}/../runner"
tools/lint (290)
@@ -1,9 +1,13 @@
#!/bin/bash
# This script lints go files for common errors.
# This script lints files for common errors.
#
# It runs gofmt and go vet, and optionally golint and
# For go files, it runs gofmt and go vet, and optionally golint and
# gocyclo, if they are installed.
#
# For shell files, it runs shfmt. If you don't have that installed, you can get
# it with:
# go get -u github.com/mvdan/sh/cmd/shfmt
#
# With no arguments, it lints the current files staged
# for git commit. Or you can pass it explicit filenames
# (or directories) and it will lint them.
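A usage sketch matching the header comments above (the flags are the ones parsed just below; the example file arguments are hypothetical):

# Lint whatever is currently staged for commit:
./tools/lint

# Lint explicit files or directories instead:
./tools/lint tools/integration/config.sh tools/

# Flags come first: silence golint's comment warnings and accept one spelling:
./tools/lint -nocomment -ignorespelling colourise tools/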
@@ -17,159 +21,193 @@ IGNORE_LINT_COMMENT=
IGNORE_TEST_PACKAGES=
IGNORE_SPELLINGS=
while true; do
case "$1" in
-nocomment)
IGNORE_LINT_COMMENT=1
shift 1
;;
-notestpackage)
IGNORE_TEST_PACKAGES=1
shift 1
;;
case "$1" in
-nocomment)
IGNORE_LINT_COMMENT=1
shift 1
;;
-notestpackage)
IGNORE_TEST_PACKAGES=1
shift 1
;;
-ignorespelling)
IGNORE_SPELLINGS="$2,$IGNORE_SPELLINGS"
shift 2
;;
*)
break
esac
IGNORE_SPELLINGS="$2,$IGNORE_SPELLINGS"
shift 2
;;
*)
break
;;
esac
done

spell_check() {
local filename="$1"
local lint_result=0

function spell_check {
filename="$1"
local lint_result=0
# we don't want to spell check tar balls, binaries, Makefile and json files
if file "$filename" | grep executable >/dev/null 2>&1; then
return $lint_result
fi
if [[ $filename == *".tar" || $filename == *".gz" || $filename == *".json" || $(basename "$filename") == "Makefile" ]]; then
return $lint_result
fi

# we don't want to spell check tar balls, binaries, Makefile and json files
if file "$filename" | grep executable >/dev/null 2>&1; then
return $lint_result
fi
if [[ $filename == *".tar" || $filename == *".gz" || $filename == *".json" || $(basename "$filename") == "Makefile" ]]; then
return $lint_result
fi
# misspell is completely optional. If you don't like it
# don't have it installed.
if ! type misspell >/dev/null 2>&1; then
return $lint_result
fi

# misspell is completely optional. If you don't like it
# don't have it installed.
if ! type misspell >/dev/null 2>&1; then
return $lint_result
fi
if ! misspell -error -i "$IGNORE_SPELLINGS" "${filename}"; then
lint_result=1
fi

if ! misspell -error -i "$IGNORE_SPELLINGS" "${filename}"; then
lint_result=1
fi

return $lint_result
return $lint_result
}

function test_mismatch {
filename="$1"
package=$(grep '^package ' "$filename" | awk '{print $2}')
local lint_result=0
test_mismatch() {
local filename="$1"
local package=$(grep '^package ' "$filename" | awk '{print $2}')
local lint_result=0

if [[ $package == "main" ]]; then
return # in package main, all bets are off
fi
if [[ $package == "main" ]]; then
return # in package main, all bets are off
fi

if [[ $filename == *"_internal_test.go" ]]; then
if [[ $package == *"_test" ]]; then
lint_result=1
echo "${filename}: should not be part of a _test package"
fi
else
if [[ ! $package == *"_test" ]]; then
lint_result=1
echo "${filename}: should be part of a _test package"
fi
fi
if [[ $filename == *"_internal_test.go" ]]; then
if [[ $package == *"_test" ]]; then
lint_result=1
echo "${filename}: should not be part of a _test package"
fi
else
if [[ ! $package == *"_test" ]]; then
lint_result=1
echo "${filename}: should be part of a _test package"
fi
fi

return $lint_result
return $lint_result
}

function lint_go {
filename="$1"
local lint_result=0
lint_go() {
local filename="$1"
local lint_result=0

if [ -n "$(gofmt -s -l "${filename}")" ]; then
lint_result=1
echo "${filename}: run gofmt -s -w ${filename}!"
fi
if [ -n "$(gofmt -s -l "${filename}")" ]; then
lint_result=1
echo "${filename}: run gofmt -s -w ${filename}"
fi

go tool vet "${filename}" || lint_result=$?
go tool vet "${filename}" || lint_result=$?

# golint is completely optional. If you don't like it
# don't have it installed.
if type golint >/dev/null 2>&1; then
# golint doesn't set an exit code it seems
if [ -z "$IGNORE_LINT_COMMENT" ]; then
lintoutput=$(golint "${filename}")
else
lintoutput=$(golint "${filename}" | grep -vE 'comment|dot imports|ALL_CAPS')
fi
if [ -n "$lintoutput" ]; then
lint_result=1
echo "$lintoutput"
fi
fi
# golint is completely optional. If you don't like it
# don't have it installed.
if type golint >/dev/null 2>&1; then
# golint doesn't set an exit code it seems
if [ -z "$IGNORE_LINT_COMMENT" ]; then
lintoutput=$(golint "${filename}")
else
lintoutput=$(golint "${filename}" | grep -vE 'comment|dot imports|ALL_CAPS')
fi
if [ -n "$lintoutput" ]; then
lint_result=1
echo "$lintoutput"
fi
fi

# gocyclo is completely optional. If you don't like it
# don't have it installed. Also never blocks a commit,
# it just warns.
if type gocyclo >/dev/null 2>&1; then
gocyclo -over 25 "${filename}" | while read -r line; do
echo "${filename}": higher than 25 cyclomatic complexity - "${line}"
done
fi
# gocyclo is completely optional. If you don't like it
# don't have it installed. Also never blocks a commit,
# it just warns.
if type gocyclo >/dev/null 2>&1; then
gocyclo -over 25 "${filename}" | while read -r line; do
echo "${filename}": higher than 25 cyclomatic complexity - "${line}"
done
fi

return $lint_result
return $lint_result
}

function lint {
filename="$1"
ext="${filename##*\.}"
local lint_result=0
lint_sh() {
local filename="$1"
local lint_result=0

# Don't lint deleted files
if [ ! -f "$filename" ]; then
return
fi
if ! diff <(shfmt -i 4 "${filename}") "${filename}" >/dev/null; then
lint_result=1
echo "${filename}: run shfmt -i 4 -w ${filename}"
fi

# Don't lint this script or static.go
case "$(basename "${filename}")" in
lint) return;;
static.go) return;;
coverage.html) return;;
esac
# the shellcheck is completely optional. If you don't like it
# don't have it installed.
if type shellcheck >/dev/null 2>&1; then
shellcheck "${filename}" || lint_result=1
fi

case "$ext" in
go) lint_go "${filename}" || lint_result=1
;;
esac

if [ -z "$IGNORE_TEST_PACKAGES" ]; then
if [[ "$filename" == *"_test.go" ]]; then
test_mismatch "${filename}" || lint_result=1
fi
fi

spell_check "${filename}" || lint_result=1

return $lint_result
return $lint_result
}

function lint_files {
local lint_result=0
while read -r filename; do
lint "${filename}" || lint_result=1
done
exit $lint_result
lint_tf() {
local filename="$1"
local lint_result=0

if ! diff <(hclfmt "${filename}") "${filename}" >/dev/null; then
lint_result=1
echo "${filename}: run hclfmt -w ${filename}"
fi

return $lint_result
}

function list_files {
if [ $# -gt 0 ]; then
lint() {
filename="$1"
ext="${filename##*\.}"
local lint_result=0

# Don't lint deleted files
if [ ! -f "$filename" ]; then
return
fi

# Don't lint static.go
case "$(basename "${filename}")" in
static.go) return ;;
coverage.html) return ;;
esac

if [[ "$(file --mime-type "${filename}" | awk '{print $2}')" == "text/x-shellscript" ]]; then
ext="sh"
fi

case "$ext" in
go) lint_go "${filename}" || lint_result=1 ;;
sh) lint_sh "${filename}" || lint_result=1 ;;
tf) lint_tf "${filename}" || lint_result=1 ;;
esac

if [ -z "$IGNORE_TEST_PACKAGES" ]; then
if [[ "$filename" == *"_test.go" ]]; then
test_mismatch "${filename}" || lint_result=1
fi
fi

spell_check "${filename}" || lint_result=1

return $lint_result
}

lint_files() {
local lint_result=0
while read -r filename; do
lint "${filename}" || lint_result=1
done
exit $lint_result
}

list_files() {
if [ $# -gt 0 ]; then
git ls-files --exclude-standard | grep -vE '(^|/)vendor/'
else
git diff --cached --name-only
fi
else
git diff --cached --name-only
fi
}

list_files "$@" | lint_files
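One detail worth noting in the rewritten lint() above: dispatch is by file extension, with a fallback to file --mime-type, so extensionless shell scripts (tools/lint itself, for instance) still reach lint_sh. A tiny sketch of that fallback (run from the repository root so the path exists):

#!/bin/bash
# Hedged sketch of the extension fallback in lint() above.
filename="tools/lint"
ext="${filename##*\.}"   # no dot in the name, so ext is still the whole path
if [[ "$(file --mime-type "${filename}" | awk '{print $2}')" == "text/x-shellscript" ]]; then
    ext="sh"             # detected as a shell script by content
fi
echo "$ext"              # prints "sh", so lint_sh handles the file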
@@ -10,15 +10,15 @@ fatal() {
exit 1
}

if [ ! -d .git ] ; then
if [ ! -d .git ]; then
fatal "Current directory is not a git clone"
fi

if [ -z "${PRODUCT}" ]; then
fatal "Must specify PRODUCT"
fatal "Must specify PRODUCT"
fi

if ! BRANCH=$(git symbolic-ref --short HEAD) || [ -z "$BRANCH" ] ; then
if ! BRANCH=$(git symbolic-ref --short HEAD) || [ -z "$BRANCH" ]; then
fatal "Could not determine branch"
fi

@@ -28,9 +28,9 @@ case "$BRANCH" in
TAGS="$VERSION"
;;
*)
if echo "$BRANCH" | grep -qE '^[0-9]+\.[0-9]+' ; then
if echo "$BRANCH" | grep -qE '^[0-9]+\.[0-9]+'; then
DESCRIBE=$(git describe --match 'v*')
if ! VERSION=$(echo "$DESCRIBE" | grep -oP '(?<=^v)[0-9]+\.[0-9]+\.[0-9]+') ; then
if ! VERSION=$(echo "$DESCRIBE" | grep -oP '(?<=^v)[0-9]+\.[0-9]+\.[0-9]+'); then
fatal "Could not infer latest $BRANCH version from $DESCRIBE"
fi
TAGS="$VERSION latest"
@@ -41,7 +41,7 @@ case "$BRANCH" in
;;
esac

for TAG in $TAGS ; do
for TAG in $TAGS; do
echo ">>> Publishing $PRODUCT $VERSION to $1/docs/$PRODUCT/$TAG"
wordepress \
--url "$1" --user "$2" --password "$3" \
@@ -10,7 +10,7 @@ SAVEDNAME=$(echo "$IMAGENAME" | sed "s/[\/\-]/\./g")
IMAGEDIR=$2
shift 2

INPUTFILES=( "$@" )
INPUTFILES=("$@")
CACHEDIR=$HOME/docker/

# Rebuild the image
@@ -18,60 +18,60 @@ rebuild() {
mkdir -p "$CACHEDIR"
rm "$CACHEDIR/$SAVEDNAME"* || true
docker build -t "$IMAGENAME" "$IMAGEDIR"
docker save "$IMAGENAME:latest" | gzip - > "$CACHEDIR/$SAVEDNAME-$CIRCLE_SHA1.gz"
docker save "$IMAGENAME:latest" | gzip - >"$CACHEDIR/$SAVEDNAME-$CIRCLE_SHA1.gz"
}
# Get the revision the cached image was built at
cached_image_rev() {
find "$CACHEDIR" -name "$SAVEDNAME-*" -type f | sed -n 's/^[^\-]*\-\([a-z0-9]*\).gz$/\1/p'
find "$CACHEDIR" -name "$SAVEDNAME-*" -type f | sed -n 's/^[^\-]*\-\([a-z0-9]*\).gz$/\1/p'
}

# Have there been any revisions between $1 and $2
has_changes() {
local rev1=$1
local rev2=$2
local changes
changes=$(git diff --oneline "$rev1..$rev2" -- "${INPUTFILES[@]}" | wc -l)
[ "$changes" -gt 0 ]
local rev1=$1
local rev2=$2
local changes
changes=$(git diff --oneline "$rev1..$rev2" -- "${INPUTFILES[@]}" | wc -l)
[ "$changes" -gt 0 ]
}
commit_timestamp() {
local rev=$1
git show -s --format=%ct "$rev"
local rev=$1
git show -s --format=%ct "$rev"
}

# Is the SHA1 actually present in the repo?
# It could be it isn't, e.g. after a force push
is_valid_commit() {
local rev=$1
git rev-parse --quiet --verify "$rev^{commit}" > /dev/null
local rev=$1
git rev-parse --quiet --verify "$rev^{commit}" >/dev/null
}

cached_revision=$(cached_image_rev)
if [ -z "$cached_revision" ]; then
echo ">>> No cached image found; rebuilding"
rebuild
exit 0
echo ">>> No cached image found; rebuilding"
rebuild
exit 0
fi

if ! is_valid_commit "$cached_revision"; then
echo ">>> Git commit of cached image not found in repo; rebuilding"
rebuild
exit 0
echo ">>> Git commit of cached image not found in repo; rebuilding"
rebuild
exit 0
fi

echo ">>> Found cached image rev $cached_revision"
if has_changes "$cached_revision" "$CIRCLE_SHA1" ; then
echo ">>> Found changes, rebuilding"
rebuild
exit 0
if has_changes "$cached_revision" "$CIRCLE_SHA1"; then
echo ">>> Found changes, rebuilding"
rebuild
exit 0
fi

IMAGE_TIMEOUT="$(( 3 * 24 * 60 * 60 ))"
IMAGE_TIMEOUT="$((3 * 24 * 60 * 60))"
if [ "$(commit_timestamp "$cached_revision")" -lt "${IMAGE_TIMEOUT}" ]; then
|
||||
echo ">>> Image is more the 24hrs old; rebuilding"
|
||||
rebuild
|
||||
exit 0
|
||||
echo ">>> Image is more the 24hrs old; rebuilding"
|
||||
rebuild
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# we didn't rebuild; import cached version
|
||||
|
||||
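For reference, the reformatted cut-off constant above works out to three days expressed in seconds:

# 3 * 24 = 72 hours, 72 * 60 = 4320 minutes, 4320 * 60 = 259200 seconds
echo "$((3 * 24 * 60 * 60))"   # prints 259200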
@@ -3,8 +3,8 @@
set -eu

if [ $# -ne 1 ]; then
echo "Usage: $0 <host>"
exit 1
echo "Usage: $0 <host>"
exit 1
fi

HOST=$1
@@ -12,10 +12,10 @@ HOST=$1
echo "Starting proxy container..."
PROXY_CONTAINER=$(ssh "$HOST" weave run -d weaveworks/socksproxy)

function finish {
echo "Removing proxy container.."
# shellcheck disable=SC2029
ssh "$HOST" docker rm -f "$PROXY_CONTAINER"
function finish() {
echo "Removing proxy container.."
# shellcheck disable=SC2029
ssh "$HOST" docker rm -f "$PROXY_CONTAINER"
}
trap finish EXIT
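The trap at the end of that hunk is what guarantees cleanup: finish() runs on every exit path, not just the successful one. A minimal standalone sketch of the pattern (the container handling is replaced by an echo to keep the sketch side-effect free):

#!/bin/bash
# Hedged sketch of the trap-based cleanup used above.
finish() {
    echo "Removing proxy container.."
    # ssh "$HOST" docker rm -f "$PROXY_CONTAINER" would go here in the real script.
}
trap finish EXIT

echo "doing work"
false   # even a failing last command still triggers finish() on exit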
tools/test (27)
@@ -3,7 +3,7 @@
set -e

DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
GO_TEST_ARGS=( -tags netgo -cpu 4 -timeout 8m )
GO_TEST_ARGS=(-tags netgo -cpu 4 -timeout 8m)
SLOW=
NO_GO_GET=

@@ -33,10 +33,10 @@ if [ -n "$SLOW" ] || [ -n "$CIRCLECI" ]; then
fi

if [ -n "$SLOW" ]; then
GO_TEST_ARGS=( "${GO_TEST_ARGS[@]}" -race -covermode=atomic )
GO_TEST_ARGS=("${GO_TEST_ARGS[@]}" -race -covermode=atomic)

# shellcheck disable=SC2153
if [ -n "$COVERDIR" ] ; then
if [ -n "$COVERDIR" ]; then
coverdir="$COVERDIR"
else
coverdir=$(mktemp -d coverage.XXXXXXXXXX)
@@ -49,18 +49,18 @@ fail=0

if [ -z "$TESTDIRS" ]; then
# NB: Relies on paths being prefixed with './'.
TESTDIRS=( $(git ls-files -- '*_test.go' | grep -vE '^(vendor|prog|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|') )
TESTDIRS=($(git ls-files -- '*_test.go' | grep -vE '^(vendor|prog|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|'))
else
# TESTDIRS on the right side is not really an array variable, it
# is just a string with spaces, but it is written like that to
# shut up the shellcheck tool.
TESTDIRS=( $(for d in ${TESTDIRS[*]}; do echo "$d"; done) )
TESTDIRS=($(for d in ${TESTDIRS[*]}; do echo "$d"; done))
fi

# If running on circle, use the scheduler to work out what tests to run on what shard
if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
PREFIX=$(go list -e ./ | sed -e 's/\//-/g')
TESTDIRS=( $(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX") )
TESTDIRS=($(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX"))
echo "${TESTDIRS[@]}"
fi

@@ -74,26 +74,29 @@ for dir in "${TESTDIRS[@]}"; do
go get -t -tags netgo "$dir"
fi

GO_TEST_ARGS_RUN=( "${GO_TEST_ARGS[@]}" )
GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}")
if [ -n "$SLOW" ]; then
COVERPKGS=$( (go list "$dir"; go list -f '{{join .Deps "\n"}}' "$dir" | grep -v "vendor" | grep "^$PACKAGE_BASE/") | paste -s -d, -)
COVERPKGS=$( (
go list "$dir"
go list -f '{{join .Deps "\n"}}' "$dir" | grep -v "vendor" | grep "^$PACKAGE_BASE/"
) | paste -s -d, -)
output=$(mktemp "$coverdir/unit.XXXXXXXXXX")
GO_TEST_ARGS_RUN=( "${GO_TEST_ARGS[@]}" -coverprofile=$output -coverpkg=$COVERPKGS )
GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}" -coverprofile=$output -coverpkg=$COVERPKGS)
fi

START=$(date +%s)
if ! go test "${GO_TEST_ARGS_RUN[@]}" "$dir"; then
fail=1
fi
RUNTIME=$(( $(date +%s) - START ))
RUNTIME=$(($(date +%s) - START))

# Report test runtime when running on circle, to help scheduler
if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
"$DIR/sched" time "$dir" $RUNTIME
"$DIR/sched" time "$dir" "$RUNTIME"
fi
done

if [ -n "$SLOW" ] && [ -z "$COVERDIR" ] ; then
if [ -n "$SLOW" ] && [ -z "$COVERDIR" ]; then
go get github.com/weaveworks/tools/cover
cover "$coverdir"/* >profile.cov
rm -rf "$coverdir"