mirror of
https://github.com/weaveworks/scope.git
synced 2026-03-03 10:11:03 +00:00
More gce scripts from weave into tools.
This commit is contained in:
186
integration/assert.sh
Normal file
186
integration/assert.sh
Normal file
@@ -0,0 +1,186 @@
|
||||
#!/bin/bash
|
||||
# assert.sh 1.1 - bash unit testing framework
|
||||
# Copyright (C) 2009-2015 Robert Lehmann
|
||||
#
|
||||
# http://github.com/lehmannro/assert.sh
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License as published
|
||||
# by the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Option handling for the test harness. Each flag can also be pre-set via
# the environment (e.g. DEBUG=1); the empty-default exports keep `set -u`
# consumers happy.
export DISCOVERONLY=${DISCOVERONLY:-}
export DEBUG=${DEBUG:-}
export STOP=${STOP:-}
export INVARIANT=${INVARIANT:-}
export CONTINUE=${CONTINUE:-}

# Parse short and long options with util-linux getopt.
# "$@" (not $*) so arguments containing whitespace survive intact;
# `exit 1` instead of the non-portable `exit -1` (bash maps -1 to 255).
args="$(getopt -n "$0" -l \
    verbose,help,stop,discover,invariant,continue vhxdic "$@")" \
    || exit 1
for arg in $args; do
    case "$arg" in
        -h)
            echo "$0 [-vxidc]" \
                "[--verbose] [--stop] [--invariant] [--discover] [--continue]"
            echo "$(sed 's/./ /g' <<< "$0") [-h] [--help]"
            exit 0;;
        --help)
            cat <<EOF
Usage: $0 [options]
Language-agnostic unit tests for subprocesses.

Options:
  -v, --verbose    generate output for every individual test case
  -x, --stop       stop running tests after the first failure
  -i, --invariant  do not measure timings to remain invariant between runs
  -d, --discover   collect test suites only, do not run any tests
  -c, --continue   do not modify exit code to test suite status
  -h               show brief usage information and exit
  --help           show this help message and exit
EOF
            exit 0;;
        -v|--verbose)
            DEBUG=1;;
        -x|--stop)
            STOP=1;;
        -i|--invariant)
            INVARIANT=1;;
        -d|--discover)
            DISCOVERONLY=1;;
        -c|--continue)
            CONTINUE=1;;
    esac
done
|
||||
|
||||
_indent=$'\n\t' # formatting helper for multi-line failure reports

# Zero the per-suite counters and restart the suite timer.
_assert_reset() {
    tests_ran=0
    tests_failed=0
    tests_errors=()
    # nanoseconds since epoch; trailing literal "N" is tolerated by
    # assert_end for date(1) builds without %N support
    tests_starttime="$(date +%s%N)"
}
|
||||
|
||||
# assert_end [suite ..] -- finish a suite: print its summary, fold its
# result into tests_suite_status, and reset the counters.
assert_end() {
    tests_endtime="$(date +%s%N)"
    # force a visible decimal place for the seconds (leading zeros when
    # needed); the /%N/ substitution copes with date(1) builds that emit a
    # literal "N" instead of nanoseconds
    tests_time="$( \
        printf "%010d" "$(( ${tests_endtime/%N/000000000}
                            - ${tests_starttime/%N/000000000} ))")" # in ns
    tests="$tests_ran ${*:+$* }tests"
    if [[ -n "$DISCOVERONLY" ]]; then
        echo "collected $tests."
        _assert_reset
        return
    fi
    [[ -n "$DEBUG" ]] && echo
    # split tests_time into two substrings:
    #   ${tests_time:0:${#tests_time}-9}  - seconds
    #   ${tests_time:${#tests_time}-9:3}  - milliseconds
    if [[ -z "$INVARIANT" ]]; then
        report_time=" in ${tests_time:0:${#tests_time}-9}.${tests_time:${#tests_time}-9:3}s"
    else
        report_time=
    fi

    if [[ "$tests_failed" -eq 0 ]]; then
        echo "all $tests passed$report_time."
    else
        for error in "${tests_errors[@]}"; do echo "$error"; done
        echo "$tests_failed of $tests failed$report_time."
    fi
    tests_failed_previous=$tests_failed
    [[ $tests_failed -gt 0 ]] && tests_suite_status=1
    _assert_reset
}
|
||||
|
||||
# assert <command> <expected stdout> [stdin]
# Run <command> (optionally fed [stdin]) and compare its stdout against the
# expected string; register a failure when they differ.
assert() {
    (( tests_ran++ )) || :
    [[ -z "$DISCOVERONLY" ]] || return
    expected=$(echo -ne "${2:-}")
    result="$(eval 2>/dev/null $1 <<< ${3:-})" || true
    if [[ "$result" != "$expected" ]]; then
        # collapse the actual output onto one line for the report
        result="$(sed -e :a -e '$!N;s/\n/\\n/;ta' <<< "$result")"
        [[ -z "$result" ]] && result="nothing" || result="\"$result\""
        [[ -z "$2" ]] && expected="nothing" || expected="\"$2\""
        _assert_fail "expected $expected${_indent}got $result" "$1" "$3"
        return
    fi
    [[ -z "$DEBUG" ]] || echo -n .
}
|
||||
|
||||
# assert_raises <command> <expected code> [stdin]
# Run <command> in a subshell and check its exit status (default 0).
assert_raises() {
    (( tests_ran++ )) || :
    [[ -z "$DISCOVERONLY" ]] || return
    status=0
    (eval $1 <<< ${3:-}) > /dev/null 2>&1 || status=$?
    expected=${2:-0}
    if [[ "$status" -ne "$expected" ]]; then
        _assert_fail "program terminated with code $status instead of $expected" "$1" "$3"
        return
    fi
    [[ -z "$DEBUG" ]] || echo -n .
}
|
||||
|
||||
# _assert_fail <failure> <command> <stdin>
# Record one test failure; with STOP set, report it immediately and abort.
_assert_fail() {
    [[ -n "$DEBUG" ]] && echo -n X
    report="test #$tests_ran \"$2${3:+ <<< $3}\" failed:${_indent}$1"
    if [[ -n "$STOP" ]]; then
        # finish the progress-dots line before printing the report
        [[ -n "$DEBUG" ]] && echo
        echo "$report"
        exit 1
    fi
    tests_errors[$tests_failed]="$report"
    # `|| :` keeps the post-increment from returning 1 when it was 0
    (( tests_failed++ )) || :
}
|
||||
|
||||
# skip_if <command ..>
# Skip the next assertion when <command> succeeds; its exit code is kept
# in the global $status either way.
skip_if() {
    if (eval $@) > /dev/null 2>&1; then status=0; else status=$?; fi
    [[ "$status" -eq 0 ]] || return
    skip
}
|
||||
|
||||
# skip (no arguments) -- arrange for the *next* command to be skipped.
skip() {
    # remember the current extdebug/errexit settings so _skip can restore
    # them after the skipped command
    shopt -q extdebug && tests_extdebug=0 || tests_extdebug=1
    shopt -q -o errexit && tests_errexit=0 || tests_errexit=1
    # with extdebug on, a DEBUG trap handler returning 1 suppresses the
    # command it fired for
    shopt -s extdebug
    # errexit (set -e) must be off so returning 1 cannot kill the shell
    set +o errexit
    tests_trapped=0
    trap _skip DEBUG
}
|
||||
# DEBUG-trap handler installed by skip(): fires once for the command being
# skipped, then once more to undo the shell-option twiddling.
_skip() {
    if [[ $tests_trapped -eq 0 ]]; then
        # first firing: suppress this command, but keep the trap installed
        # because the *next* firing must restore extdebug/errexit
        tests_trapped=1
        [[ -z "$DEBUG" ]] || echo -n s
        return 1
    fi
    trap - DEBUG
    [[ $tests_extdebug -eq 0 ]] || shopt -u extdebug
    [[ $tests_errexit -eq 1 ]] || set -o errexit
    return 0
}
|
||||
|
||||
|
||||
# Module initialisation: start with clean counters, keep any suite status
# carried over from an earlier source, and hook that status into the
# script's exit code.
_assert_reset
: ${tests_suite_status:=0} # remember if any of the tests failed so far

_assert_cleanup() {
    local status=$?
    # only rewrite a zero exit code, and only when --continue is not set
    [[ $status -eq 0 && -z $CONTINUE ]] && exit $tests_suite_status
}
trap _assert_cleanup EXIT
|
||||
125
integration/config.sh
Normal file
125
integration/config.sh
Normal file
@@ -0,0 +1,125 @@
|
||||
# NB only to be sourced

set -e

DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Guard against double-sourcing, which would clobber assert.sh's
# global bookkeeping state.
if [ -n "$SOURCED_CONFIG_SH" ]; then
    return
fi
SOURCED_CONFIG_SH=true

# these ought to match what is in Vagrantfile
N_MACHINES=${N_MACHINES:-3}
IP_PREFIX=${IP_PREFIX:-192.168.48}
IP_SUFFIX_BASE=${IP_SUFFIX_BASE:-10}

# Derive the host list from the Vagrant IP layout unless the caller
# already supplied one (e.g. gce.sh exports HOSTS for GCE runs).
if [ -z "$HOSTS" ] ; then
    for i in $(seq 1 $N_MACHINES); do
        IP="${IP_PREFIX}.$((${IP_SUFFIX_BASE}+$i))"
        HOSTS="$HOSTS $IP"
    done
fi

# these are used by the tests
HOST1=$(echo $HOSTS | cut -f 1 -d ' ')
HOST2=$(echo $HOSTS | cut -f 2 -d ' ')
HOST3=$(echo $HOSTS | cut -f 3 -d ' ')

. "$DIR/assert.sh"

SSH_DIR=${SSH_DIR:-$DIR}
SSH=${SSH:-ssh -l vagrant -i "$SSH_DIR/insecure_private_key" -o "UserKnownHostsFile=$SSH_DIR/.ssh_known_hosts" -o CheckHostIP=no -o StrictHostKeyChecking=no}

SMALL_IMAGE="alpine"
TEST_IMAGES="$SMALL_IMAGE"

PING="ping -nq -W 1 -c 1"
DOCKER_PORT=2375
|
||||
|
||||
# remote <name> <command...>
# Run <command> locally, prefixing every line of its stdout with a blue
# "<name>>" tag so interleaved per-host output stays attributable.
remote() {
    rem=$1
    shift 1
    # IFS=/-r keep leading whitespace and backslashes in each line intact
    # (plain `read` used to eat backslashes), and printf with a quoted
    # argument avoids the word-splitting/globbing of the old unquoted
    # `echo -e ... $line`.
    "$@" > >(while IFS= read -r line; do
        printf '\e[0;34m%s>\e[0m %s\n' "$rem" "$line"
    done)
}
|
||||
|
||||
# colourise <ansi-code> <command...>
# Run <command> wrapped in the given ANSI colour; colour codes are only
# emitted when stdin is a terminal.
colourise() {
    if [ -t 0 ] ; then echo -ne $'\e['$1'm' ; fi
    shift
    # It's important that we don't do this in a subshell, as some
    # commands we execute need to modify global state
    "$@"
    if [ -t 0 ] ; then echo -ne $'\e[0m' ; fi
}
|
||||
|
||||
# Colour-tagged wrappers around colourise, used for suite banners and
# status lines: bright white, grey, bright red, bright green.
whitely() { colourise '1;37' "$@"; }

greyly () { colourise '0;37' "$@"; }

redly() { colourise '1;31' "$@"; }

greenly() { colourise '1;32' "$@"; }
|
||||
|
||||
# run_on <host> <command...>
# Execute <command> on <host> via $SSH, echoing the command first when
# DEBUG is set; output is tagged with the host name by remote().
run_on() {
    host=$1
    shift
    [ -z "$DEBUG" ] || greyly echo "Running on $host: $@" >&2
    remote $host $SSH $host "$@"
}
|
||||
|
||||
# docker_on <host> <args...>
# Run a local docker client against the remote daemon listening on
# tcp://<host>:$DOCKER_PORT.
docker_on() {
    host=$1
    shift
    [ -z "$DEBUG" ] || greyly echo "Docker on $host:$DOCKER_PORT: $@" >&2
    docker -H tcp://$host:$DOCKER_PORT "$@"
}
|
||||
|
||||
# weave_on <host> <args...>
# Run the weave script against the docker daemon on <host> by pointing
# DOCKER_HOST at it for the duration of the call.
weave_on() {
    host=$1
    shift
    [ -z "$DEBUG" ] || greyly echo "Weave on $host:$DOCKER_PORT: $@" >&2
    DOCKER_HOST=tcp://$host:$DOCKER_PORT $WEAVE "$@"
}
|
||||
|
||||
# exec_on <host> <container> <command...>
# docker-exec <command> inside <container> on the remote host.
exec_on() {
    host=$1
    container=$2
    shift 2
    docker -H tcp://$host:$DOCKER_PORT exec $container "$@"
}
|
||||
|
||||
# rm_containers <host> [container...]
# Force-remove the given containers on <host>; a no-op when no container
# ids are supplied.
rm_containers() {
    host=$1
    shift
    [ $# -eq 0 ] || docker_on $host rm -f "$@" >/dev/null
}
|
||||
|
||||
# start_suite <banner...>
# Scrub every test host — remove all containers except the weave plugin,
# drop any stale weave docker network, reset weave — then announce the
# suite with the given banner.
start_suite() {
    for host in $HOSTS; do
        [ -z "$DEBUG" ] || echo "Cleaning up on $host: removing all containers and resetting weave"
        PLUGIN_ID=$(docker_on $host ps -aq --filter=name=weaveplugin)
        # `cat` is the identity filter when there is no plugin to spare
        PLUGIN_FILTER="cat"
        [ -n "$PLUGIN_ID" ] && PLUGIN_FILTER="grep -v $PLUGIN_ID"
        rm_containers $host $(docker_on $host ps -aq 2>/dev/null | $PLUGIN_FILTER)
        run_on $host "docker network ls | grep -q ' weave ' && docker network rm weave" || true
        weave_on $host reset 2>/dev/null
    done
    whitely echo "$@"
}
|
||||
|
||||
# end_suite -- print the assert.sh summary for the suite just run.
end_suite() {
    whitely assert_end
}

# Path to the weave script checked out next to this repository.
WEAVE=$DIR/../weave
|
||||
|
||||
176
integration/gce.sh
Executable file
176
integration/gce.sh
Executable file
@@ -0,0 +1,176 @@
|
||||
#!/bin/bash
# This script has a bunch of GCE-related functions:
# ./gce.sh setup -  starts two VMs on GCE and configures them to run our integration tests
# . ./gce.sh; ./run_all.sh - set a bunch of environment variables for the tests
# ./gce.sh destroy - tear down the VMs
# ./gce.sh make_template - make a fresh VM template; update TEMPLATE_NAME first!

set -e

: ${KEY_FILE:=/tmp/gce_private_key.json}
: ${SSH_KEY_FILE:=$HOME/.ssh/gce_ssh_key}
: ${IMAGE:=ubuntu-14-04}
: ${ZONE:=us-central1-a}
: ${PROJECT:=}
: ${TEMPLATE_NAME:=}
: ${NUM_HOSTS:=}

# POSIX marks `[ ... -o ... ]` obsolescent and it parses ambiguously;
# use separate tests joined with ||, and send the diagnostic to stderr.
if [ -z "$PROJECT" ] || [ -z "$NUM_HOSTS" ] || [ -z "$TEMPLATE_NAME" ]; then
    echo "Must specify PROJECT, NUM_HOSTS and TEMPLATE_NAME" >&2
    exit 1
fi

# Disambiguate VM names between concurrent CircleCI builds.
SUFFIX=""
if [ -n "$CIRCLECI" ]; then
    SUFFIX="-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX"
fi

# Setup authentication
gcloud auth activate-service-account --key-file "$KEY_FILE" 1>/dev/null
gcloud config set project "$PROJECT"
|
||||
|
||||
# Echo the space-separated names of all VMs for this build, newest-index
# first (each name is prepended as the loop runs).
function vm_names {
    local names=
    for i in $(seq 1 $NUM_HOSTS); do
        names="host$i$SUFFIX $names"
    done
    echo "$names"
}
|
||||
|
||||
# Delete all vms in this account
function destroy {
    names="$(vm_names)"
    # the listing always has a header line, so <=1 lines means no instances
    if [ $(gcloud compute instances list --zone $ZONE -q $names | wc -l) -le 1 ] ; then
        return 0
    fi
    for i in {0..10}; do
        # gcloud instances delete can sometimes hang.
        case $(set +e; timeout 60s /bin/bash -c "gcloud compute instances delete --zone $ZONE -q $names >/dev/null 2>&1"; echo $?) in
            0)
                return 0
                ;;
            124)
                # 124 means it timed out
                break
                ;;
            *)
                return 1
        esac
    done
}
|
||||
|
||||
# internal_ip <instances.json> <name> -- private IP of instance <name>.
function internal_ip {
    jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].networkIP" $1
}
|
||||
|
||||
# external_ip <instances.json> <name> -- public (NAT) IP of instance <name>.
function external_ip {
    jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].accessConfigs[0].natIP" $1
}
|
||||
|
||||
# try_connect <host> -- poll until <host> accepts an ssh connection,
# retrying every 2s for up to 11 attempts.
# NOTE(review): gives up *silently* after the retries are exhausted, so
# callers proceed regardless — confirm whether a hard failure is wanted.
function try_connect {
    for i in {0..10}; do
        ssh -t $1 true && return
        sleep 2
    done
}
|
||||
|
||||
# install_docker_on <host>
# Install docker plus the test prerequisites on <host>, exposing the
# daemon on tcp://0.0.0.0:2375 for the docker_on/weave_on helpers.
function install_docker_on {
    name=$1
    ssh -t $name sudo bash -x -s <<EOF
curl -sSL https://get.docker.com/gpg | sudo apt-key add -
curl -sSL https://get.docker.com/ | sh
apt-get update -qq;
apt-get install -q -y --force-yes --no-install-recommends ethtool;
usermod -a -G docker vagrant;
echo 'DOCKER_OPTS="-H unix:///var/run/docker.sock -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay"' >> /etc/default/docker;
service docker restart
EOF
    # It seems we need a short delay for docker to start up, so I put this in
    # a separate ssh connection. This installs nsenter.
    ssh -t $name sudo docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter
}
|
||||
|
||||
# copy_hosts <hostname> <hosts-file>
# Append the generated hosts file to /etc/hosts on <hostname>.
function copy_hosts {
    hostname=$1
    hosts=$2
    # redirect the file straight into ssh instead of `cat file | ssh`
    # (useless use of cat; one fewer process, same stdin contents)
    ssh -t "$hostname" "sudo -- sh -c \"cat >>/etc/hosts\"" <$hosts
}
|
||||
|
||||
# Create new set of VMs
function setup {
    # start from a clean slate: drop any VMs left over from earlier runs
    destroy

    names="$(vm_names)"
    gcloud compute instances create $names --image $TEMPLATE_NAME --zone $ZONE
    gcloud compute config-ssh --ssh-key-file $SSH_KEY_FILE
    # config-ssh disables host-key checking per host; drop that so the keys
    # recorded below are actually used
    sed -i '/UserKnownHostsFile=\/dev\/null/d' ~/.ssh/config

    # build an /etc/hosts file for these vms
    hosts=$(mktemp hosts.XXXXXXXXXX)
    json=$(mktemp json.XXXXXXXXXX)
    gcloud compute instances list --format=json >$json
    for name in $names; do
        echo "$(internal_ip $json $name) $name.$ZONE.$PROJECT" >>$hosts
    done

    for name in $names; do
        hostname="$name.$ZONE.$PROJECT"

        # Add the remote ip to the local /etc/hosts
        sudo sed -i "/$hostname/d" /etc/hosts
        sudo sh -c "echo \"$(external_ip $json $name) $hostname\" >>/etc/hosts"
        try_connect $hostname

        # distribute the hosts file to all VMs concurrently
        copy_hosts $hostname $hosts &
    done

    wait

    rm $hosts $json
}
|
||||
|
||||
# make_template
# Boot a VM from the base $IMAGE, provision docker on it, then turn its
# boot disk into the reusable $TEMPLATE_NAME image.
function make_template {
    gcloud compute instances create $TEMPLATE_NAME --image $IMAGE --zone $ZONE
    gcloud compute config-ssh --ssh-key-file $SSH_KEY_FILE
    name="$TEMPLATE_NAME.$ZONE.$PROJECT"
    try_connect $name
    install_docker_on $name
    # delete the VM but keep its boot disk, which becomes the image source
    gcloud -q compute instances delete $TEMPLATE_NAME --keep-disks boot --zone $ZONE
    gcloud compute images create $TEMPLATE_NAME --source-disk $TEMPLATE_NAME --source-disk-zone $ZONE
}
|
||||
|
||||
# hosts
# Print eval-able `export` statements (SSH, HOSTS, ADD_HOST_ARGS)
# describing the current VM set, for ". ./gce.sh; ./run_all.sh" use.
function hosts {
    hosts=
    args=
    json=$(mktemp json.XXXXXXXXXX)
    gcloud compute instances list --format=json >$json
    for name in $(vm_names); do
        hostname="$name.$ZONE.$PROJECT"
        hosts="$hostname $hosts"
        args="--add-host=$hostname:$(internal_ip $json $name) $args"
    done
    echo export SSH=\"ssh -l vagrant\"
    echo export HOSTS=\"$hosts\"
    echo export ADD_HOST_ARGS=\"$args\"
    rm $json
}
|
||||
|
||||
# Dispatch on the first CLI argument; sourcing the script with no argument
# only defines the functions and environment above.
case "$1" in
    setup)
        setup
        ;;
    hosts)
        hosts
        ;;
    destroy)
        destroy
        ;;
    make_template)
        # only build the template image if it doesn't already exist
        if ! gcloud compute images list | grep $PROJECT | grep $TEMPLATE_NAME; then
            make_template
        fi
        ;;
esac
|
||||
27
integration/run_all.sh
Executable file
27
integration/run_all.sh
Executable file
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash
# Run the integration test suite: sanity-check the hosts, then hand the
# test scripts to the runner (optionally scheduled/parallelised on CI).

DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$DIR/config.sh"

whitely echo Sanity checks
if ! bash "$DIR/sanity_check.sh"; then
    whitely echo ...failed
    exit 1
fi
whitely echo ...ok

# default to every *_test.sh under the current directory
TESTS="${@:-$(find . -name '*_test.sh')}"
RUNNER_ARGS=""

# If running on circle, use the scheduler to work out what tests to run.
# `[ -a ]`/`[ -o ]` are obsolescent in POSIX; join separate tests instead.
if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ]; then
    RUNNER_ARGS="$RUNNER_ARGS -scheduler"
fi

# If running on circle or PARALLEL is not empty, run tests in parallel
if [ -n "$CIRCLECI" ] || [ -n "$PARALLEL" ]; then
    RUNNER_ARGS="$RUNNER_ARGS -parallel"
fi

make -C ${DIR}/../runner
HOSTS="$HOSTS" "${DIR}/../runner/runner" $RUNNER_ARGS $TESTS
|
||||
26
integration/sanity_check.sh
Executable file
26
integration/sanity_check.sh
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
# Sanity-check the test environment: every host must be able to ping the
# others, and docker/weave must be reachable on each of them.

# Resolve config.sh relative to this script, not the caller's cwd, so the
# check also works when invoked as `bash /path/to/sanity_check.sh` from
# elsewhere (the old `. ./config.sh` broke in that case).
. "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/config.sh"

set -e

whitely echo Ping each host from the other
for host in $HOSTS; do
    for other in $HOSTS; do
        [ $host = $other ] || run_on $host $PING $other
    done
done

whitely echo Check we can reach docker

for host in $HOSTS; do
    echo
    echo Host Version Info: $host
    echo =====================================
    echo "# docker version"
    docker_on $host version
    echo "# docker info"
    docker_on $host info
    echo "# weave version"
    weave_on $host version
done
|
||||
Reference in New Issue
Block a user