Merge commit '1eeb4d0d80f2c02805b531c09e88658fb72b78a8' into unit-test-tags

Bring in updates to tools subdir
Bryan Boreham
2017-07-13 16:18:44 +00:00
20 changed files with 638 additions and 426 deletions

View File

@@ -14,7 +14,8 @@ RUN apt-get update && \
unzip && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN pip install attrs pyhcl
RUN curl -fsSL -o shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
RUN curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \
chmod +x shfmt && \
mv shfmt /usr/bin
RUN go clean -i net && \

View File

@@ -14,9 +14,10 @@ dependencies:
- mkdir -p $(dirname $SRCDIR)
- cp -r $(pwd)/ $SRCDIR
- |
curl -fsSL -o shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
chmod +x shfmt && \
sudo mv shfmt /usr/bin
curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \
chmod +x shfmt && \
sudo mv shfmt /usr/bin
- |
cd $SRCDIR;
go get \
@@ -24,6 +25,7 @@ dependencies:
github.com/golang/lint/golint \
github.com/kisielk/errcheck \
github.com/fatih/hclfmt
- pip install yapf==0.16.2 flake8==3.3.0
test:
override:

View File

@@ -113,6 +113,28 @@ N.B.: `--ssh-extra-args` is used to provide:
* `StrictHostKeyChecking=no`: as VMs come and go, the same IP can be used by a different machine, so checking the host's SSH key may fail. Note that this introduces a risk of a man-in-the-middle attack.
* `UserKnownHostsFile=/dev/null`: if you previously connected to a VM with the same IP but a different host key, and that key was added to `~/.ssh/known_hosts`, SSH may refuse to connect, hence we use `/dev/null` instead of `~/.ssh/known_hosts`.
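For instance, a full invocation passing both options might look as follows (a sketch, with a placeholder playbook name):
```
ansible-playbook <playbook>.yml \
    --ssh-extra-args "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
```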
### Docker installation role
Various ways to install Docker are provided:
- `docker-from-docker-ce-repo`
- `docker-from-docker-repo`
- `docker-from-get.docker.com`
- `docker-from-tarball`
each producing a slightly different outcome, which can be useful for testing various setup scenarios.
The `docker-install` role selects one of the above ways to install Docker based on the `docker_install_role` variable.
The default value for this variable is configured in `group_vars/all`.
You can, however, override it with whichever role you want to run, by passing the role's name as a key-value pair in `extra-vars`, e.g.:
```
ansible-playbook <playbook>.yml \
--extra-vars "docker_install_role=docker-from-docker-ce-repo"
```
## Resources
* [https://www.vagrantup.com/docs/provisioning/ansible.html](https://www.vagrantup.com/docs/provisioning/ansible.html)

View File

@@ -1,8 +1,8 @@
---
go_version: 1.8.1
terraform_version: 0.8.5
docker_version: 1.11.2
docker_install_role: 'docker-from-get.docker.com'
docker_version: 17.06
docker_install_role: 'docker-from-docker-ce-repo'
kubernetes_version: 1.6.1
kubernetes_cni_version: 0.5.1
kubernetes_token: '123456.0123456789123456'

View File

@@ -0,0 +1,2 @@
[Timer]
Persistent=false

View File

@@ -38,3 +38,11 @@
dest: /usr/bin
mode: 0555
creates: /usr/bin/terraform
# Ubuntu runs an apt update process on first boot from an image.
# This is of questionable value when the machines are only going to live for a few minutes.
# If left enabled, the process will also run daily.
# We have also seen the update process leave a 'defunct' process behind, which throws off Weave Net smoke-test checks.
# So we override the 'Persistent' setting: the timer still fires at the scheduled time, but does not try to catch up on first boot.
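# (A quick sanity check on a provisioned host, as a sketch: 'systemctl cat apt-daily.timer' should show this drop-in with Persistent=false.)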
- name: copy apt daily override
copy: src=apt-daily.timer.conf dest=/etc/systemd/system/apt-daily.timer.d/

View File

@@ -1,3 +1,3 @@
[Service]
ExecStart=
ExecStart=/usr/bin/docker daemon -H fd:// -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay --insecure-registry "weave-ci-registry:5000"
ExecStart=/usr/bin/dockerd -H fd:// -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay --insecure-registry "weave-ci-registry:5000"

View File

@@ -0,0 +1,35 @@
---
# Debian / Ubuntu specific:
- name: install dependencies for docker repository
package:
name: "{{ item }}"
state: present
with_items:
- apt-transport-https
- ca-certificates
- name: add apt key for the docker repository
apt_key:
keyserver: hkp://ha.pool.sks-keyservers.net:80
id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
state: present
register: apt_key_docker_repo
- name: add docker's apt repository ({{ ansible_distribution | lower }}-{{ ansible_distribution_release }})
apt_repository:
repo: deb https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename|lower }} stable
state: present
register: apt_docker_repo
- name: update apt's cache
apt:
update_cache: yes
when: apt_key_docker_repo.changed or apt_docker_repo.changed
- name: install docker-engine
package:
name: "{{ item }}"
state: present
with_items:
- docker-ce={{ docker_version }}*

View File

@@ -1,29 +1,10 @@
# Docker installation from Docker's CentOS Community Edition
# See also: https://docs.docker.com/engine/installation/linux/centos/
---
# Set up Docker
# See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
- name: remove all potentially pre-existing packages
yum:
name: '{{ item }}'
state: absent
with_items:
- docker
- docker-common
- container-selinux
- docker-selinux
- docker-engine
# Distribution-specific tasks:
- include: debian.yml
when: ansible_os_family == "Debian"
- name: install yum-utils
yum:
name: yum-utils
state: present
- name: add docker ce repo
command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Note that Docker CE versions do not follow regular Docker versions, but look
# like, for example: "17.03.0.el7"
- name: install docker
yum:
name: 'docker-ce-{{ docker_version }}'
update_cache: yes
state: present
- include: redhat.yml
when: ansible_os_family == "RedHat"

View File

@@ -0,0 +1,29 @@
# Docker installation from Docker's CentOS Community Edition
# See also: https://docs.docker.com/engine/installation/linux/centos/
- name: remove all potentially pre-existing packages
yum:
name: '{{ item }}'
state: absent
with_items:
- docker
- docker-common
- container-selinux
- docker-selinux
- docker-engine
- name: install yum-utils
yum:
name: yum-utils
state: present
- name: add docker ce repo
command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Note that Docker CE versions do not follow regular Docker versions, but look
# like, for example: "17.03.0.el7"
- name: install docker
yum:
name: 'docker-ce-{{ docker_version }}'
update_cache: yes
state: present

View File

@@ -5,4 +5,4 @@
shell: curl -sSL https://get.docker.com/gpg | sudo apt-key add -
- name: install docker
shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine={{ docker_version }}*/ | sh'
shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine={{ docker_version }}*/ -e s/docker-ce/docker-ce={{ docker_version }}*/ | sh'

View File

@@ -45,3 +45,12 @@
- alpine
- aanand/docker-dnsutils
- weaveworks/hello-world
- name: docker pull docker-py, which is used by tests
docker_image:
name: joffrey/docker-py
tag: '{{ item }}'
state: present
with_items:
- '1.8.1'
- '1.9.0-rc2'

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# Generate the cross product of latest versions of Weave Net's dependencies:
# - Go
@@ -20,74 +20,72 @@ from list_versions import DEPS, get_versions_from, filter_versions
from itertools import product
# See also: /usr/include/sysexits.h
_ERROR_RUNTIME=1
_ERROR_ILLEGAL_ARGS=64
_ERROR_RUNTIME = 1
_ERROR_ILLEGAL_ARGS = 64
def _usage(error_message=None):
if error_message:
stderr.write('ERROR: ' + error_message + linesep)
stdout.write(linesep.join([
'Usage:',
' cross_versions.py [OPTION]...',
'Examples:',
' cross_versions.py',
' cross_versions.py -r',
' cross_versions.py --rc',
' cross_versions.py -l',
' cross_versions.py --latest',
'Options:',
'-l/--latest Include only the latest version of each major and minor version sub-tree.',
'-r/--rc Include release candidate versions.',
'-h/--help Prints this!',
''
]))
if error_message:
stderr.write('ERROR: ' + error_message + linesep)
stdout.write(
linesep.join([
'Usage:', ' cross_versions.py [OPTION]...', 'Examples:',
' cross_versions.py', ' cross_versions.py -r',
' cross_versions.py --rc', ' cross_versions.py -l',
' cross_versions.py --latest', 'Options:',
'-l/--latest Include only the latest version of each major and'
' minor version sub-tree.',
'-r/--rc Include release candidate versions.',
'-h/--help Prints this!', ''
]))
def _validate_input(argv):
try:
config = {
'rc': False,
'latest': False
}
opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc'])
for opt, value in opts:
if opt in ('-h', '--help'):
_usage()
exit()
if opt in ('-l', '--latest'):
config['latest'] = True
if opt in ('-r', '--rc'):
config['rc'] = True
if len(args) != 0:
raise ValueError('Unsupported argument(s): %s.' % args)
return config
except GetoptError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
except ValueError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
try:
config = {'rc': False, 'latest': False}
opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc'])
for opt, value in opts:
if opt in ('-h', '--help'):
_usage()
exit()
if opt in ('-l', '--latest'):
config['latest'] = True
if opt in ('-r', '--rc'):
config['rc'] = True
if len(args) != 0:
raise ValueError('Unsupported argument(s): %s.' % args)
return config
except GetoptError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
except ValueError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
def _versions(dependency, config):
return map(str,
filter_versions(
get_versions_from(DEPS[dependency]['url'], DEPS[dependency]['re']),
DEPS[dependency]['min'],
**config
)
)
return map(str,
filter_versions(
get_versions_from(DEPS[dependency]['url'],
DEPS[dependency]['re']),
DEPS[dependency]['min'], **config))
def cross_versions(config):
docker_versions = _versions('docker', config)
k8s_versions = _versions('kubernetes', config)
return product(docker_versions, k8s_versions)
docker_versions = _versions('docker', config)
k8s_versions = _versions('kubernetes', config)
return product(docker_versions, k8s_versions)
def main(argv):
try:
config = _validate_input(argv)
print(linesep.join('\t'.join(triple) for triple in cross_versions(config)))
except Exception as e:
print(str(e))
exit(_ERROR_RUNTIME)
try:
config = _validate_input(argv)
print(linesep.join('\t'.join(triple)
for triple in cross_versions(config)))
except Exception as e:
print(str(e))
exit(_ERROR_RUNTIME)
if __name__ == '__main__':
main(argv[1:])
main(argv[1:])
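# Usage sketch: './cross_versions.py --latest' prints one tab-separated
# Docker/Kubernetes version pair per line.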

View File

@@ -1,17 +1,32 @@
#!/usr/bin/python
#!/usr/bin/env python
# List all available versions of Weave Net's dependencies:
# - Go
# - Docker
# - Kubernetes
#
# Depending on the parameters passed, it can gather the equivalent of the below bash one-liners:
# git ls-remote --tags https://github.com/golang/go | grep -oP '(?<=refs/tags/go)[\.\d]+$' | sort --version-sort
# git ls-remote --tags https://github.com/golang/go | grep -oP '(?<=refs/tags/go)[\.\d]+rc\d+$' | sort --version-sort | tail -n 1
# git ls-remote --tags https://github.com/docker/docker | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' | sort --version-sort
# git ls-remote --tags https://github.com/docker/docker | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-rc\d*$' | sort --version-sort | tail -n 1
# git ls-remote --tags https://github.com/kubernetes/kubernetes | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' | sort --version-sort
# git ls-remote --tags https://github.com/kubernetes/kubernetes | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-beta\.\d+$' | sort --version-sort | tail -n 1
# Depending on the parameters passed, it can gather the equivalent of the below
# bash one-liners:
# git ls-remote --tags https://github.com/golang/go \
# | grep -oP '(?<=refs/tags/go)[\.\d]+$' \
# | sort --version-sort
# git ls-remote --tags https://github.com/golang/go \
# | grep -oP '(?<=refs/tags/go)[\.\d]+rc\d+$' \
# | sort --version-sort \
# | tail -n 1
# git ls-remote --tags https://github.com/docker/docker \
# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' \
# | sort --version-sort
# git ls-remote --tags https://github.com/docker/docker \
# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-rc\d*$' \
# | sort --version-sort \
# | tail -n 1
# git ls-remote --tags https://github.com/kubernetes/kubernetes \
# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+$' \
# | sort --version-sort
# git ls-remote --tags https://github.com/kubernetes/kubernetes \
# | grep -oP '(?<=refs/tags/v)\d+\.\d+\.\d+\-beta\.\d+$' \
# | sort --version-sort | tail -n 1
#
# Dependencies:
# - python
@@ -23,7 +38,7 @@
from os import linesep, path
from sys import argv, exit, stdout, stderr
from getopt import getopt, GetoptError
from subprocess import Popen, PIPE, STDOUT
from subprocess import Popen, PIPE
from pkg_resources import parse_version
from itertools import groupby
from six.moves import filter
@@ -31,236 +46,298 @@ import shlex
import re
# See also: /usr/include/sysexits.h
_ERROR_RUNTIME=1
_ERROR_ILLEGAL_ARGS=64
_ERROR_RUNTIME = 1
_ERROR_ILLEGAL_ARGS = 64
_TAG_REGEX='^[0-9a-f]{40}\s+refs/tags/%s$'
_VERSION='version'
DEPS={
'go': {
'url': 'https://github.com/golang/go',
're': 'go(?P<%s>[\d\.]+(?:rc\d)*)' % _VERSION,
'min': None
},
'docker': {
'url': 'https://github.com/docker/docker',
're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-rc\d)*)' % _VERSION,
# Weave Net only works with Docker from 1.10.0 onwards, so we ignore all previous versions:
'min': '1.10.0'
},
'kubernetes': {
'url': 'https://github.com/kubernetes/kubernetes',
're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-beta\.\d)*)' % _VERSION,
# Weave Kube requires Kubernetes 1.4.2+, so we ignore all previous versions:
'min': '1.4.2'
}
_TAG_REGEX = '^[0-9a-f]{40}\s+refs/tags/%s$'
_VERSION = 'version'
DEPS = {
'go': {
'url': 'https://github.com/golang/go',
're': 'go(?P<%s>[\d\.]+(?:rc\d)*)' % _VERSION,
'min': None
},
'docker': {
'url': 'https://github.com/docker/docker',
're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-rc\d)*)' % _VERSION,
# Weave Net only works with Docker from 1.10.0 onwards, so we ignore
# all previous versions:
'min': '1.10.0',
},
'kubernetes': {
'url': 'https://github.com/kubernetes/kubernetes',
're': 'v(?P<%s>\d+\.\d+\.\d+(?:\-beta\.\d)*)' % _VERSION,
# Weave Kube requires Kubernetes 1.4.2+, so we ignore all previous
# versions:
'min': '1.4.2',
}
}
class Version(object):
''' Helper class to parse and manipulate (sort, filter, group) software versions. '''
def __init__(self, version):
self.version = version
self.digits = [int(x) if x else 0 for x in re.match('(\d*)\.?(\d*)\.?(\d*).*?', version).groups()]
self.major, self.minor, self.patch = self.digits
self.__parsed = parse_version(version)
self.is_rc = self.__parsed.is_prerelease
def __lt__ (self, other):
return self.__parsed.__lt__(other.__parsed)
def __gt__ (self, other):
return self.__parsed.__gt__(other.__parsed)
def __le__ (self, other):
return self.__parsed.__le__(other.__parsed)
def __ge__ (self, other):
return self.__parsed.__ge__(other.__parsed)
def __eq__ (self, other):
return self.__parsed.__eq__(other.__parsed)
def __ne__ (self, other):
return self.__parsed.__ne__(other.__parsed)
def __str__(self):
return self.version
def __repr__(self):
return self.version
''' Helper class to parse and manipulate (sort, filter, group) software
versions. '''
def __init__(self, version):
self.version = version
self.digits = [
int(x) if x else 0
for x in re.match('(\d*)\.?(\d*)\.?(\d*).*?', version).groups()
]
self.major, self.minor, self.patch = self.digits
self.__parsed = parse_version(version)
self.is_rc = self.__parsed.is_prerelease
def __lt__(self, other):
return self.__parsed.__lt__(other.__parsed)
def __gt__(self, other):
return self.__parsed.__gt__(other.__parsed)
def __le__(self, other):
return self.__parsed.__le__(other.__parsed)
def __ge__(self, other):
return self.__parsed.__ge__(other.__parsed)
def __eq__(self, other):
return self.__parsed.__eq__(other.__parsed)
def __ne__(self, other):
return self.__parsed.__ne__(other.__parsed)
def __str__(self):
return self.version
def __repr__(self):
return self.version
def _read_go_version_from_dockerfile():
# Read Go version from weave/build/Dockerfile
dockerfile_path = path.join(path.dirname(path.dirname(path.dirname(path.realpath(__file__)))), 'build', 'Dockerfile')
with open(dockerfile_path, 'r') as f:
for line in f:
m = re.match('^FROM golang:(\S*)$', line)
if m:
return m.group(1)
raise RuntimeError("Failed to read Go version from weave/build/Dockerfile. You may be running this script from somewhere else than weave/tools.")
# Read Go version from weave/build/Dockerfile
dockerfile_path = path.join(
path.dirname(path.dirname(path.dirname(path.realpath(__file__)))),
'build', 'Dockerfile')
with open(dockerfile_path, 'r') as f:
for line in f:
m = re.match('^FROM golang:(\S*)$', line)
if m:
return m.group(1)
raise RuntimeError(
"Failed to read Go version from weave/build/Dockerfile."
" You may be running this script from somewhere else than weave/tools."
)
def _try_set_min_go_version():
''' Set the current version of Go used to build Weave Net's containers as the minimum version. '''
try:
DEPS['go']['min'] = _read_go_version_from_dockerfile()
except IOError as e:
stderr.write('WARNING: No minimum Go version set. Root cause: %s%s' % (e, linesep))
''' Set the current version of Go used to build Weave Net's containers as
the minimum version. '''
try:
DEPS['go']['min'] = _read_go_version_from_dockerfile()
except IOError as e:
stderr.write('WARNING: No minimum Go version set. Root cause: %s%s' %
(e, linesep))
def _sanitize(out):
return out.decode('ascii').strip().split(linesep)
return out.decode('ascii').strip().split(linesep)
def _parse_tag(tag, version_pattern, debug=False):
''' Parse one line of Git tag output using the provided `version_pattern`, e.g.:
>>> _parse_tag('915b77eb4efd68916427caf8c7f0b53218c5ea4a refs/tags/v1.4.6', 'v(?P<version>\d+\.\d+\.\d+(?:\-beta\.\d)*)')
'1.4.6'
'''
pattern = _TAG_REGEX % version_pattern
m = re.match(pattern, tag)
if m:
return m.group(_VERSION)
elif debug:
stderr.write('ERROR: Failed to parse version out of tag [%s] using [%s].%s' % (tag, pattern, linesep))
''' Parse one line of Git tag output using the provided `version_pattern`, e.g.:
>>> _parse_tag(
'915b77eb4efd68916427caf8c7f0b53218c5ea4a refs/tags/v1.4.6',
'v(?P<version>\d+\.\d+\.\d+(?:\-beta\.\d)*)')
'1.4.6'
'''
pattern = _TAG_REGEX % version_pattern
m = re.match(pattern, tag)
if m:
return m.group(_VERSION)
elif debug:
stderr.write(
'ERROR: Failed to parse version out of tag [%s] using [%s].%s' %
(tag, pattern, linesep))
def get_versions_from(git_repo_url, version_pattern):
''' Get release and release-candidate versions from the provided Git repository. '''
git = Popen(shlex.split('git ls-remote --tags %s' % git_repo_url), stdout=PIPE)
out, err = git.communicate()
status_code = git.returncode
if status_code != 0:
raise RuntimeError('Failed to retrieve git tags from %s. Status code: %s. Output: %s. Error: %s' % (git_repo_url, status_code, out, err))
return list(filter(None, (_parse_tag(line, version_pattern) for line in _sanitize(out))))
''' Get release and release-candidate versions from the provided Git
repository. '''
git = Popen(
shlex.split('git ls-remote --tags %s' % git_repo_url), stdout=PIPE)
out, err = git.communicate()
status_code = git.returncode
if status_code != 0:
raise RuntimeError('Failed to retrieve git tags from %s. '
'Status code: %s. Output: %s. Error: %s' %
(git_repo_url, status_code, out, err))
return list(
filter(None, (_parse_tag(line, version_pattern)
for line in _sanitize(out))))
def _tree(versions, level=0):
''' Group versions by major, minor and patch version digits. '''
if not versions or level >= len(versions[0].digits):
return # Empty versions or no more digits to group by.
versions_tree = []
for _, versions_group in groupby(versions, lambda v: v.digits[level]):
subtree = _tree(list(versions_group), level+1)
if subtree:
versions_tree.append(subtree)
# Return the current subtree if non-empty, or the list of "leaf" versions:
return versions_tree if versions_tree else versions
''' Group versions by major, minor and patch version digits. '''
if not versions or level >= len(versions[0].digits):
return # Empty versions or no more digits to group by.
versions_tree = []
for _, versions_group in groupby(versions, lambda v: v.digits[level]):
subtree = _tree(list(versions_group), level + 1)
if subtree:
versions_tree.append(subtree)
# Return the current subtree if non-empty, or the list of "leaf" versions:
return versions_tree if versions_tree else versions
def _is_iterable(obj):
'''
Check if the provided object is an iterable collection, i.e. not a string, e.g. a list, a generator:
>>> _is_iterable('string')
False
>>> _is_iterable([1, 2, 3])
True
>>> _is_iterable((x for x in [1, 2, 3]))
True
'''
return hasattr(obj, '__iter__') and not isinstance(obj, str)
'''
Check if the provided object is an iterable collection, i.e. not a string,
e.g. a list, a generator:
>>> _is_iterable('string')
False
>>> _is_iterable([1, 2, 3])
True
>>> _is_iterable((x for x in [1, 2, 3]))
True
'''
return hasattr(obj, '__iter__') and not isinstance(obj, str)
def _leaf_versions(tree, rc):
'''
Recursively traverse the versions tree in a depth-first fashion,
and collect the last node of each branch, i.e. leaf versions.
'''
versions = []
if _is_iterable(tree):
for subtree in tree:
versions.extend(_leaf_versions(subtree, rc))
if not versions:
if rc:
last_rc = next(filter(lambda v: v.is_rc, reversed(tree)), None)
last_prod = next(filter(lambda v: not v.is_rc, reversed(tree)), None)
if last_rc and last_prod and (last_prod < last_rc):
versions.extend([last_prod, last_rc])
elif not last_prod:
versions.append(last_rc)
else:
# Either there is no RC, or we ignore the RC as older than the latest production version:
versions.append(last_prod)
else:
versions.append(tree[-1])
return versions
'''
Recursively traverse the versions tree in a depth-first fashion,
and collect the last node of each branch, i.e. leaf versions.
'''
versions = []
if _is_iterable(tree):
for subtree in tree:
versions.extend(_leaf_versions(subtree, rc))
if not versions:
if rc:
last_rc = next(filter(lambda v: v.is_rc, reversed(tree)), None)
last_prod = next(
filter(lambda v: not v.is_rc, reversed(tree)), None)
if last_rc and last_prod and (last_prod < last_rc):
versions.extend([last_prod, last_rc])
elif not last_prod:
versions.append(last_rc)
else:
# Either there is no RC, or we ignore the RC as older than
# the latest production version:
versions.append(last_prod)
else:
versions.append(tree[-1])
return versions
def filter_versions(versions, min_version=None, rc=False, latest=False):
''' Filter provided versions
''' Filter provided versions
>>> filter_versions(['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], min_version=None, latest=False, rc=False)
[1.0.0, 1.0.1, 1.1.1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version=None, latest=False, rc=False)
[1.0.0, 1.0.1, 1.1.1, 2.0.0]
>>> filter_versions(['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], min_version=None, latest=True, rc=False)
[1.0.1, 1.1.1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version=None, latest=True, rc=False)
[1.0.1, 1.1.1, 2.0.0]
>>> filter_versions(['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], min_version=None, latest=False, rc=True)
[1.0.0-beta.1, 1.0.0, 1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version=None, latest=False, rc=True)
[1.0.0-beta.1, 1.0.0, 1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], min_version='1.1.0', latest=False, rc=True)
[1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version='1.1.0', latest=False, rc=True)
[1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], min_version=None, latest=True, rc=True)
[1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version=None, latest=True, rc=True)
[1.0.1, 1.1.1, 1.1.2-rc1, 2.0.0]
>>> filter_versions(
['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'],
min_version='1.1.0', latest=True, rc=True)
[1.1.1, 1.1.2-rc1, 2.0.0]
'''
versions = sorted([Version(v) for v in versions])
if min_version:
min_version = Version(min_version)
versions = [v for v in versions if v >= min_version]
if not rc:
versions = [v for v in versions if not v.is_rc]
if latest:
versions_tree = _tree(versions)
return _leaf_versions(versions_tree, rc)
else:
return versions
>>> filter_versions(['1.0.0-beta.1', '1.0.0', '1.0.1', '1.1.1', '1.1.2-rc1', '2.0.0'], min_version='1.1.0', latest=True, rc=True)
[1.1.1, 1.1.2-rc1, 2.0.0]
'''
versions = sorted([Version(v) for v in versions])
if min_version:
min_version = Version(min_version)
versions = [v for v in versions if v >= min_version]
if not rc:
versions = [v for v in versions if not v.is_rc]
if latest:
versions_tree = _tree(versions)
return _leaf_versions(versions_tree, rc)
else:
return versions
def _usage(error_message=None):
if error_message:
stderr.write('ERROR: ' + error_message + linesep)
stdout.write(linesep.join([
'Usage:',
' list_versions.py [OPTION]... [DEPENDENCY]',
'Examples:',
' list_versions.py go',
' list_versions.py -r docker',
' list_versions.py --rc docker',
' list_versions.py -l kubernetes',
' list_versions.py --latest kubernetes',
'Options:',
'-l/--latest Include only the latest version of each major and minor version sub-tree.',
'-r/--rc Include release candidate versions.',
'-h/--help Prints this!',
''
]))
if error_message:
stderr.write('ERROR: ' + error_message + linesep)
stdout.write(
linesep.join([
'Usage:', ' list_versions.py [OPTION]... [DEPENDENCY]',
'Examples:', ' list_versions.py go',
' list_versions.py -r docker',
' list_versions.py --rc docker',
' list_versions.py -l kubernetes',
' list_versions.py --latest kubernetes', 'Options:',
'-l/--latest Include only the latest version of each major and'
' minor version sub-tree.',
'-r/--rc Include release candidate versions.',
'-h/--help Prints this!', ''
]))
def _validate_input(argv):
try:
config = {
'rc': False,
'latest': False
}
opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc'])
for opt, value in opts:
if opt in ('-h', '--help'):
_usage()
exit()
if opt in ('-l', '--latest'):
config['latest'] = True
if opt in ('-r', '--rc'):
config['rc'] = True
if len(args) != 1:
raise ValueError('Please provide a dependency to get versions of. Expected 1 argument but got %s: %s.' % (len(args), args))
dependency=args[0].lower()
if dependency not in DEPS.keys():
raise ValueError('Please provide a valid dependency. Expected one dependency among {%s} but got: %s.' % (', '.join(DEPS.keys()), dependency))
return dependency, config
except GetoptError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
except ValueError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
try:
config = {'rc': False, 'latest': False}
opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc'])
for opt, value in opts:
if opt in ('-h', '--help'):
_usage()
exit()
if opt in ('-l', '--latest'):
config['latest'] = True
if opt in ('-r', '--rc'):
config['rc'] = True
if len(args) != 1:
raise ValueError('Please provide a dependency to get versions of.'
' Expected 1 argument but got %s: %s.' %
(len(args), args))
dependency = args[0].lower()
if dependency not in DEPS.keys():
raise ValueError(
'Please provide a valid dependency.'
' Expected one dependency among {%s} but got: %s.' %
(', '.join(DEPS.keys()), dependency))
return dependency, config
except GetoptError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
except ValueError as e:
_usage(str(e))
exit(_ERROR_ILLEGAL_ARGS)
def main(argv):
try:
dependency, config = _validate_input(argv)
if dependency == 'go':
_try_set_min_go_version()
versions = get_versions_from(DEPS[dependency]['url'], DEPS[dependency]['re'])
versions = filter_versions(versions, DEPS[dependency]['min'], **config)
print(linesep.join(map(str, versions)))
except Exception as e:
print(str(e))
exit(_ERROR_RUNTIME)
try:
dependency, config = _validate_input(argv)
if dependency == 'go':
_try_set_min_go_version()
versions = get_versions_from(DEPS[dependency]['url'],
DEPS[dependency]['re'])
versions = filter_versions(versions, DEPS[dependency]['min'], **config)
print(linesep.join(map(str, versions)))
except Exception as e:
print(str(e))
exit(_ERROR_RUNTIME)
if __name__ == '__main__':
main(argv[1:])
main(argv[1:])
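# Usage sketch: './list_versions.py -l kubernetes' prints the latest
# Kubernetes version in each major/minor sub-tree, one per line.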

View File

@@ -4,6 +4,6 @@ set -o errexit
set -o nounset
set -o pipefail
WORKING_SUFFIX=$(if ! git diff --exit-code --quiet HEAD >&2; then echo "-WIP"; else echo ""; fi)
WORKING_SUFFIX=$(if git status --porcelain | grep -qE '^([^?][^ ]|[^ ][^?])\s'; then echo "-WIP"; else echo ""; fi)
BRANCH_PREFIX=$(git rev-parse --abbrev-ref HEAD)
echo "${BRANCH_PREFIX//\//-}-$(git rev-parse --short HEAD)$WORKING_SUFFIX"

View File

@@ -115,11 +115,8 @@ rm_containers() {
start_suite() {
for host in $HOSTS; do
[ -z "$DEBUG" ] || echo "Cleaning up on $host: removing all containers and resetting weave"
PLUGIN_ID=$(docker_on "$host" ps -aq --filter=name=weaveplugin)
PLUGIN_FILTER="cat"
[ -n "$PLUGIN_ID" ] && PLUGIN_FILTER="grep -v $PLUGIN_ID"
# shellcheck disable=SC2046
rm_containers "$host" $(docker_on "$host" ps -aq 2>/dev/null | $PLUGIN_FILTER)
rm_containers "$host" $(docker_on "$host" ps -aq 2>/dev/null)
run_on "$host" "docker network ls | grep -q ' weave ' && docker network rm weave" || true
weave_on "$host" reset 2>/dev/null
done

View File

@@ -113,7 +113,7 @@ lint_sh() {
local filename="$1"
local lint_result=0
if ! diff -u <(shfmt -i 4 "${filename}") "${filename}"; then
if ! diff -u "${filename}" <(shfmt -i 4 "${filename}"); then
lint_result=1
echo "${filename}: run shfmt -i 4 -w ${filename}"
fi
@@ -153,6 +153,21 @@ lint_md() {
return $lint_result
}
lint_py() {
local filename="$1"
local lint_result=0
if yapf --diff "${filename}" | grep -qE '^[+-]'; then
lint_result=1
echo "${filename}: run yapf --in-place ${filename}"
else
# Only run flake8 if yapf passes, since they pick up a lot of similar issues
flake8 "${filename}" || lint_result=1
fi
return $lint_result
}
lint() {
filename="$1"
ext="${filename##*\.}"
@@ -179,6 +194,7 @@ lint() {
sh) lint_sh "${filename}" || lint_result=1 ;;
tf) lint_tf "${filename}" || lint_result=1 ;;
md) lint_md "${filename}" || lint_result=1 ;;
py) lint_py "${filename}" || lint_result=1 ;;
esac
spell_check "${filename}" || lint_result=1

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
import sys, string, urllib
import requests
import optparse

View File

@@ -19,157 +19,188 @@ app.debug = True
# observations faster.
alpha = 0.3
class Test(ndb.Model):
total_run_time = ndb.FloatProperty(default=0.) # Not a total, but an EWMA
total_runs = ndb.IntegerProperty(default=0)
total_run_time = ndb.FloatProperty(default=0.)  # Not a total, but an EWMA
total_runs = ndb.IntegerProperty(default=0)
def parallelism(self):
name = self.key.string_id()
m = re.search('(\d+)_test.sh$', name)
if m is None:
return 1
else:
return int(m.group(1))
def parallelism(self):
name = self.key.string_id()
m = re.search('(\d+)_test.sh$', name)
if m is None:
return 1
else:
return int(m.group(1))
def cost(self):
p = self.parallelism()
logging.info("Test %s has parallelism %d and avg run time %s",
self.key.string_id(), p, self.total_run_time)
return self.parallelism() * self.total_run_time
def cost(self):
p = self.parallelism()
logging.info("Test %s has parallelism %d and avg run time %s", self.key.string_id(), p, self.total_run_time)
return self.parallelism() * self.total_run_time
class Schedule(ndb.Model):
shards = ndb.JsonProperty()
shards = ndb.JsonProperty()
@app.route('/record/<path:test_name>/<runtime>', methods=['POST'])
@ndb.transactional
def record(test_name, runtime):
test = Test.get_by_id(test_name)
if test is None:
test = Test(id=test_name)
test.total_run_time = (test.total_run_time * (1-alpha)) + (float(runtime) * alpha)
test.total_runs += 1
test.put()
return ('', 204)
test = Test.get_by_id(test_name)
if test is None:
test = Test(id=test_name)
test.total_run_time = (test.total_run_time *
(1 - alpha)) + (float(runtime) * alpha)
test.total_runs += 1
test.put()
return ('', 204)
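# Worked example with hypothetical numbers: with alpha = 0.3, a stored
# average of 100.0s and a new runtime of 60.0s update to
# 100.0 * (1 - 0.3) + 60.0 * 0.3 = 88.0s, so recent runs quickly dominate.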
@app.route('/schedule/<test_run>/<int:shard_count>/<int:shard>', methods=['POST'])
@app.route(
'/schedule/<test_run>/<int:shard_count>/<int:shard>', methods=['POST'])
def schedule(test_run, shard_count, shard):
# read tests from body
test_names = flask.request.get_json(force=True)['tests']
# read tests from body
test_names = flask.request.get_json(force=True)['tests']
# first see if we have a schedule already
schedule_id = "%s-%d" % (test_run, shard_count)
schedule = Schedule.get_by_id(schedule_id)
if schedule is not None:
# first see if we have a schedule already
schedule_id = "%s-%d" % (test_run, shard_count)
schedule = Schedule.get_by_id(schedule_id)
if schedule is not None:
return flask.json.jsonify(tests=schedule.shards[str(shard)])
# if not, do simple greedy algorithm
test_times = ndb.get_multi(
ndb.Key(Test, test_name) for test_name in test_names)
def avg(test):
if test is not None:
return test.cost()
return 1
test_times = [(test_name, avg(test))
for test_name, test in zip(test_names, test_times)]
test_times_dict = dict(test_times)
test_times.sort(key=operator.itemgetter(1))
shards = {i: [] for i in xrange(shard_count)}
while test_times:
test_name, time = test_times.pop()
# find shortest shard and put it in that
s, _ = min(
((i, sum(test_times_dict[t] for t in shards[i]))
for i in xrange(shard_count)),
key=operator.itemgetter(1))
shards[s].append(test_name)
# atomically insert or retrieve existing schedule
schedule = Schedule.get_or_insert(schedule_id, shards=shards)
return flask.json.jsonify(tests=schedule.shards[str(shard)])
# if not, do simple greedy algorithm
test_times = ndb.get_multi(ndb.Key(Test, test_name) for test_name in test_names)
def avg(test):
if test is not None:
return test.cost()
return 1
test_times = [(test_name, avg(test)) for test_name, test in zip(test_names, test_times)]
test_times_dict = dict(test_times)
test_times.sort(key=operator.itemgetter(1))
shards = {i: [] for i in xrange(shard_count)}
while test_times:
test_name, time = test_times.pop()
# find shortest shard and put it in that
s, _ = min(((i, sum(test_times_dict[t] for t in shards[i]))
for i in xrange(shard_count)), key=operator.itemgetter(1))
shards[s].append(test_name)
# atomically insert or retrieve existing schedule
schedule = Schedule.get_or_insert(schedule_id, shards=shards)
return flask.json.jsonify(tests=schedule.shards[str(shard)])
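# Sketch of the greedy placement with hypothetical costs: four tests with
# costs [5, 4, 3, 2] spread over 2 shards are popped largest-first, so
# shard 0 receives {5, 2} and shard 1 receives {4, 3}; each test lands on
# whichever shard currently has the smallest total cost.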
FIREWALL_REGEXES = [
re.compile(r'^(?P<network>\w+)-allow-(?P<type>\w+)-(?P<build>\d+)-(?P<shard>\d+)$'),
re.compile(r'^(?P<network>\w+)-(?P<build>\d+)-(?P<shard>\d+)-allow-(?P<type>[\w\-]+)$'),
re.compile(
r'^(?P<network>\w+)-allow-(?P<type>\w+)-(?P<build>\d+)-(?P<shard>\d+)$'
),
re.compile(r'^(?P<network>\w+)-(?P<build>\d+)-(?P<shard>\d+)-allow-'
r'(?P<type>[\w\-]+)$'),
]
NAME_REGEXES = [
re.compile(r'^host(?P<index>\d+)-(?P<build>\d+)-(?P<shard>\d+)$'),
re.compile(r'^test-(?P<build>\d+)-(?P<shard>\d+)-(?P<index>\d+)$'),
re.compile(r'^host(?P<index>\d+)-(?P<build>\d+)-(?P<shard>\d+)$'),
re.compile(r'^test-(?P<build>\d+)-(?P<shard>\d+)-(?P<index>\d+)$'),
]
def _matches_any_regex(name, regexes):
for regex in regexes:
matches = regex.match(name)
if matches:
return matches
for regex in regexes:
matches = regex.match(name)
if matches:
return matches
PROJECTS = [
('weaveworks/weave', 'weave-net-tests', 'us-central1-a', True),
('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a', True),
('weaveworks/scope', 'scope-integration-tests', 'us-central1-a', False),
('weaveworks/weave', 'weave-net-tests', 'us-central1-a', True),
('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a', True),
('weaveworks/scope', 'scope-integration-tests', 'us-central1-a', False),
]
@app.route('/tasks/gc')
def gc():
# Get list of running VMs, pick build id out of VM name
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
# Get list of running VMs, pick build id out of VM name
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
for repo, project, zone, gc_fw in PROJECTS:
gc_project(compute, repo, project, zone, gc_fw)
for repo, project, zone, gc_fw in PROJECTS:
gc_project(compute, repo, project, zone, gc_fw)
return "Done"
return "Done"
def gc_project(compute, repo, project, zone, gc_fw):
logging.info("GCing %s, %s, %s", repo, project, zone)
# Get list of builds, filter down to running builds:
running = _get_running_builds(repo)
# Stop VMs for builds that aren't running:
_gc_compute_engine_instances(compute, project, zone, running)
# Remove firewall rules for builds that aren't running:
if gc_fw:
_gc_firewall_rules(compute, project, running)
logging.info("GCing %s, %s, %s", repo, project, zone)
# Get list of builds, filter down to running builds:
running = _get_running_builds(repo)
# Stop VMs for builds that aren't running:
_gc_compute_engine_instances(compute, project, zone, running)
# Remove firewall rules for builds that aren't running:
if gc_fw:
_gc_firewall_rules(compute, project, running)
def _get_running_builds(repo):
result = urlfetch.fetch('https://circleci.com/api/v1/project/%s' % repo,
headers={'Accept': 'application/json'})
assert result.status_code == 200
builds = json.loads(result.content)
running = {build['build_num'] for build in builds if not build.get('stop_time')}
logging.info("Runnings builds: %r", running)
return running
result = urlfetch.fetch(
'https://circleci.com/api/v1/project/%s' % repo,
headers={'Accept': 'application/json'})
assert result.status_code == 200
builds = json.loads(result.content)
running = {
build['build_num']
for build in builds if not build.get('stop_time')
}
logging.info("Runnings builds: %r", running)
return running
def _get_hosts_by_build(instances):
host_by_build = collections.defaultdict(list)
for instance in instances['items']:
matches = _matches_any_regex(instance['name'], NAME_REGEXES)
if not matches:
continue
host_by_build[int(matches.group('build'))].append(instance['name'])
logging.info("Running VMs by build: %r", host_by_build)
return host_by_build
host_by_build = collections.defaultdict(list)
for instance in instances['items']:
matches = _matches_any_regex(instance['name'], NAME_REGEXES)
if not matches:
continue
host_by_build[int(matches.group('build'))].append(instance['name'])
logging.info("Running VMs by build: %r", host_by_build)
return host_by_build
def _gc_compute_engine_instances(compute, project, zone, running):
instances = compute.instances().list(project=project, zone=zone).execute()
if 'items' not in instances:
return
host_by_build = _get_hosts_by_build(instances)
stopped = []
for build, names in host_by_build.iteritems():
if build in running:
continue
for name in names:
stopped.append(name)
logging.info("Stopping VM %s", name)
compute.instances().delete(project=project, zone=zone, instance=name).execute()
return stopped
instances = compute.instances().list(project=project, zone=zone).execute()
if 'items' not in instances:
return
host_by_build = _get_hosts_by_build(instances)
stopped = []
for build, names in host_by_build.iteritems():
if build in running:
continue
for name in names:
stopped.append(name)
logging.info("Stopping VM %s", name)
compute.instances().delete(
project=project, zone=zone, instance=name).execute()
return stopped
def _gc_firewall_rules(compute, project, running):
firewalls = compute.firewalls().list(project=project).execute()
if 'items' not in firewalls:
return
for firewall in firewalls['items']:
matches = _matches_any_regex(firewall['name'], FIREWALL_REGEXES)
if not matches:
continue
if int(matches.group('build')) in running:
continue
logging.info("Deleting firewall rule %s", firewall['name'])
compute.firewalls().delete(project=project, firewall=firewall['name']).execute()
firewalls = compute.firewalls().list(project=project).execute()
if 'items' not in firewalls:
return
for firewall in firewalls['items']:
matches = _matches_any_regex(firewall['name'], FIREWALL_REGEXES)
if not matches:
continue
if int(matches.group('build')) in running:
continue
logging.info("Deleting firewall rule %s", firewall['name'])
compute.firewalls().delete(
project=project, firewall=firewall['name']).execute()

View File

@@ -33,9 +33,13 @@ while [ $# -gt 0 ]; do
shift 1
;;
"-netgo")
TAGS="-tags netgo"
TAGS="netgo"
shift 1
;;
"-tags")
TAGS="$2"
shift 2
;;
"-p")
PARALLEL=true
shift 1
@@ -51,7 +55,7 @@ while [ $# -gt 0 ]; do
esac
done
GO_TEST_ARGS=($TAGS -cpu 4 -timeout $TIMEOUT)
GO_TEST_ARGS=(-tags "${TAGS[@]}" -cpu 4 -timeout $TIMEOUT)
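# Hypothetical usage: invoking the script with '-tags "netgo foo"' makes
# every 'go test' invocation below run with '-tags "netgo foo"'.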
if [ -n "$SLOW" ] || [ -n "$CIRCLECI" ]; then
SLOW=true
@@ -97,7 +101,7 @@ go test -i "${GO_TEST_ARGS[@]}" "${TESTDIRS[@]}"
run_test() {
local dir=$1
if [ -z "$NO_GO_GET" ]; then
go get -t "$TAGS" "$dir"
go get -t -tags "${TAGS[@]}" "$dir"
fi
local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}")