From 332eb9cff9063d86ed6c21021678278f497512db Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Petazzoni?=
Date: Wed, 2 Mar 2016 12:11:22 +0000
Subject: [PATCH] Add script to automate LB setup; add setup/teardown helpers

---
 ...ad-balancer.py => add-load-balancer-v1.py} |   0
 bin/add-load-balancer-v2.py                   |  72 ++++++++++
 bin/reconfigure-load-balancers.py             |  46 ++++++
 bin/setup-all-the-things.sh                   | 131 ++++++++++++++++++
 4 files changed, 249 insertions(+)
 rename bin/{add-load-balancer.py => add-load-balancer-v1.py} (100%)
 create mode 100755 bin/add-load-balancer-v2.py
 create mode 100755 bin/reconfigure-load-balancers.py
 create mode 100755 bin/setup-all-the-things.sh

diff --git a/bin/add-load-balancer.py b/bin/add-load-balancer-v1.py
similarity index 100%
rename from bin/add-load-balancer.py
rename to bin/add-load-balancer-v1.py
diff --git a/bin/add-load-balancer-v2.py b/bin/add-load-balancer-v2.py
new file mode 100755
index 00000000..b26962d9
--- /dev/null
+++ b/bin/add-load-balancer-v2.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import yaml
+
+def error(msg):
+    print("ERROR: {}".format(msg))
+    exit(1)
+
+# arg 1 = service name
+
+service_name = sys.argv[1]
+
+compose_file = os.environ["COMPOSE_FILE"]
+input_file, output_file = compose_file, compose_file
+
+config = yaml.load(open(input_file))
+
+version = config.get("version")
+if version != "2":
+    error("Unsupported $COMPOSE_FILE version: {!r}".format(version))
+
+# The load balancers need to know the service port to use.
+# Those ports must be declared here.
+ports = yaml.load(open("ports.yml"))
+
+port = str(ports[service_name])
+
+if service_name not in config["services"]:
+    error("service {} not found in $COMPOSE_FILE"
+          .format(service_name))
+
+lb_name = "{}-lb".format(service_name)
+be_name = "{}-be".format(service_name)
+
+if lb_name in config["services"]:
+    error("load balancer {} already exists in $COMPOSE_FILE"
+          .format(service_name))
+
+service = config["services"][service_name]
+if "networks" in service:
+    error("service {} has custom networks"
+          .format(service_name))
+
+# Put the service on its own network.
+service["networks"] = {service_name: {"aliases": [ be_name ] } }
+# Put a label indicating which load balancer is responsible for this service.
+if "labels" not in service:
+    service["labels"] = {}
+service["labels"]["loadbalancer"] = lb_name
+
+# Add the load balancer.
+config["services"][lb_name] = {
+    "image": "jpetazzo/hamba",
+    "command": "{} {} {}".format(port, be_name, port),
+    "depends_on": [ service_name ],
+    "networks": {
+        "default": {
+            "aliases": [ service_name ],
+        },
+        service_name: None,
+    },
+}
+
+if "networks" not in config:
+    config["networks"] = {}
+if service_name not in config["networks"]:
+    config["networks"][service_name] = None
+
+yaml.safe_dump(config, open(output_file, "w"), default_flow_style=False)
+
diff --git a/bin/reconfigure-load-balancers.py b/bin/reconfigure-load-balancers.py
new file mode 100755
index 00000000..8d538fd8
--- /dev/null
+++ b/bin/reconfigure-load-balancers.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# FIXME: hardcoded
+PORT="80"
+
+import os
+import subprocess
+
+project_name = os.path.basename(os.path.realpath("."))
+
+# Get all existing services for this application.
+containers_data = subprocess.check_output([
+    "docker", "ps",
+    "--filter", "label=com.docker.compose.project={}".format(project_name),
+    "--format", '{{ .Label "com.docker.compose.service" }} '
+                '{{ .Label "com.docker.compose.container-number" }} '
+                '{{ .Label "loadbalancer" }}',
+])
+
+load_balancers = dict()
+for line in containers_data.split('\n'):
+    if not line:
+        continue
+    service_name, container_number, load_balancer = line.split(' ')
+    if load_balancer:
+        if load_balancer not in load_balancers:
+            load_balancers[load_balancer] = []
+        load_balancers[load_balancer].append((service_name, int(container_number)))
+
+for load_balancer, backends in load_balancers.items():
+    # FIXME: iterate on all load balancers
+    container_name = "{}_{}_1".format(project_name, load_balancer)
+    command = [
+        "docker", "run", "--rm",
+        "--volumes-from", container_name,
+        "--net", "container:{}".format(container_name),
+        "jpetazzo/hamba", "reconfigure", PORT,
+    ]
+    command.extend(
+        "{}_{}_{}:{}".format(project_name, backend_name, backend_number, PORT)
+        for (backend_name, backend_number) in sorted(backends)
+    )
+    print("Updating configuration for {} with {} backend(s)..."
+          .format(container_name, len(backends)))
+    subprocess.check_output(command)
+
diff --git a/bin/setup-all-the-things.sh b/bin/setup-all-the-things.sh
new file mode 100755
index 00000000..16e2947a
--- /dev/null
+++ b/bin/setup-all-the-things.sh
@@ -0,0 +1,131 @@
+#!/bin/sh
+unset DOCKER_REGISTRY
+unset DOCKER_HOST
+unset COMPOSE_FILE
+
+SWARM_IMAGE=jpetazzo/swarm:1.1.3-rc2-debug-experimental
+
+check_ssh_keys () {
+    for N in $(seq 1 5); do
+        ssh node$N true
+    done
+}
+
+clean_1_containers () {
+    for N in $(seq 1 5); do
+        ssh node$N "docker ps -aq | xargs -r -n1 -P10 docker rm -f"
+    done
+}
+
+clean_2_volumes () {
+    for N in $(seq 1 5); do
+        ssh node$N "docker volume ls -q | xargs -r docker volume rm"
+    done
+}
+
+clean_3_images () {
+    for N in $(seq 1 5); do
+        ssh node$N "docker images | awk '/dockercoins|jpetazzo/ {print \$1\":\"\$2}' | xargs -r docker rmi -f"
+    done
+}
+
+clean_4_machines () {
+    rm -rf ~/.docker/machine/
+}
+
+clean_all () {
+    clean_1_containers
+    clean_2_volumes
+    clean_3_images
+    clean_4_machines
+}
+
+dm_swarm () {
+    eval $(docker-machine env node1 --swarm)
+}
+dm_node1 () {
+    eval $(docker-machine env node1)
+}
+
+setup_1_swarm () {
+    grep node[12345] /etc/hosts | grep -v ^127 |
+    while read IPADDR NODENAME; do
+        docker-machine create --driver generic \
+            --engine-opt cluster-store=consul://localhost:8500 \
+            --engine-opt cluster-advertise=eth0:2376 \
+            --swarm --swarm-master --swarm-image $SWARM_IMAGE \
+            --swarm-discovery consul://localhost:8500 \
+            --swarm-opt replication --swarm-opt advertise=$IPADDR:3376 \
+            --generic-ssh-user docker --generic-ip-address $IPADDR $NODENAME
+    done
+}
+
+setup_2_consul () {
+    ssh node1 docker run --name consul_node1 \
+        -d --restart=always --net host \
+        jpetazzo/consul agent -server -bootstrap
+
+    IPADDR=$(ssh node1 ip a ls dev eth0 |
+             sed -n 's,.*inet \(.*\)/.*,\1,p')
+
+    # Start other Consul nodes
+    for N in 2 3 4 5; do
+        ssh node$N docker run --name consul_node$N \
+            -d --restart=always --net host \
+            jpetazzo/consul agent -server -join $IPADDR
+    done
+}
+
+setup_3_wait () {
+    # Wait for a Swarm master
+    dm_swarm
+    while ! docker ps; do sleep 1; done
+
+    # Wait for all nodes to be there
+    while ! [ "$(docker info | grep "^Nodes:")" = "Nodes: 5" ]; do sleep 1; done
+}
+
+setup_4_registry () {
+    cd ~/orchestration-workshop/registry
+    dm_swarm
+    docker-compose up -d
+    for N in $(seq 2 5); do
+        docker-compose scale frontend=$N
+    done
+}
+
+setup_5_btp_dockercoins () {
+    cd ~/orchestration-workshop/dockercoins
+    dm_node1
+    export DOCKER_REGISTRY=localhost:5000
+    cp docker-compose.yml-v2 docker-compose.yml
+    ~/orchestration-workshop/bin/build-tag-push.py | tee /tmp/btp.log
+    export $(tail -n 1 /tmp/btp.log)
+}
+
+setup_6_add_lbs () {
+    cd ~/orchestration-workshop/dockercoins
+    ~/orchestration-workshop/bin/add-load-balancer-v2.py rng
+    ~/orchestration-workshop/bin/add-load-balancer-v2.py hasher
+}
+
+setup_all () {
+    setup_1_swarm
+    setup_2_consul
+    setup_3_wait
+    setup_4_registry
+    setup_5_btp_dockercoins
+    setup_6_add_lbs
+    dm_swarm
+}
+
+demo_1_compose_up () {
+    dm_swarm
+    cd ~/orchestration-workshop/dockercoins
+    docker-compose up -d
+}
+
+grep -qs -- MAGICMARKER "$0" && { # Don't display this line in the function list
+    echo "You should source this file, then invoke the following functions:"
+    grep -- '^[a-z].*{$' "$0" | cut -d" " -f1
+}