Remove obsolete scripts
@@ -1,42 +0,0 @@
#!/usr/bin/env python

import os
import sys
import yaml

# arg 1 = service name
# arg 2 = number of instances

service_name = sys.argv[1]
desired_instances = int(sys.argv[2])

compose_file = os.environ["COMPOSE_FILE"]
input_file, output_file = compose_file, compose_file

config = yaml.load(open(input_file))

# The ambassadors need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

port = str(ports[service_name])

command_line = port

depends_on = []

for n in range(1, 1+desired_instances):
    config["services"]["{}{}".format(service_name, n)] = config["services"][service_name]
    command_line += " {}{}:{}".format(service_name, n, port)
    depends_on.append("{}{}".format(service_name, n))

config["services"][service_name] = {
    "image": "jpetazzo/hamba",
    "command": command_line,
    "depends_on": depends_on,
}
if "networks" in config["services"]["{}1".format(service_name)]:
    config["services"][service_name]["networks"] = config["services"]["{}1".format(service_name)]["networks"]

yaml.safe_dump(config, open(output_file, "w"), default_flow_style=False)
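For reference, a hypothetical before/after sketch of the rewrite performed by the script above; the service name, image, and port are made up, not taken from the removed files.

import yaml

# Toy input: one service, to be scaled to 2 instances behind a hamba ambassador.
config = {"services": {"rng": {"image": "dockercoins_rng"}}}
service_name, desired_instances, port = "rng", 2, "80"

command_line, depends_on = port, []
for n in range(1, 1 + desired_instances):
    config["services"]["{}{}".format(service_name, n)] = dict(config["services"][service_name])
    command_line += " {}{}:{}".format(service_name, n, port)
    depends_on.append("{}{}".format(service_name, n))
config["services"][service_name] = {
    "image": "jpetazzo/hamba",
    "command": command_line,      # "80 rng1:80 rng2:80"
    "depends_on": depends_on,     # ["rng1", "rng2"]
}
print(yaml.safe_dump(config, default_flow_style=False))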
@@ -1,87 +0,0 @@
#!/usr/bin/env python

import os
import sys
import yaml

def error(msg):
    print("ERROR: {}".format(msg))
    exit(1)

# arg 1 = service name

service_name = sys.argv[1]

compose_file = os.environ["COMPOSE_FILE"]
input_file, output_file = compose_file, compose_file

config = yaml.load(open(input_file))

version = config.get("version")
if version != "2":
    error("Unsupported $COMPOSE_FILE version: {!r}".format(version))

# The load balancers need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

port = str(ports[service_name])

if service_name not in config["services"]:
    error("service {} not found in $COMPOSE_FILE"
          .format(service_name))

lb_name = "{}-lb".format(service_name)
be_name = "{}-be".format(service_name)
wd_name = "{}-wd".format(service_name)

if lb_name in config["services"]:
    error("load balancer {} already exists in $COMPOSE_FILE"
          .format(lb_name))

if wd_name in config["services"]:
    error("dns watcher {} already exists in $COMPOSE_FILE"
          .format(wd_name))

service = config["services"][service_name]
if "networks" in service:
    error("service {} has custom networks"
          .format(service_name))

# Put the service on its own network.
service["networks"] = {service_name: {"aliases": [ be_name ] } }
# Put a label indicating which load balancer is responsible for this service.
if "labels" not in service:
    service["labels"] = {}
service["labels"]["loadbalancer"] = lb_name

# Add the load balancer.
config["services"][lb_name] = {
    "image": "jpetazzo/hamba",
    "command": "{} {} {}".format(port, be_name, port),
    "depends_on": [ service_name ],
    "networks": {
        "default": {
            "aliases": [ service_name ],
        },
        service_name: None,
    },
}

# Add the DNS watcher.
config["services"][wd_name] = {
    "image": "jpetazzo/watchdns",
    "command": "{} {} {}".format(port, be_name, port),
    "volumes_from": [ lb_name ],
    "networks": {
        service_name: None,
    },
}

if "networks" not in config:
    config["networks"] = {}
if service_name not in config["networks"]:
    config["networks"][service_name] = None

yaml.safe_dump(config, open(output_file, "w"), default_flow_style=False)
@@ -1,63 +0,0 @@
#!/usr/bin/env python

from common import ComposeFile
import os
import subprocess
import time

registry = os.environ.get("DOCKER_REGISTRY")

if not registry:
    print("Please set the DOCKER_REGISTRY variable, e.g.:")
    print("export DOCKER_REGISTRY=jpetazzo # use the Docker Hub")
    print("export DOCKER_REGISTRY=localhost:5000 # use a local registry")
    exit(1)

# Get the name of the current directory.
project_name = os.path.basename(os.path.realpath("."))

# Version used to tag the generated Docker images: $VERSION if it is set,
# otherwise the current UNIX timestamp.
if "VERSION" not in os.environ:
    version = str(int(time.time()))
else:
    version = os.environ["VERSION"]

# Execute "docker-compose build" and abort if it fails.
subprocess.check_call(["docker-compose", "-f", "docker-compose.yml", "build"])

# Load the services from the input docker-compose.yml file.
# TODO: run parallel builds.
compose_file = ComposeFile("docker-compose.yml")

# Iterate over all services that have a "build" definition.
# Tag them, and initiate a push in the background.
push_operations = dict()
for service_name, service in compose_file.services.items():
    if "build" in service:
        compose_image = "{}_{}".format(project_name, service_name)
        registry_image = "{}/{}:{}".format(registry, compose_image, version)
        # Re-tag the image so that it can be uploaded to the registry.
        subprocess.check_call(["docker", "tag", compose_image, registry_image])
        # Spawn "docker push" to upload the image.
        push_operations[service_name] = subprocess.Popen(["docker", "push", registry_image])
        # Replace the "build" definition by an "image" definition,
        # using the name of the image on the registry.
        del service["build"]
        service["image"] = registry_image

# Wait for push operations to complete.
for service_name, popen_object in push_operations.items():
    print("Waiting for {} push to complete...".format(service_name))
    popen_object.wait()
    print("Done.")

# Write the new docker-compose.yml file.
if "COMPOSE_FILE" not in os.environ:
    os.environ["COMPOSE_FILE"] = "docker-compose.yml-{}".format(version)
    print("Writing to new Compose file:")
else:
    print("Writing to provided Compose file:")

print("COMPOSE_FILE={}".format(os.environ["COMPOSE_FILE"]))
compose_file.save()
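A quick illustration of the renaming scheme used above; the registry, project, service, and version values below are hypothetical.

registry = "localhost:5000"
project_name, service_name = "dockercoins", "rng"
version = "1468332992"   # e.g. a UNIX timestamp

compose_image = "{}_{}".format(project_name, service_name)
registry_image = "{}/{}:{}".format(registry, compose_image, version)
print(compose_image)    # dockercoins_rng
print(registry_image)   # localhost:5000/dockercoins_rng:1468332992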
@@ -1,76 +0,0 @@
import os
import subprocess
import sys
import time
import yaml


def COMPOSE_FILE():
    if "COMPOSE_FILE" not in os.environ:
        print("The $COMPOSE_FILE environment variable is not set. Aborting.")
        exit(1)
    return os.environ["COMPOSE_FILE"]


class ComposeFile(object):

    def __init__(self, filename=None):
        if filename is None:
            filename = COMPOSE_FILE()
        if not os.path.isfile(filename):
            print("File {!r} does not exist. Aborting.".format(filename))
            exit(1)
        self.data = yaml.load(open(filename))

    @property
    def services(self):
        if self.data.get("version") == "2":
            return self.data["services"]
        else:
            return self.data

    def save(self, filename=None):
        if filename is None:
            filename = COMPOSE_FILE()
        with open(filename, "w") as f:
            yaml.safe_dump(self.data, f, default_flow_style=False)

# Executes a bunch of commands in parallel, but no more than N at a time.
# This makes it possible to execute a large number of tasks concurrently,
# without turning into a fork bomb.
# `parallelism` is the number of tasks to execute simultaneously.
# `commands` is a list of tasks to execute.
# Each task is itself a list, where the first element is a descriptive
# string, and the following elements are the arguments to pass to Popen.
def parallel_run(commands, parallelism):
    running = []
    # While stuff is running, or we have stuff to run...
    while commands or running:
        # While there is stuff to run, and room in the pipe...
        while commands and len(running) < parallelism:
            command = commands.pop(0)
            print("START {}".format(command[0]))
            popen = subprocess.Popen(
                command[1:], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            popen._desc = command[0]
            running.append(popen)
        must_sleep = True
        # Iterate over a copy, since finished processes are removed from the list.
        for popen in list(running):
            status = popen.poll()
            if status is not None:
                must_sleep = False
                running.remove(popen)
                if status == 0:
                    print("OK {}".format(popen._desc))
                else:
                    print("ERROR {} [Exit status: {}]"
                          .format(popen._desc, status))
                    output = "\n" + popen.communicate()[0].strip()
                    output = output.replace("\n", "\n| ")
                    print(output)
        else:
            print("WAIT ({} running, {} more to run)"
                  .format(len(running), len(commands)))
            if must_sleep:
                time.sleep(1)
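A minimal usage sketch for these helpers, assuming the same environment as the scripts above (PyYAML installed, COMPOSE_FILE pointing at an existing Compose file, and common.py importable); the commands passed to parallel_run are illustrative.

from common import ComposeFile, parallel_run

cf = ComposeFile()                 # loads $COMPOSE_FILE
print(sorted(cf.services))         # service names (v1 or v2 format)
cf.save()                          # writes it back with yaml.safe_dump

# Run three tasks, at most two at a time.
# Each task is [description, arg0, arg1, ...]; everything after the
# description is handed to subprocess.Popen.
parallel_run([
    ["sleep-a", "sleep", "2"],
    ["sleep-b", "sleep", "2"],
    ["list-tmp", "ls", "/tmp"],
], 2)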
@@ -1,69 +0,0 @@
#!/usr/bin/env python

from common import parallel_run
import os
import subprocess

project_name = os.path.basename(os.path.realpath("."))

# Get all services and backends in our compose application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "com.docker.compose.service" }} '
                '{{ .Ports }}',
])

# Build list of backends.
frontend_ports = dict()
backends = dict()
for container in containers_data.split('\n'):
    if not container:
        continue
    # TODO: support services with multiple ports!
    container_id, service_name, port = container.split(' ')
    if not port:
        continue
    backend, frontend = port.split("->")
    backend_addr, backend_port = backend.split(':')
    frontend_port, frontend_proto = frontend.split('/')
    # TODO: deal with udp (mostly skip it?)
    assert frontend_proto == "tcp"
    # TODO: check inconsistencies between port mappings
    frontend_ports[service_name] = frontend_port
    if service_name not in backends:
        backends[service_name] = []
    backends[service_name].append((backend_addr, backend_port))

# Get all existing ambassadors for this application.
ambassadors_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=ambassador.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "ambassador.service" }} '
                '{{ .Label "ambassador.bindaddr" }}',
])

# Update ambassadors.
operations = []
for ambassador in ambassadors_data.split('\n'):
    if not ambassador:
        continue
    ambassador_id, service_name, bind_address = ambassador.split()
    print("Updating configuration for {}/{} -> {}:{} -> {}"
          .format(service_name, ambassador_id,
                  bind_address, frontend_ports[service_name],
                  backends[service_name]))
    command = [
        ambassador_id,
        "docker", "run", "--rm", "--volumes-from", ambassador_id,
        "jpetazzo/hamba", "reconfigure",
        "{}:{}".format(bind_address, frontend_ports[service_name])
    ]
    for backend_addr, backend_port in backends[service_name]:
        command.extend([backend_addr, backend_port])
    operations.append(command)

# Execute all commands in parallel.
parallel_run(operations, 10)
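A small illustration of how one "docker ps --format" record is parsed by the loop above; the container ID, service name, and port mapping are hypothetical.

container = "d1e3f0a9b8c7 webui 0.0.0.0:32769->80/tcp"
container_id, service_name, port = container.split(' ')
backend, frontend = port.split("->")
backend_addr, backend_port = backend.split(':')
frontend_port, frontend_proto = frontend.split('/')
print(service_name, backend_addr, backend_port, frontend_port, frontend_proto)
# webui 0.0.0.0 32769 80 tcp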
@@ -1,71 +0,0 @@
#!/usr/bin/env python

from common import ComposeFile, parallel_run
import os
import subprocess

config = ComposeFile()

project_name = os.path.basename(os.path.realpath("."))

# Get all services in our compose application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .ID }} {{ .Label "com.docker.compose.service" }}',
])

# Get all existing ambassadors for this application.
ambassadors_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=ambassador.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "ambassador.container" }} '
                '{{ .Label "ambassador.service" }}',
])

# Build a set of existing ambassadors.
ambassadors = dict()
for ambassador in ambassadors_data.split('\n'):
    if not ambassador:
        continue
    ambassador_id, container_id, linked_service = ambassador.split()
    ambassadors[container_id, linked_service] = ambassador_id

operations = []

# Start the missing ambassadors.
for container in containers_data.split('\n'):
    if not container:
        continue
    container_id, service_name = container.split()
    extra_hosts = config.services[service_name].get("extra_hosts", {})
    for linked_service, bind_address in extra_hosts.items():
        description = "Ambassador {}/{}/{}".format(
            service_name, container_id, linked_service)
        ambassador_id = ambassadors.pop((container_id, linked_service), None)
        if ambassador_id:
            print("{} already exists: {}".format(description, ambassador_id))
        else:
            print("{} not found, creating it.".format(description))
            operations.append([
                description,
                "docker", "run", "-d",
                "--net", "container:{}".format(container_id),
                "--label", "ambassador.project={}".format(project_name),
                "--label", "ambassador.container={}".format(container_id),
                "--label", "ambassador.service={}".format(linked_service),
                "--label", "ambassador.bindaddr={}".format(bind_address),
                "jpetazzo/hamba", "run"
            ])

# Destroy extraneous ambassadors.
for ambassador_id in ambassadors.values():
    print("{} is not useful anymore, destroying it.".format(ambassador_id))
    operations.append([
        "rm -f {}".format(ambassador_id),
        "docker", "rm", "-f", ambassador_id,
    ])

# Execute all commands in parallel.
parallel_run(operations, 10)
@@ -1,3 +0,0 @@
#!/bin/sh
docker ps -q --filter label=ambassador.project=dockercoins |
    xargs docker rm -f
@@ -1,16 +0,0 @@
#!/bin/sh
# Some tools will choke on the YAML files generated by PyYAML;
# in particular on a section like this one:
#
# service:
#   ports:
#   - 8000:5000
#
# This script adds two spaces in front of the dash in those files.
# Warning: it is a hack, and probably won't work on some YAML files.
[ -f "$COMPOSE_FILE" ] || {
    echo "Cannot find COMPOSE_FILE"
    exit 1
}
sed -i 's/^  -/    -/' $COMPOSE_FILE
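To make the quirk concrete, here is a small illustration (not part of the removed files) of the indentation PyYAML produces and that the sed expression above rewrites; the "service"/"ports" structure is a made-up example.

import yaml

print(yaml.safe_dump({"service": {"ports": [80]}}, default_flow_style=False))
# service:
#   ports:
#   - 80
#
# The dash is flush with the "ports:" key; the sed above turns "  -" into
# "    -" so the list item ends up indented under its key.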
@@ -1,38 +0,0 @@
#!/usr/bin/env python

from common import ComposeFile
import yaml

config = ComposeFile()

# The ambassadors need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

def generate_local_addr():
    last_byte = 2
    while last_byte < 255:
        yield "127.127.0.{}".format(last_byte)
        last_byte += 1

for service_name, service in config.services.items():
    if "links" in service:
        for link, local_addr in zip(service["links"], generate_local_addr()):
            if link not in ports:
                print("Skipping link {} in service {} "
                      "(no port mapping defined). "
                      "Your code will probably break."
                      .format(link, service_name))
                continue
            if "extra_hosts" not in service:
                service["extra_hosts"] = {}
            service["extra_hosts"][link] = local_addr
        del service["links"]
    if "ports" in service:
        del service["ports"]
    if "volumes" in service:
        del service["volumes"]
    if service_name in ports:
        service["ports"] = [ ports[service_name] ]

config.save()
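A hypothetical illustration of what the rewrite above does to one service that links to "redis", assuming ports.yml declares a port for it; the service definition and port are made up.

service = {"image": "dockercoins_worker", "links": ["redis"]}
ports = {"redis": 6379}

def generate_local_addr():
    last_byte = 2
    while last_byte < 255:
        yield "127.127.0.{}".format(last_byte)
        last_byte += 1

# Each link becomes an extra_hosts entry pointing at a dedicated loopback
# address, on which an ambassador will later listen.
for link, local_addr in zip(service["links"], generate_local_addr()):
    if link in ports:
        service.setdefault("extra_hosts", {})[link] = local_addr
del service["links"]
print(service.get("extra_hosts"))   # {'redis': '127.127.0.2'}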
@@ -1,46 +0,0 @@
#!/usr/bin/env python

# FIXME: hardcoded
PORT="80"

import os
import subprocess

project_name = os.path.basename(os.path.realpath("."))

# Get all existing services for this application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .Label "com.docker.compose.service" }} '
                '{{ .Label "com.docker.compose.container-number" }} '
                '{{ .Label "loadbalancer" }}',
])

load_balancers = dict()
for line in containers_data.split('\n'):
    if not line:
        continue
    service_name, container_number, load_balancer = line.split(' ')
    if load_balancer:
        if load_balancer not in load_balancers:
            load_balancers[load_balancer] = []
        load_balancers[load_balancer].append((service_name, int(container_number)))

for load_balancer, backends in load_balancers.items():
    # FIXME: iterate on all load balancers
    container_name = "{}_{}_1".format(project_name, load_balancer)
    command = [
        "docker", "run", "--rm",
        "--volumes-from", container_name,
        "--net", "container:{}".format(container_name),
        "jpetazzo/hamba", "reconfigure", PORT,
    ]
    command.extend(
        "{}_{}_{}:{}".format(project_name, backend_name, backend_number, PORT)
        for (backend_name, backend_number) in sorted(backends)
    )
    print("Updating configuration for {} with {} backend(s)..."
          .format(container_name, len(backends)))
    subprocess.check_output(command)
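For clarity, a sketch of the command assembled above for a load balancer with two backends; project, load balancer, and backend values are hypothetical.

project_name, load_balancer, PORT = "dockercoins", "rng-lb", "80"
backends = [("rng", 2), ("rng", 1)]

container_name = "{}_{}_1".format(project_name, load_balancer)
command = ["docker", "run", "--rm",
           "--volumes-from", container_name,
           "--net", "container:{}".format(container_name),
           "jpetazzo/hamba", "reconfigure", PORT]
command.extend("{}_{}_{}:{}".format(project_name, name, number, PORT)
               for (name, number) in sorted(backends))
print(" ".join(command))
# (output wrapped for readability)
# docker run --rm --volumes-from dockercoins_rng-lb_1 --net container:dockercoins_rng-lb_1
#   jpetazzo/hamba reconfigure 80 dockercoins_rng_1:80 dockercoins_rng_2:80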
@@ -1,201 +0,0 @@
#!/bin/sh
unset DOCKER_REGISTRY
unset DOCKER_HOST
unset COMPOSE_FILE

SWARM_IMAGE=${SWARM_IMAGE:-swarm}

prepare_1_check_ssh_keys () {
    for N in $(seq 1 5); do
        ssh node$N true
    done
}

prepare_2_compile_swarm () {
    cd ~
    git clone git://github.com/docker/swarm
    cd swarm
    [[ -z "$1" ]] && {
        echo "Specify which revision to build."
        return
    }
    git checkout "$1" || return
    mkdir -p image
    docker build -t docker/swarm:$1 .
    docker run -i --entrypoint sh docker/swarm:$1 \
        -c 'cat $(which swarm)' > image/swarm
    chmod +x image/swarm
    cat >image/Dockerfile <<EOF
FROM scratch
COPY ./swarm /swarm
ENTRYPOINT ["/swarm", "-debug", "-experimental"]
EOF
    docker build -t jpetazzo/swarm:$1 image
    docker login
    docker push jpetazzo/swarm:$1
    docker logout
    SWARM_IMAGE=jpetazzo/swarm:$1
}

clean_1_containers () {
    for N in $(seq 1 5); do
        ssh node$N "docker ps -aq | xargs -r -n1 -P10 docker rm -f"
    done
}

clean_2_volumes () {
    for N in $(seq 1 5); do
        ssh node$N "docker volume ls -q | xargs -r docker volume rm"
    done
}

clean_3_images () {
    for N in $(seq 1 5); do
        ssh node$N "docker images | awk '/dockercoins|jpetazzo/ {print \$1\":\"\$2}' | xargs -r docker rmi -f"
    done
}

clean_4_machines () {
    rm -rf ~/.docker/machine/
}

clean_all () {
    clean_1_containers
    clean_2_volumes
    clean_3_images
    clean_4_machines
}

dm_swarm () {
    eval $(docker-machine env node1 --swarm)
}
dm_node1 () {
    eval $(docker-machine env node1)
}

setup_1_swarm () {
    grep node[12345] /etc/hosts | grep -v ^127 |
    while read IPADDR NODENAME; do
        docker-machine create --driver generic \
            --engine-opt cluster-store=consul://localhost:8500 \
            --engine-opt cluster-advertise=eth0:2376 \
            --swarm --swarm-master --swarm-image $SWARM_IMAGE \
            --swarm-discovery consul://localhost:8500 \
            --swarm-opt replication --swarm-opt advertise=$IPADDR:3376 \
            --generic-ssh-user docker --generic-ip-address $IPADDR $NODENAME
    done
}

setup_2_consul () {
    IPADDR=$(ssh node1 ip a ls dev eth0 |
             sed -n 's,.*inet \(.*\)/.*,\1,p')

    for N in 1 2 3 4 5; do
        ssh node$N -- docker run -d --restart=always --name consul_node$N \
            -e CONSUL_BIND_INTERFACE=eth0 --net host consul \
            agent -server -retry-join $IPADDR -bootstrap-expect 5 \
            -ui -client 0.0.0.0
    done
}

setup_3_wait () {
    # Wait for a Swarm master
    dm_swarm
    while ! docker ps; do sleep 1; done

    # Wait for all nodes to be there
    while ! [ "$(docker info | grep "^Nodes:")" = "Nodes: 5" ]; do sleep 1; done
}

setup_4_registry () {
    cd ~/orchestration-workshop/registry
    dm_swarm
    docker-compose up -d
    for N in $(seq 2 5); do
        docker-compose scale frontend=$N
    done
}

setup_5_btp_dockercoins () {
    cd ~/orchestration-workshop/dockercoins
    dm_node1
    export DOCKER_REGISTRY=localhost:5000
    cp docker-compose.yml-v2 docker-compose.yml
    ~/orchestration-workshop/bin/build-tag-push.py | tee /tmp/btp.log
    export $(tail -n 1 /tmp/btp.log)
}

setup_6_add_lbs () {
    cd ~/orchestration-workshop/dockercoins
    ~/orchestration-workshop/bin/add-load-balancer-v2.py rng
    ~/orchestration-workshop/bin/add-load-balancer-v2.py hasher
}

setup_7_consulfs () {
    dm_swarm
    docker pull jpetazzo/consulfs
    for N in $(seq 1 5); do
        ssh node$N "docker run --rm -v /usr/local/bin:/target jpetazzo/consulfs"
        ssh node$N mkdir -p ~/consul
        ssh -f node$N "mountpoint ~/consul || consulfs localhost:8500 ~/consul"
    done
}

setup_8_syncmachine () {
    while ! mountpoint ~/consul; do
        sleep 1
    done
    cp -r ~/.docker/machine ~/consul/
    for N in $(seq 2 5); do
        ssh node$N mkdir -p ~/.docker
        ssh node$N "[ -L ~/.docker/machine ] || ln -s ~/consul/machine ~/.docker"
    done
}

setup_9_elk () {
    dm_swarm
    cd ~/orchestration-workshop/elk
    docker-compose up -d
    for N in $(seq 1 5); do
        docker-compose scale logstash=$N
    done
}

setup_all () {
    setup_1_swarm
    setup_2_consul
    setup_3_wait
    setup_4_registry
    setup_5_btp_dockercoins
    setup_6_add_lbs
    setup_7_consulfs
    setup_8_syncmachine
    dm_swarm
}


force_remove_network () {
    dm_swarm
    NET="$1"
    for CNAME in $(docker network inspect $NET | grep Name | grep -v \"$NET\" | cut -d\" -f4); do
        echo $CNAME
        docker network disconnect -f $NET $CNAME
    done
    docker network rm $NET
}

demo_1_compose_up () {
    dm_swarm
    cd ~/orchestration-workshop/dockercoins
    docker-compose up -d
}

grep -qs -- MAGICMARKER "$0" && { # Don't display this line in the function list
    echo "You should source this file, then invoke the following functions:"
    grep -- '^[a-z].*{$' "$0" | cut -d" " -f1
}

show_swarm_primary () {
    dm_swarm
    docker info 2>/dev/null | grep -e ^Role -e ^Primary
}