Compare commits
3 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
60ce3882e8 | ||
|
|
3c6706ad03 | ||
|
|
bee3e763a9 |
3
.gitignore
vendored
@@ -7,5 +7,4 @@ prepare-vms/ips.pdf
|
||||
prepare-vms/settings.yaml
|
||||
prepare-vms/tags
|
||||
slides/*.yml.html
|
||||
slides/autopilot/state.yaml
|
||||
node_modules
|
||||
autotest/nextstep
|
||||
|
||||
19
LICENSE
@@ -1,12 +1,13 @@
|
||||
The code in this repository is licensed under the Apache License
|
||||
Version 2.0. You may obtain a copy of this license at:
|
||||
Copyright 2015 Jérôme Petazzoni
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
The instructions and slides in this repository (e.g. the files
|
||||
with extension .md and .yml in the "slides" subdirectory) are
|
||||
under the Creative Commons Attribution 4.0 International Public
|
||||
License. You may obtain a copy of this license at:
|
||||
|
||||
https://creativecommons.org/licenses/by/4.0/legalcode
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
13
README.md
@@ -39,16 +39,14 @@ your own tutorials.
|
||||
All these materials have been gathered in a single repository
|
||||
because they have a few things in common:
|
||||
|
||||
- some [common slides](slides/common/) that are re-used
|
||||
(and updated) identically between different decks;
|
||||
- a [build system](slides/) generating HTML slides from
|
||||
Markdown source files;
|
||||
- a [semi-automated test harness](slides/autopilot/) to check
|
||||
that the exercises and examples provided work properly;
|
||||
- a [PhantomJS script](slides/slidechecker.js) to check
|
||||
that the slides look good and don't have formatting issues;
|
||||
- some [common slides](slides/common/) that are re-used
|
||||
(and updated) identically between different decks;
|
||||
- [deployment scripts](prepare-vms/) to start training
|
||||
VMs in bulk;
|
||||
- a [semi-automated test harness](autotest/) to check
|
||||
that the exercises and examples provided work properly;
|
||||
- a fancy pipeline powered by
|
||||
[Netlify](https://www.netlify.com/) and continuously
|
||||
deploying `master` to http://container.training/.
|
||||
@@ -76,6 +74,9 @@ a few other contributors. It is actively maintained.
|
||||
|
||||
## Repository structure
|
||||
|
||||
- [autotest](autotest/)
|
||||
- Semi-automated testing system to check that all the exercises
|
||||
in the slides work properly.
|
||||
- [bin](bin/)
|
||||
- A few helper scripts that you can safely ignore for now.
|
||||
- [dockercoins](dockercoins/)
|
||||
|
||||
229
autotest/autotest.py
Executable file
@@ -0,0 +1,229 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import uuid
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
|
||||
TIMEOUT = 60 # 1 minute
|
||||
|
||||
|
||||
def hrule():
|
||||
return "="*int(subprocess.check_output(["tput", "cols"]))
|
||||
|
||||
# A "snippet" is something that the user is supposed to do in the workshop.
|
||||
# Most of the "snippets" are shell commands.
|
||||
# Some of them can be key strokes or other actions.
|
||||
# In the markdown source, they are the code sections (identified by triple-
|
||||
# quotes) within .exercise[] sections.
|
||||
|
||||
class Snippet(object):
|
||||
|
||||
def __init__(self, slide, content):
|
||||
self.slide = slide
|
||||
self.content = content
|
||||
self.actions = []
|
||||
|
||||
def __str__(self):
|
||||
return self.content
|
||||
|
||||
|
||||
class Slide(object):
|
||||
|
||||
current_slide = 0
|
||||
|
||||
def __init__(self, content):
|
||||
Slide.current_slide += 1
|
||||
self.number = Slide.current_slide
|
||||
|
||||
# Remove commented-out slides
|
||||
# (remark.js considers ??? to be the separator for speaker notes)
|
||||
content = re.split("\n\?\?\?\n", content)[0]
|
||||
self.content = content
|
||||
|
||||
self.snippets = []
|
||||
exercises = re.findall("\.exercise\[(.*)\]", content, re.DOTALL)
|
||||
for exercise in exercises:
|
||||
if "```" in exercise:
|
||||
for snippet in exercise.split("```")[1::2]:
|
||||
self.snippets.append(Snippet(self, snippet))
|
||||
else:
|
||||
logging.warning("Exercise on slide {} does not have any ``` snippet."
|
||||
.format(self.number))
|
||||
self.debug()
|
||||
|
||||
def __str__(self):
|
||||
text = self.content
|
||||
for snippet in self.snippets:
|
||||
text = text.replace(snippet.content, ansi("7")(snippet.content))
|
||||
return text
|
||||
|
||||
def debug(self):
|
||||
logging.debug("\n{}\n{}\n{}".format(hrule(), self.content, hrule()))
|
||||
|
||||
|
||||
def ansi(code):
|
||||
return lambda s: "\x1b[{}m{}\x1b[0m".format(code, s)
|
||||
|
||||
|
||||
def wait_for_string(s):
|
||||
logging.debug("Waiting for string: {}".format(s))
|
||||
deadline = time.time() + TIMEOUT
|
||||
while time.time() < deadline:
|
||||
output = capture_pane()
|
||||
if s in output:
|
||||
return
|
||||
time.sleep(1)
|
||||
raise Exception("Timed out while waiting for {}!".format(s))
|
||||
|
||||
|
||||
def wait_for_prompt():
|
||||
logging.debug("Waiting for prompt.")
|
||||
deadline = time.time() + TIMEOUT
|
||||
while time.time() < deadline:
|
||||
output = capture_pane()
|
||||
# If we are not at the bottom of the screen, there will be a bunch of extra \n's
|
||||
output = output.rstrip('\n')
|
||||
if output[-2:] == "\n$":
|
||||
return
|
||||
time.sleep(1)
|
||||
raise Exception("Timed out while waiting for prompt!")
|
||||
|
||||
|
||||
def check_exit_status():
|
||||
token = uuid.uuid4().hex
|
||||
data = "echo {} $?\n".format(token)
|
||||
logging.debug("Sending {!r} to get exit status.".format(data))
|
||||
send_keys(data)
|
||||
time.sleep(0.5)
|
||||
wait_for_prompt()
|
||||
screen = capture_pane()
|
||||
status = re.findall("\n{} ([0-9]+)\n".format(token), screen, re.MULTILINE)
|
||||
logging.debug("Got exit status: {}.".format(status))
|
||||
if len(status) == 0:
|
||||
raise Exception("Couldn't retrieve status code {}. Timed out?".format(token))
|
||||
if len(status) > 1:
|
||||
raise Exception("More than one status code {}. I'm seeing double! Shoot them both.".format(token))
|
||||
code = int(status[0])
|
||||
if code != 0:
|
||||
raise Exception("Non-zero exit status: {}.".format(code))
|
||||
# Otherwise just return peacefully.
|
||||
|
||||
|
||||
slides = []
|
||||
content = open(sys.argv[1]).read()
|
||||
for slide in re.split("\n---?\n", content):
|
||||
slides.append(Slide(slide))
|
||||
|
||||
actions = []
|
||||
for slide in slides:
|
||||
for snippet in slide.snippets:
|
||||
content = snippet.content
|
||||
# Extract the "method" (e.g. bash, keys, ...)
|
||||
# On multi-line snippets, the method is alone on the first line
|
||||
# On single-line snippets, the data follows the method immediately
|
||||
if '\n' in content:
|
||||
method, data = content.split('\n', 1)
|
||||
else:
|
||||
method, data = content.split(' ', 1)
|
||||
actions.append((slide, snippet, method, data))
|
||||
|
||||
|
||||
def send_keys(data):
|
||||
subprocess.check_call(["tmux", "send-keys", data])
|
||||
|
||||
def capture_pane():
|
||||
return subprocess.check_output(["tmux", "capture-pane", "-p"])
|
||||
|
||||
|
||||
try:
|
||||
i = int(open("nextstep").read())
|
||||
logging.info("Loaded next step ({}) from file.".format(i))
|
||||
except Exception as e:
|
||||
logging.warning("Could not read nextstep file ({}), initializing to 0.".format(e))
|
||||
i = 0
|
||||
|
||||
interactive = True
|
||||
|
||||
while i < len(actions):
|
||||
with open("nextstep", "w") as f:
|
||||
f.write(str(i))
|
||||
slide, snippet, method, data = actions[i]
|
||||
|
||||
# Remove extra spaces (we don't want them in the terminal) and carriage returns
|
||||
data = data.strip()
|
||||
|
||||
print(hrule())
|
||||
print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
|
||||
print(hrule())
|
||||
if interactive:
|
||||
print("[{}/{}] Shall we execute that snippet above?".format(i, len(actions)))
|
||||
print("(ENTER to execute, 'c' to continue until next error, N to jump to step #N)")
|
||||
command = raw_input("> ")
|
||||
else:
|
||||
command = ""
|
||||
|
||||
# For now, remove the `highlighted` sections
|
||||
# (Make sure to use $() in shell snippets!)
|
||||
if '`' in data:
|
||||
logging.info("Stripping ` from snippet.")
|
||||
data = data.replace('`', '')
|
||||
|
||||
if command == "c":
|
||||
# continue until next timeout
|
||||
interactive = False
|
||||
elif command.isdigit():
|
||||
i = int(command)
|
||||
elif command == "":
|
||||
logging.info("Running with method {}: {}".format(method, data))
|
||||
if method == "keys":
|
||||
send_keys(data)
|
||||
elif method == "bash":
|
||||
# Make sure that we're ready
|
||||
wait_for_prompt()
|
||||
# Strip leading spaces
|
||||
data = re.sub("\n +", "\n", data)
|
||||
# Add "RETURN" at the end of the command :)
|
||||
data += "\n"
|
||||
# Send command
|
||||
send_keys(data)
|
||||
# Force a short sleep to avoid race condition
|
||||
time.sleep(0.5)
|
||||
_, _, next_method, next_data = actions[i+1]
|
||||
if next_method == "wait":
|
||||
wait_for_string(next_data)
|
||||
else:
|
||||
wait_for_prompt()
|
||||
# Verify return code FIXME should be optional
|
||||
check_exit_status()
|
||||
elif method == "copypaste":
|
||||
screen = capture_pane()
|
||||
matches = re.findall(data, screen, flags=re.DOTALL)
|
||||
if len(matches) == 0:
|
||||
raise Exception("Could not find regex {} in output.".format(data))
|
||||
# Arbitrarily get the most recent match
|
||||
match = matches[-1]
|
||||
# Remove line breaks (like a screen copy paste would do)
|
||||
match = match.replace('\n', '')
|
||||
send_keys(match + '\n')
|
||||
# FIXME: we should factor out the "bash" method
|
||||
wait_for_prompt()
|
||||
check_exit_status()
|
||||
else:
|
||||
logging.warning("Unknown method {}: {!r}".format(method, data))
|
||||
i += 1
|
||||
|
||||
else:
|
||||
i += 1
|
||||
logging.warning("Unknown command {}, skipping to next step.".format(command))
|
||||
|
||||
# Reset slide counter
|
||||
with open("nextstep", "w") as f:
|
||||
f.write(str(0))
|
||||
@@ -12,9 +12,7 @@
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_swarm -%}
|
||||
{%- set image_src = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
Put your initials in the first column to "claim" a cluster.
|
||||
Initials{% for node in clusters[0] %} node{{ loop.index }}{% endfor %}
|
||||
{% for cluster in clusters -%}
|
||||
{%- for node in cluster %} {{ node|trim }}{% endfor %}
|
||||
{% endfor %}
|
||||
|
Can't render this file because it contains an unexpected character in line 1 and column 42.
|
@@ -1,21 +0,0 @@
|
||||
#!/bin/sh
|
||||
if [ $(whoami) != ubuntu ]; then
|
||||
echo "This script should be executed on a freshly deployed node,"
|
||||
echo "with the 'ubuntu' user. Aborting."
|
||||
exit 1
|
||||
fi
|
||||
if id docker; then
|
||||
sudo userdel -r docker
|
||||
fi
|
||||
pip install --user awscli jinja2 pdfkit
|
||||
sudo apt-get install -y wkhtmltopdf xvfb
|
||||
tmux new-session \; send-keys "
|
||||
[ -f ~/.ssh/id_rsa ] || ssh-keygen
|
||||
|
||||
eval \$(ssh-agent)
|
||||
ssh-add
|
||||
Xvfb :0 &
|
||||
export DISPLAY=:0
|
||||
mkdir -p ~/www
|
||||
sudo docker run -d -p 80:80 -v \$HOME/www:/usr/share/nginx/html nginx
|
||||
"
|
||||
@@ -1,5 +0,0 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 5
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: clusters.csv
|
||||
@@ -20,5 +20,5 @@ paper_margin: 0.2in
|
||||
engine_version: test
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.17.1
|
||||
machine_version: 0.13.0
|
||||
compose_version: 1.16.1
|
||||
machine_version: 0.12.0
|
||||
|
||||
@@ -20,5 +20,5 @@ paper_margin: 0.2in
|
||||
engine_version: test
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.17.1
|
||||
machine_version: 0.13.0
|
||||
compose_version: 1.16.1
|
||||
machine_version: 0.12.0
|
||||
|
||||
@@ -20,7 +20,7 @@ DEPENDENCIES="
|
||||
ssh
|
||||
curl
|
||||
jq
|
||||
parallel-ssh
|
||||
pssh
|
||||
wkhtmltopdf
|
||||
man
|
||||
"
|
||||
@@ -38,7 +38,7 @@ check_envvars() {
|
||||
if [ -z "${!envvar}" ]; then
|
||||
error "Environment variable $envvar is not set."
|
||||
if [ "$envvar" = "SSH_AUTH_SOCK" ]; then
|
||||
error "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?"
|
||||
error "Hint: run '\$(ssh-agent) ; ssh-add' and try again?"
|
||||
fi
|
||||
status=1
|
||||
fi
|
||||
|
||||
@@ -1 +1 @@
|
||||
/ /swarm-video.yml.html 200!
|
||||
/ /dockercon.yml.html 200!
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
logging.basicConfig(level=os.environ.get("LOG_LEVEL", "INFO"))
|
||||
|
||||
filename = sys.argv[1]
|
||||
|
||||
logging.info("Checking file {}...".format(filename))
|
||||
text = subprocess.check_output(["./slidechecker.js", filename])
|
||||
html = open(filename).read()
|
||||
html = html.replace("</textarea>", "\n---\n```\n{}\n```\n</textarea>".format(text))
|
||||
|
||||
open(filename, "w").write(html)
|
||||
@@ -1,405 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
import click
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
import yaml
|
||||
|
||||
|
||||
logging.basicConfig(level=os.environ.get("LOG_LEVEL", "INFO"))
|
||||
|
||||
|
||||
TIMEOUT = 60 # 1 minute
|
||||
|
||||
|
||||
class State(object):
|
||||
|
||||
def __init__(self):
|
||||
self.interactive = True
|
||||
self.verify_status = False
|
||||
self.simulate_type = True
|
||||
self.next_step = 0
|
||||
|
||||
def load(self):
|
||||
data = yaml.load(open("state.yaml"))
|
||||
self.interactive = bool(data["interactive"])
|
||||
self.verify_status = bool(data["verify_status"])
|
||||
self.simulate_type = bool(data["simulate_type"])
|
||||
self.next_step = int(data["next_step"])
|
||||
|
||||
def save(self):
|
||||
with open("state.yaml", "w") as f:
|
||||
yaml.dump(dict(
|
||||
interactive=self.interactive,
|
||||
verify_status=self.verify_status,
|
||||
simulate_type=self.simulate_type,
|
||||
next_step=self.next_step), f, default_flow_style=False)
|
||||
|
||||
|
||||
def hrule():
|
||||
return "="*int(subprocess.check_output(["tput", "cols"]))
|
||||
|
||||
# A "snippet" is something that the user is supposed to do in the workshop.
|
||||
# Most of the "snippets" are shell commands.
|
||||
# Some of them can be key strokes or other actions.
|
||||
# In the markdown source, they are the code sections (identified by triple-
|
||||
# quotes) within .exercise[] sections.
|
||||
|
||||
class Snippet(object):
|
||||
|
||||
def __init__(self, slide, content):
|
||||
self.slide = slide
|
||||
self.content = content
|
||||
self.actions = []
|
||||
|
||||
def __str__(self):
|
||||
return self.content
|
||||
|
||||
|
||||
class Slide(object):
|
||||
|
||||
current_slide = 0
|
||||
|
||||
def __init__(self, content):
|
||||
Slide.current_slide += 1
|
||||
self.number = Slide.current_slide
|
||||
|
||||
# Remove commented-out slides
|
||||
# (remark.js considers ??? to be the separator for speaker notes)
|
||||
content = re.split("\n\?\?\?\n", content)[0]
|
||||
self.content = content
|
||||
|
||||
self.snippets = []
|
||||
exercises = re.findall("\.exercise\[(.*)\]", content, re.DOTALL)
|
||||
for exercise in exercises:
|
||||
if "```" in exercise:
|
||||
for snippet in exercise.split("```")[1::2]:
|
||||
self.snippets.append(Snippet(self, snippet))
|
||||
else:
|
||||
logging.warning("Exercise on slide {} does not have any ``` snippet."
|
||||
.format(self.number))
|
||||
self.debug()
|
||||
|
||||
def __str__(self):
|
||||
text = self.content
|
||||
for snippet in self.snippets:
|
||||
text = text.replace(snippet.content, ansi("7")(snippet.content))
|
||||
return text
|
||||
|
||||
def debug(self):
|
||||
logging.debug("\n{}\n{}\n{}".format(hrule(), self.content, hrule()))
|
||||
|
||||
# Synchronize slides in a remote browser
|
||||
class Remote(object):
|
||||
|
||||
def __init__(self):
|
||||
self.slide_on_screen = 0
|
||||
|
||||
# Directly go to a specific slide
|
||||
def goto(self, slide_number):
|
||||
subprocess.check_call(["./gotoslide.js", str(slide_number)])
|
||||
self.slide_on_screen = slide_number
|
||||
focus_slides()
|
||||
|
||||
# Offer the opportunity to go step by step to the given slide
|
||||
def catchup(self, slide_number):
|
||||
if self.slide_on_screen > slide_number:
|
||||
return self.goto(slide_number)
|
||||
while self.slide_on_screen < slide_number:
|
||||
if state.interactive:
|
||||
click.clear()
|
||||
print("Catching up on slide: {} -> {}"
|
||||
.format(self.slide_on_screen, slide_number))
|
||||
print("z/⏎ Zoom to target slide")
|
||||
print("n/→/⎵ Next slide")
|
||||
print("p/← Previous slide")
|
||||
print("q Abort remote control")
|
||||
command = click.getchar()
|
||||
else:
|
||||
command = "z"
|
||||
if command in ("z", "\r"):
|
||||
self.goto(slide_number)
|
||||
elif command in ("n", "\x1b[C", " "):
|
||||
self.goto(self.slide_on_screen+1)
|
||||
elif command in ("p", "\x1b[D"):
|
||||
self.goto(self.slide_on_screen-1)
|
||||
elif command == "q":
|
||||
return
|
||||
|
||||
|
||||
def focus_slides():
|
||||
subprocess.check_output(["i3-msg", "workspace", "3"])
|
||||
subprocess.check_output(["i3-msg", "workspace", "1"])
|
||||
|
||||
def focus_terminal():
|
||||
subprocess.check_output(["i3-msg", "workspace", "2"])
|
||||
subprocess.check_output(["i3-msg", "workspace", "1"])
|
||||
|
||||
def focus_browser():
|
||||
subprocess.check_output(["i3-msg", "workspace", "4"])
|
||||
subprocess.check_output(["i3-msg", "workspace", "1"])
|
||||
|
||||
|
||||
remote = Remote()
|
||||
state = State()
|
||||
|
||||
|
||||
def ansi(code):
|
||||
return lambda s: "\x1b[{}m{}\x1b[0m".format(code, s)
|
||||
|
||||
|
||||
def wait_for_string(s, timeout=TIMEOUT):
|
||||
logging.debug("Waiting for string: {}".format(s))
|
||||
deadline = time.time() + timeout
|
||||
while time.time() < deadline:
|
||||
output = capture_pane()
|
||||
if s in output:
|
||||
return
|
||||
time.sleep(1)
|
||||
raise Exception("Timed out while waiting for {}!".format(s))
|
||||
|
||||
|
||||
def wait_for_prompt():
|
||||
logging.debug("Waiting for prompt.")
|
||||
deadline = time.time() + TIMEOUT
|
||||
while time.time() < deadline:
|
||||
output = capture_pane()
|
||||
# If we are not at the bottom of the screen, there will be a bunch of extra \n's
|
||||
output = output.rstrip('\n')
|
||||
if output.endswith("\n$"):
|
||||
return
|
||||
if output.endswith("\n/ #"):
|
||||
return
|
||||
time.sleep(1)
|
||||
raise Exception("Timed out while waiting for prompt!")
|
||||
|
||||
|
||||
def check_exit_status():
|
||||
if not state.verify_status:
|
||||
return
|
||||
token = uuid.uuid4().hex
|
||||
data = "echo {} $?\n".format(token)
|
||||
logging.debug("Sending {!r} to get exit status.".format(data))
|
||||
send_keys(data)
|
||||
time.sleep(0.5)
|
||||
wait_for_prompt()
|
||||
screen = capture_pane()
|
||||
status = re.findall("\n{} ([0-9]+)\n".format(token), screen, re.MULTILINE)
|
||||
logging.debug("Got exit status: {}.".format(status))
|
||||
if len(status) == 0:
|
||||
raise Exception("Couldn't retrieve status code {}. Timed out?".format(token))
|
||||
if len(status) > 1:
|
||||
raise Exception("More than one status code {}. I'm seeing double! Shoot them both.".format(token))
|
||||
code = int(status[0])
|
||||
if code != 0:
|
||||
raise Exception("Non-zero exit status: {}.".format(code))
|
||||
# Otherwise just return peacefully.
|
||||
|
||||
|
||||
def setup_tmux_and_ssh():
|
||||
if subprocess.call(["tmux", "has-session"]):
|
||||
logging.error("Couldn't connect to tmux. Please setup tmux first.")
|
||||
ipaddr = open("../../prepare-vms/ips.txt").read().split("\n")[0]
|
||||
uid = os.getuid()
|
||||
|
||||
raise Exception("""
|
||||
1. If you're running this directly from a node:
|
||||
|
||||
tmux
|
||||
|
||||
2. If you want to control a remote tmux:
|
||||
|
||||
rm -f /tmp/tmux-{uid}/default && ssh -t -L /tmp/tmux-{uid}/default:/tmp/tmux-1001/default docker@{ipaddr} tmux
|
||||
|
||||
3. If you cannot control a remote tmux:
|
||||
|
||||
tmux new-session ssh docker@{ipaddr}
|
||||
""".format(uid=uid, ipaddr=ipaddr))
|
||||
else:
|
||||
logging.info("Found tmux session. Trying to acquire shell prompt.")
|
||||
wait_for_prompt()
|
||||
logging.info("Successfully connected to test cluster in tmux session.")
|
||||
|
||||
|
||||
|
||||
slides = []
|
||||
content = open(sys.argv[1]).read()
|
||||
|
||||
# OK, this part is definitely hackish, and will break if the
|
||||
# excludedClasses parameter is not on a single line.
|
||||
excluded_classes = re.findall("excludedClasses: (\[.*\])", content)
|
||||
excluded_classes = set(eval(excluded_classes[0]))
|
||||
|
||||
for slide in re.split("\n---?\n", content):
|
||||
slide_classes = re.findall("class: (.*)", slide)
|
||||
if slide_classes:
|
||||
slide_classes = slide_classes[0].split(",")
|
||||
slide_classes = [c.strip() for c in slide_classes]
|
||||
if excluded_classes & set(slide_classes):
|
||||
logging.info("Skipping excluded slide.")
|
||||
continue
|
||||
slides.append(Slide(slide))
|
||||
|
||||
actions = []
|
||||
for slide in slides:
|
||||
for snippet in slide.snippets:
|
||||
content = snippet.content
|
||||
# Extract the "method" (e.g. bash, keys, ...)
|
||||
# On multi-line snippets, the method is alone on the first line
|
||||
# On single-line snippets, the data follows the method immediately
|
||||
if '\n' in content:
|
||||
method, data = content.split('\n', 1)
|
||||
else:
|
||||
method, data = content.split(' ', 1)
|
||||
actions.append((slide, snippet, method, data))
|
||||
|
||||
|
||||
def send_keys(data):
|
||||
if state.simulate_type and data[0] != '^':
|
||||
for key in data:
|
||||
if key == ";":
|
||||
key = "\\;"
|
||||
if key == "\n":
|
||||
time.sleep(1)
|
||||
subprocess.check_call(["tmux", "send-keys", key])
|
||||
time.sleep(0.2*random.random())
|
||||
if key == "\n":
|
||||
time.sleep(1)
|
||||
else:
|
||||
subprocess.check_call(["tmux", "send-keys", data])
|
||||
|
||||
def capture_pane():
|
||||
return subprocess.check_output(["tmux", "capture-pane", "-p"]).decode('utf-8')
|
||||
|
||||
|
||||
setup_tmux_and_ssh()
|
||||
|
||||
|
||||
try:
|
||||
state.load()
|
||||
logging.info("Successfully loaded state from file.")
|
||||
# Let's override the starting state, so that when an error occurs,
|
||||
# we can restart the auto-tester and then single-step or debug.
|
||||
# (Instead of running again through the same issue immediately.)
|
||||
state.interactive = True
|
||||
except Exception as e:
|
||||
logging.exception("Could not load state from file.")
|
||||
logging.warning("Using default values.")
|
||||
|
||||
|
||||
while state.next_step < len(actions):
|
||||
state.save()
|
||||
|
||||
slide, snippet, method, data = actions[state.next_step]
|
||||
|
||||
# Remove extra spaces (we don't want them in the terminal) and carriage returns
|
||||
data = data.strip()
|
||||
|
||||
# Synchronize the remote slides
|
||||
remote.catchup(slide.number)
|
||||
|
||||
click.clear()
|
||||
print(hrule())
|
||||
print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
|
||||
print(hrule())
|
||||
if state.interactive:
|
||||
print("simulate_type:{} verify_status:{}".format(state.simulate_type, state.verify_status))
|
||||
print("[{}/{}] Shall we execute that snippet above?".format(state.next_step, len(actions)))
|
||||
print("y/⎵/⏎ Execute snippet")
|
||||
print("p/← Previous snippet")
|
||||
print("n/→ Next snippet")
|
||||
print("s Simulate keystrokes")
|
||||
print("v Validate exit status")
|
||||
print("g Go to a specific snippet")
|
||||
print("q Quit")
|
||||
print("c Continue non-interactively until next error")
|
||||
command = click.getchar()
|
||||
else:
|
||||
command = "y"
|
||||
|
||||
# For now, remove the `highlighted` sections
|
||||
# (Make sure to use $() in shell snippets!)
|
||||
if '`' in data:
|
||||
logging.info("Stripping ` from snippet.")
|
||||
data = data.replace('`', '')
|
||||
|
||||
if command in ("n", "\x1b[C"):
|
||||
state.next_step += 1
|
||||
elif command in ("p", "\x1b[D"):
|
||||
state.next_step -= 1
|
||||
elif command == "s":
|
||||
state.simulate_type = not state.simulate_type
|
||||
elif command == "v":
|
||||
state.verify_status = not state.verify_status
|
||||
elif command == "g":
|
||||
state.next_step = click.prompt("Enter snippet number", type=int)
|
||||
# Special case: if we go to snippet 0, also reset the slides deck
|
||||
if state.next_step == 0:
|
||||
remote.goto(1)
|
||||
elif command == "q":
|
||||
break
|
||||
elif command == "c":
|
||||
# continue until next timeout
|
||||
state.interactive = False
|
||||
elif command in ("y", "\r", " "):
|
||||
focus_terminal()
|
||||
logging.info("Running with method {}: {}".format(method, data))
|
||||
if method == "keys":
|
||||
send_keys(data)
|
||||
elif method == "bash":
|
||||
# Make sure that we're ready
|
||||
wait_for_prompt()
|
||||
# Strip leading spaces
|
||||
data = re.sub("\n +", "\n", data)
|
||||
# Add "RETURN" at the end of the command :)
|
||||
data += "\n"
|
||||
# Send command
|
||||
send_keys(data)
|
||||
# Force a short sleep to avoid race condition
|
||||
time.sleep(0.5)
|
||||
_, _, next_method, next_data = actions[state.next_step+1]
|
||||
if next_method == "wait":
|
||||
wait_for_string(next_data)
|
||||
elif next_method == "longwait":
|
||||
wait_for_string(next_data, 10*TIMEOUT)
|
||||
else:
|
||||
wait_for_prompt()
|
||||
# Verify return code FIXME should be optional
|
||||
check_exit_status()
|
||||
elif method == "copypaste":
|
||||
screen = capture_pane()
|
||||
matches = re.findall(data, screen, flags=re.DOTALL)
|
||||
if len(matches) == 0:
|
||||
raise Exception("Could not find regex {} in output.".format(data))
|
||||
# Arbitrarily get the most recent match
|
||||
match = matches[-1]
|
||||
# Remove line breaks (like a screen copy paste would do)
|
||||
match = match.replace('\n', '')
|
||||
send_keys(match + '\n')
|
||||
# FIXME: we should factor out the "bash" method
|
||||
wait_for_prompt()
|
||||
check_exit_status()
|
||||
elif method == "open":
|
||||
# Cheap way to get node1's IP address
|
||||
screen = capture_pane()
|
||||
ipaddr = re.findall("^\[(.*)\]", screen, re.MULTILINE)[-1]
|
||||
url = data.replace("/node1", "/{}".format(ipaddr))
|
||||
# This should probably be adapted to run on different OS
|
||||
subprocess.check_output(["xdg-open", url])
|
||||
focus_browser()
|
||||
if state.interactive:
|
||||
print("Press any key to continue to next step...")
|
||||
click.getchar()
|
||||
else:
|
||||
logging.warning("Unknown method {}: {!r}".format(method, data))
|
||||
state.next_step += 1
|
||||
|
||||
else:
|
||||
logging.warning("Unknown command {}.".format(command))
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/* Expects a slide number as first argument.
|
||||
* Will connect to the local pub/sub server,
|
||||
* and issue a "go to slide X" command, which
|
||||
* will be sent to all connected browsers.
|
||||
*/
|
||||
|
||||
var io = require('socket.io-client');
|
||||
var socket = io('http://localhost:3000');
|
||||
socket.on('connect_error', function(){
|
||||
console.log('connection error');
|
||||
socket.close();
|
||||
});
|
||||
socket.emit('slide change', process.argv[2], function(){
|
||||
socket.close();
|
||||
});
|
||||
603
slides/autopilot/package-lock.json
generated
@@ -1,603 +0,0 @@
|
||||
{
|
||||
"name": "container-training-pub-sub-server",
|
||||
"version": "0.0.1",
|
||||
"lockfileVersion": 1,
|
||||
"requires": true,
|
||||
"dependencies": {
|
||||
"accepts": {
|
||||
"version": "1.3.4",
|
||||
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.4.tgz",
|
||||
"integrity": "sha1-hiRnWMfdbSGmR0/whKR0DsBesh8=",
|
||||
"requires": {
|
||||
"mime-types": "2.1.17",
|
||||
"negotiator": "0.6.1"
|
||||
}
|
||||
},
|
||||
"after": {
|
||||
"version": "0.8.2",
|
||||
"resolved": "https://registry.npmjs.org/after/-/after-0.8.2.tgz",
|
||||
"integrity": "sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8="
|
||||
},
|
||||
"array-flatten": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
|
||||
"integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
|
||||
},
|
||||
"arraybuffer.slice": {
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz",
|
||||
"integrity": "sha1-8zshWfBTKj8xB6JywMz70a0peco="
|
||||
},
|
||||
"async-limiter": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz",
|
||||
"integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg=="
|
||||
},
|
||||
"backo2": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/backo2/-/backo2-1.0.2.tgz",
|
||||
"integrity": "sha1-MasayLEpNjRj41s+u2n038+6eUc="
|
||||
},
|
||||
"base64-arraybuffer": {
|
||||
"version": "0.1.5",
|
||||
"resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz",
|
||||
"integrity": "sha1-c5JncZI7Whl0etZmqlzUv5xunOg="
|
||||
},
|
||||
"base64id": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/base64id/-/base64id-1.0.0.tgz",
|
||||
"integrity": "sha1-R2iMuZu2gE8OBtPnY7HDLlfY5rY="
|
||||
},
|
||||
"better-assert": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/better-assert/-/better-assert-1.0.2.tgz",
|
||||
"integrity": "sha1-QIZrnhueC1W0gYlDEeaPr/rrxSI=",
|
||||
"requires": {
|
||||
"callsite": "1.0.0"
|
||||
}
|
||||
},
|
||||
"blob": {
|
||||
"version": "0.0.4",
|
||||
"resolved": "https://registry.npmjs.org/blob/-/blob-0.0.4.tgz",
|
||||
"integrity": "sha1-vPEwUspURj8w+fx+lbmkdjCpSSE="
|
||||
},
|
||||
"body-parser": {
|
||||
"version": "1.18.2",
|
||||
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.2.tgz",
|
||||
"integrity": "sha1-h2eKGdhLR9hZuDGZvVm84iKxBFQ=",
|
||||
"requires": {
|
||||
"bytes": "3.0.0",
|
||||
"content-type": "1.0.4",
|
||||
"debug": "2.6.9",
|
||||
"depd": "1.1.1",
|
||||
"http-errors": "1.6.2",
|
||||
"iconv-lite": "0.4.19",
|
||||
"on-finished": "2.3.0",
|
||||
"qs": "6.5.1",
|
||||
"raw-body": "2.3.2",
|
||||
"type-is": "1.6.15"
|
||||
}
|
||||
},
|
||||
"bytes": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
|
||||
"integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
|
||||
},
|
||||
"callsite": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/callsite/-/callsite-1.0.0.tgz",
|
||||
"integrity": "sha1-KAOY5dZkvXQDi28JBRU+borxvCA="
|
||||
},
|
||||
"component-bind": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/component-bind/-/component-bind-1.0.0.tgz",
|
||||
"integrity": "sha1-AMYIq33Nk4l8AAllGx06jh5zu9E="
|
||||
},
|
||||
"component-emitter": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
|
||||
"integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY="
|
||||
},
|
||||
"component-inherit": {
|
||||
"version": "0.0.3",
|
||||
"resolved": "https://registry.npmjs.org/component-inherit/-/component-inherit-0.0.3.tgz",
|
||||
"integrity": "sha1-ZF/ErfWLcrZJ1crmUTVhnbJv8UM="
|
||||
},
|
||||
"content-disposition": {
|
||||
"version": "0.5.2",
|
||||
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
|
||||
"integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ="
|
||||
},
|
||||
"content-type": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
|
||||
"integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
|
||||
},
|
||||
"cookie": {
|
||||
"version": "0.3.1",
|
||||
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz",
|
||||
"integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s="
|
||||
},
|
||||
"cookie-signature": {
|
||||
"version": "1.0.6",
|
||||
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
|
||||
"integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
|
||||
},
|
||||
"debug": {
|
||||
"version": "2.6.9",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
|
||||
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
|
||||
"requires": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
},
|
||||
"depd": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/depd/-/depd-1.1.1.tgz",
|
||||
"integrity": "sha1-V4O04cRZ8G+lyif5kfPQbnoxA1k="
|
||||
},
|
||||
"destroy": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
|
||||
"integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
|
||||
},
|
||||
"ee-first": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
|
||||
"integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
|
||||
},
|
||||
"encodeurl": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.1.tgz",
|
||||
"integrity": "sha1-eePVhlU0aQn+bw9Fpd5oEDspTSA="
|
||||
},
|
||||
"engine.io": {
|
||||
"version": "3.1.4",
|
||||
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.1.4.tgz",
|
||||
"integrity": "sha1-PQIRtwpVLOhB/8fahiezAamkFi4=",
|
||||
"requires": {
|
||||
"accepts": "1.3.3",
|
||||
"base64id": "1.0.0",
|
||||
"cookie": "0.3.1",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-parser": "2.1.1",
|
||||
"uws": "0.14.5",
|
||||
"ws": "3.3.3"
|
||||
},
|
||||
"dependencies": {
|
||||
"accepts": {
|
||||
"version": "1.3.3",
|
||||
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.3.tgz",
|
||||
"integrity": "sha1-w8p0NJOGSMPg2cHjKN1otiLChMo=",
|
||||
"requires": {
|
||||
"mime-types": "2.1.17",
|
||||
"negotiator": "0.6.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"engine.io-client": {
|
||||
"version": "3.1.4",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.1.4.tgz",
|
||||
"integrity": "sha1-T88TcLRxY70s6b4nM5ckMDUNTqE=",
|
||||
"requires": {
|
||||
"component-emitter": "1.2.1",
|
||||
"component-inherit": "0.0.3",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-parser": "2.1.1",
|
||||
"has-cors": "1.1.0",
|
||||
"indexof": "0.0.1",
|
||||
"parseqs": "0.0.5",
|
||||
"parseuri": "0.0.5",
|
||||
"ws": "3.3.3",
|
||||
"xmlhttprequest-ssl": "1.5.4",
|
||||
"yeast": "0.1.2"
|
||||
}
|
||||
},
|
||||
"engine.io-parser": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.1.1.tgz",
|
||||
"integrity": "sha1-4Ps/DgRi9/WLt3waUun1p+JuRmg=",
|
||||
"requires": {
|
||||
"after": "0.8.2",
|
||||
"arraybuffer.slice": "0.0.6",
|
||||
"base64-arraybuffer": "0.1.5",
|
||||
"blob": "0.0.4",
|
||||
"has-binary2": "1.0.2"
|
||||
}
|
||||
},
|
||||
"escape-html": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
|
||||
"integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
|
||||
},
|
||||
"etag": {
|
||||
"version": "1.8.1",
|
||||
"resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
|
||||
"integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
|
||||
},
|
||||
"express": {
|
||||
"version": "4.16.2",
|
||||
"resolved": "https://registry.npmjs.org/express/-/express-4.16.2.tgz",
|
||||
"integrity": "sha1-41xt/i1kt9ygpc1PIXgb4ymeB2w=",
|
||||
"requires": {
|
||||
"accepts": "1.3.4",
|
||||
"array-flatten": "1.1.1",
|
||||
"body-parser": "1.18.2",
|
||||
"content-disposition": "0.5.2",
|
||||
"content-type": "1.0.4",
|
||||
"cookie": "0.3.1",
|
||||
"cookie-signature": "1.0.6",
|
||||
"debug": "2.6.9",
|
||||
"depd": "1.1.1",
|
||||
"encodeurl": "1.0.1",
|
||||
"escape-html": "1.0.3",
|
||||
"etag": "1.8.1",
|
||||
"finalhandler": "1.1.0",
|
||||
"fresh": "0.5.2",
|
||||
"merge-descriptors": "1.0.1",
|
||||
"methods": "1.1.2",
|
||||
"on-finished": "2.3.0",
|
||||
"parseurl": "1.3.2",
|
||||
"path-to-regexp": "0.1.7",
|
||||
"proxy-addr": "2.0.2",
|
||||
"qs": "6.5.1",
|
||||
"range-parser": "1.2.0",
|
||||
"safe-buffer": "5.1.1",
|
||||
"send": "0.16.1",
|
||||
"serve-static": "1.13.1",
|
||||
"setprototypeof": "1.1.0",
|
||||
"statuses": "1.3.1",
|
||||
"type-is": "1.6.15",
|
||||
"utils-merge": "1.0.1",
|
||||
"vary": "1.1.2"
|
||||
}
|
||||
},
|
||||
"finalhandler": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.0.tgz",
|
||||
"integrity": "sha1-zgtoVbRYU+eRsvzGgARtiCU91/U=",
|
||||
"requires": {
|
||||
"debug": "2.6.9",
|
||||
"encodeurl": "1.0.1",
|
||||
"escape-html": "1.0.3",
|
||||
"on-finished": "2.3.0",
|
||||
"parseurl": "1.3.2",
|
||||
"statuses": "1.3.1",
|
||||
"unpipe": "1.0.0"
|
||||
}
|
||||
},
|
||||
"forwarded": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
|
||||
"integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
|
||||
},
|
||||
"fresh": {
|
||||
"version": "0.5.2",
|
||||
"resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
|
||||
"integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
|
||||
},
|
||||
"has-binary2": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.2.tgz",
|
||||
"integrity": "sha1-6D26SfC5vk0CbSc2U1DZ8D9Uvpg=",
|
||||
"requires": {
|
||||
"isarray": "2.0.1"
|
||||
}
|
||||
},
|
||||
"has-cors": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/has-cors/-/has-cors-1.1.0.tgz",
|
||||
"integrity": "sha1-XkdHk/fqmEPRu5nCPu9J/xJv/zk="
|
||||
},
|
||||
"http-errors": {
|
||||
"version": "1.6.2",
|
||||
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.2.tgz",
|
||||
"integrity": "sha1-CgAsyFcHGSp+eUbO7cERVfYOxzY=",
|
||||
"requires": {
|
||||
"depd": "1.1.1",
|
||||
"inherits": "2.0.3",
|
||||
"setprototypeof": "1.0.3",
|
||||
"statuses": "1.3.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"setprototypeof": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.0.3.tgz",
|
||||
"integrity": "sha1-ZlZ+NwQ+608E2RvWWMDL77VbjgQ="
|
||||
}
|
||||
}
|
||||
},
|
||||
"iconv-lite": {
|
||||
"version": "0.4.19",
|
||||
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz",
|
||||
"integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ=="
|
||||
},
|
||||
"indexof": {
|
||||
"version": "0.0.1",
|
||||
"resolved": "https://registry.npmjs.org/indexof/-/indexof-0.0.1.tgz",
|
||||
"integrity": "sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10="
|
||||
},
|
||||
"inherits": {
|
||||
"version": "2.0.3",
|
||||
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
|
||||
"integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
|
||||
},
|
||||
"ipaddr.js": {
|
||||
"version": "1.5.2",
|
||||
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.5.2.tgz",
|
||||
"integrity": "sha1-1LUFvemUaYfM8PxY2QEP+WB+P6A="
|
||||
},
|
||||
"isarray": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.1.tgz",
|
||||
"integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4="
|
||||
},
|
||||
"media-typer": {
|
||||
"version": "0.3.0",
|
||||
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
|
||||
"integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
|
||||
},
|
||||
"merge-descriptors": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
|
||||
"integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
|
||||
},
|
||||
"methods": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
|
||||
"integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
|
||||
},
|
||||
"mime": {
|
||||
"version": "1.4.1",
|
||||
"resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz",
|
||||
"integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ=="
|
||||
},
|
||||
"mime-db": {
|
||||
"version": "1.30.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.30.0.tgz",
|
||||
"integrity": "sha1-dMZD2i3Z1qRTmZY0ZbJtXKfXHwE="
|
||||
},
|
||||
"mime-types": {
|
||||
"version": "2.1.17",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.17.tgz",
|
||||
"integrity": "sha1-Cdejk/A+mVp5+K+Fe3Cp4KsWVXo=",
|
||||
"requires": {
|
||||
"mime-db": "1.30.0"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
|
||||
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
|
||||
},
|
||||
"negotiator": {
|
||||
"version": "0.6.1",
|
||||
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
|
||||
"integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk="
|
||||
},
|
||||
"object-component": {
|
||||
"version": "0.0.3",
|
||||
"resolved": "https://registry.npmjs.org/object-component/-/object-component-0.0.3.tgz",
|
||||
"integrity": "sha1-8MaapQ78lbhmwYb0AKM3acsvEpE="
|
||||
},
|
||||
"on-finished": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
|
||||
"integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
|
||||
"requires": {
|
||||
"ee-first": "1.1.1"
|
||||
}
|
||||
},
|
||||
"parseqs": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.5.tgz",
|
||||
"integrity": "sha1-1SCKNzjkZ2bikbouoXNoSSGouJ0=",
|
||||
"requires": {
|
||||
"better-assert": "1.0.2"
|
||||
}
|
||||
},
|
||||
"parseuri": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/parseuri/-/parseuri-0.0.5.tgz",
|
||||
"integrity": "sha1-gCBKUNTbt3m/3G6+J3jZDkvOMgo=",
|
||||
"requires": {
|
||||
"better-assert": "1.0.2"
|
||||
}
|
||||
},
|
||||
"parseurl": {
|
||||
"version": "1.3.2",
|
||||
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
|
||||
"integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M="
|
||||
},
|
||||
"path-to-regexp": {
|
||||
"version": "0.1.7",
|
||||
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
|
||||
"integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
|
||||
},
|
||||
"proxy-addr": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.2.tgz",
|
||||
"integrity": "sha1-ZXFQT0e7mI7IGAJT+F3X4UlSvew=",
|
||||
"requires": {
|
||||
"forwarded": "0.1.2",
|
||||
"ipaddr.js": "1.5.2"
|
||||
}
|
||||
},
|
||||
"qs": {
|
||||
"version": "6.5.1",
|
||||
"resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz",
|
||||
"integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A=="
|
||||
},
|
||||
"range-parser": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
|
||||
"integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4="
|
||||
},
|
||||
"raw-body": {
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.2.tgz",
|
||||
"integrity": "sha1-vNYMd9Prk83gBQKVw/N5OJvIj4k=",
|
||||
"requires": {
|
||||
"bytes": "3.0.0",
|
||||
"http-errors": "1.6.2",
|
||||
"iconv-lite": "0.4.19",
|
||||
"unpipe": "1.0.0"
|
||||
}
|
||||
},
|
||||
"safe-buffer": {
|
||||
"version": "5.1.1",
|
||||
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz",
|
||||
"integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg=="
|
||||
},
|
||||
"send": {
|
||||
"version": "0.16.1",
|
||||
"resolved": "https://registry.npmjs.org/send/-/send-0.16.1.tgz",
|
||||
"integrity": "sha512-ElCLJdJIKPk6ux/Hocwhk7NFHpI3pVm/IZOYWqUmoxcgeyM+MpxHHKhb8QmlJDX1pU6WrgaHBkVNm73Sv7uc2A==",
|
||||
"requires": {
|
||||
"debug": "2.6.9",
|
||||
"depd": "1.1.1",
|
||||
"destroy": "1.0.4",
|
||||
"encodeurl": "1.0.1",
|
||||
"escape-html": "1.0.3",
|
||||
"etag": "1.8.1",
|
||||
"fresh": "0.5.2",
|
||||
"http-errors": "1.6.2",
|
||||
"mime": "1.4.1",
|
||||
"ms": "2.0.0",
|
||||
"on-finished": "2.3.0",
|
||||
"range-parser": "1.2.0",
|
||||
"statuses": "1.3.1"
|
||||
}
|
||||
},
|
||||
"serve-static": {
|
||||
"version": "1.13.1",
|
||||
"resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.1.tgz",
|
||||
"integrity": "sha512-hSMUZrsPa/I09VYFJwa627JJkNs0NrfL1Uzuup+GqHfToR2KcsXFymXSV90hoyw3M+msjFuQly+YzIH/q0MGlQ==",
|
||||
"requires": {
|
||||
"encodeurl": "1.0.1",
|
||||
"escape-html": "1.0.3",
|
||||
"parseurl": "1.3.2",
|
||||
"send": "0.16.1"
|
||||
}
|
||||
},
|
||||
"setprototypeof": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
|
||||
"integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
|
||||
},
|
||||
"socket.io": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.0.4.tgz",
|
||||
"integrity": "sha1-waRZDO/4fs8TxyZS8Eb3FrKeYBQ=",
|
||||
"requires": {
|
||||
"debug": "2.6.9",
|
||||
"engine.io": "3.1.4",
|
||||
"socket.io-adapter": "1.1.1",
|
||||
"socket.io-client": "2.0.4",
|
||||
"socket.io-parser": "3.1.2"
|
||||
}
|
||||
},
|
||||
"socket.io-adapter": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.1.tgz",
|
||||
"integrity": "sha1-KoBeihTWNyEk3ZFZrUUC+MsH8Gs="
|
||||
},
|
||||
"socket.io-client": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.0.4.tgz",
|
||||
"integrity": "sha1-CRilUkBtxeVAs4Dc2Xr8SmQzL44=",
|
||||
"requires": {
|
||||
"backo2": "1.0.2",
|
||||
"base64-arraybuffer": "0.1.5",
|
||||
"component-bind": "1.0.0",
|
||||
"component-emitter": "1.2.1",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-client": "3.1.4",
|
||||
"has-cors": "1.1.0",
|
||||
"indexof": "0.0.1",
|
||||
"object-component": "0.0.3",
|
||||
"parseqs": "0.0.5",
|
||||
"parseuri": "0.0.5",
|
||||
"socket.io-parser": "3.1.2",
|
||||
"to-array": "0.1.4"
|
||||
}
|
||||
},
|
||||
"socket.io-parser": {
|
||||
"version": "3.1.2",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.1.2.tgz",
|
||||
"integrity": "sha1-28IoIVH8T6675Aru3Ady66YZ9/I=",
|
||||
"requires": {
|
||||
"component-emitter": "1.2.1",
|
||||
"debug": "2.6.9",
|
||||
"has-binary2": "1.0.2",
|
||||
"isarray": "2.0.1"
|
||||
}
|
||||
},
|
||||
"statuses": {
|
||||
"version": "1.3.1",
|
||||
"resolved": "https://registry.npmjs.org/statuses/-/statuses-1.3.1.tgz",
|
||||
"integrity": "sha1-+vUbnrdKrvOzrPStX2Gr8ky3uT4="
|
||||
},
|
||||
"to-array": {
|
||||
"version": "0.1.4",
|
||||
"resolved": "https://registry.npmjs.org/to-array/-/to-array-0.1.4.tgz",
|
||||
"integrity": "sha1-F+bBH3PdTz10zaek/zI46a2b+JA="
|
||||
},
|
||||
"type-is": {
|
||||
"version": "1.6.15",
|
||||
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.15.tgz",
|
||||
"integrity": "sha1-yrEPtJCeRByChC6v4a1kbIGARBA=",
|
||||
"requires": {
|
||||
"media-typer": "0.3.0",
|
||||
"mime-types": "2.1.17"
|
||||
}
|
||||
},
|
||||
"ultron": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz",
|
||||
"integrity": "sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og=="
|
||||
},
|
||||
"unpipe": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
|
||||
"integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
|
||||
},
|
||||
"utils-merge": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
|
||||
"integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
|
||||
},
|
||||
"uws": {
|
||||
"version": "0.14.5",
|
||||
"resolved": "https://registry.npmjs.org/uws/-/uws-0.14.5.tgz",
|
||||
"integrity": "sha1-Z6rzPEaypYel9mZtAPdpEyjxSdw=",
|
||||
"optional": true
|
||||
},
|
||||
"vary": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
|
||||
"integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
|
||||
},
|
||||
"ws": {
|
||||
"version": "3.3.3",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-3.3.3.tgz",
|
||||
"integrity": "sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==",
|
||||
"requires": {
|
||||
"async-limiter": "1.0.0",
|
||||
"safe-buffer": "5.1.1",
|
||||
"ultron": "1.1.1"
|
||||
}
|
||||
},
|
||||
"xmlhttprequest-ssl": {
|
||||
"version": "1.5.4",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.4.tgz",
|
||||
"integrity": "sha1-BPVgkVcks4kIhxXMDteBPpZ3v1c="
|
||||
},
|
||||
"yeast": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/yeast/-/yeast-0.1.2.tgz",
|
||||
"integrity": "sha1-AI4G2AlDIMNy28L47XagymyKxBk="
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
{
|
||||
"name": "container-training-pub-sub-server",
|
||||
"version": "0.0.1",
|
||||
"dependencies": {
|
||||
"express": "^4.16.2",
|
||||
"socket.io": "^2.0.4"
|
||||
}
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
/* This snippet is loaded from the workshop HTML file.
|
||||
* It sets up callbacks to synchronize the local slide
|
||||
* number with the remote pub/sub server.
|
||||
*/
|
||||
|
||||
var socket = io();
|
||||
var leader = true;
|
||||
|
||||
slideshow.on('showSlide', function (slide) {
|
||||
if (leader) {
|
||||
var n = slide.getSlideIndex()+1;
|
||||
socket.emit('slide change', n);
|
||||
}
|
||||
});
|
||||
|
||||
socket.on('slide change', function (n) {
|
||||
leader = false;
|
||||
slideshow.gotoSlide(n);
|
||||
leader = true;
|
||||
});
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
/* This is a very simple pub/sub server, allowing to
|
||||
* remote control browsers displaying the slides.
|
||||
* The browsers connect to this pub/sub server using
|
||||
* Socket.IO, and the server tells them which slides
|
||||
* to display.
|
||||
*
|
||||
* The server can be controlled with a little CLI,
|
||||
* or by one of the browsers.
|
||||
*/
|
||||
|
||||
var express = require('express');
|
||||
var app = express();
|
||||
var http = require('http').Server(app);
|
||||
var io = require('socket.io')(http);
|
||||
|
||||
app.get('/', function(req, res){
|
||||
res.send('container.training autopilot pub/sub server');
|
||||
});
|
||||
|
||||
/* Serve remote.js from the current directory */
|
||||
app.use(express.static('.'));
|
||||
|
||||
/* Serve slides etc. from current and the parent directory */
|
||||
app.use(express.static('..'));
|
||||
|
||||
io.on('connection', function(socket){
|
||||
console.log('a client connected: ' + socket.handshake.address);
|
||||
socket.on('slide change', function(n, ack){
|
||||
console.log('slide change: ' + n);
|
||||
socket.broadcast.emit('slide change', n);
|
||||
if (typeof ack === 'function') {
|
||||
ack();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
http.listen(3000, function(){
|
||||
console.log('listening on *:3000');
|
||||
});
|
||||
@@ -1,7 +0,0 @@
|
||||
#!/bin/sh
|
||||
# This removes the clock (and other extraneous stuff) from the
|
||||
# tmux status bar, and it gives it a non-default color.
|
||||
tmux set-option -g status-left ""
|
||||
tmux set-option -g status-right ""
|
||||
tmux set-option -g status-style bg=cyan
|
||||
|
||||
@@ -2,16 +2,11 @@
|
||||
case "$1" in
|
||||
once)
|
||||
for YAML in *.yml; do
|
||||
./markmaker.py $YAML > $YAML.html || {
|
||||
./markmaker.py < $YAML > $YAML.html || {
|
||||
rm $YAML.html
|
||||
break
|
||||
}
|
||||
done
|
||||
if [ -n "$SLIDECHECKER" ]; then
|
||||
for YAML in *.yml; do
|
||||
./appendcheck.py $YAML.html
|
||||
done
|
||||
fi
|
||||
;;
|
||||
|
||||
forever)
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
# Declarative vs imperative
|
||||
|
||||
- Our container orchestrator puts a very strong emphasis on being *declarative*
|
||||
|
||||
- Declarative:
|
||||
|
||||
*I would like a cup of tea.*
|
||||
|
||||
- Imperative:
|
||||
|
||||
*Boil some water. Pour it in a teapot. Add tea leaves. Steep for a while. Serve in cup.*
|
||||
|
||||
--
|
||||
|
||||
- Declarative seems simpler at first ...
|
||||
|
||||
--
|
||||
|
||||
- ... As long as you know how to brew tea
|
||||
|
||||
---
|
||||
|
||||
## Declarative vs imperative
|
||||
|
||||
- What declarative would really be:
|
||||
|
||||
*I want a cup of tea, obtained by pouring an infusion¹ of tea leaves in a cup.*
|
||||
|
||||
--
|
||||
|
||||
*¹An infusion is obtained by letting the object steep a few minutes in hot² water.*
|
||||
|
||||
--
|
||||
|
||||
*²Hot liquid is obtained by pouring it in an appropriate container³ and setting it on a stove.*
|
||||
|
||||
--
|
||||
|
||||
*³Ah, finally, containers! Something we know about. Let's get to work, shall we?*
|
||||
|
||||
--
|
||||
|
||||
.footnote[Did you know there was an [ISO standard](https://en.wikipedia.org/wiki/ISO_3103)
|
||||
specifying how to brew tea?]
|
||||
|
||||
---
|
||||
|
||||
## Declarative vs imperative
|
||||
|
||||
- Imperative systems:
|
||||
|
||||
- simpler
|
||||
|
||||
- if a task is interrupted, we have to restart from scratch
|
||||
|
||||
- Declarative systems:
|
||||
|
||||
- if a task is interrupted (or if we show up to the party half-way through),
|
||||
we can figure out what's missing and do only what's necessary
|
||||
|
||||
- we need to be able to *observe* the system
|
||||
|
||||
- ... and compute a "diff" between *what we have* and *what we want*
|
||||
@@ -1,272 +0,0 @@
|
||||
# Pre-requirements
|
||||
|
||||
- Be comfortable with the UNIX command line
|
||||
|
||||
- navigating directories
|
||||
|
||||
- editing files
|
||||
|
||||
- a little bit of bash-fu (environment variables, loops)
|
||||
|
||||
- Some Docker knowledge
|
||||
|
||||
- `docker run`, `docker ps`, `docker build`
|
||||
|
||||
- ideally, you know how to write a Dockerfile and build it
|
||||
<br/>
|
||||
(even if it's a `FROM` line and a couple of `RUN` commands)
|
||||
|
||||
- It's totally OK if you are not a Docker expert!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Extra details
|
||||
|
||||
- This slide should have a little magnifying glass in the top left corner
|
||||
|
||||
(If it doesn't, it's because CSS is hard — Jérôme is only a backend person, alas)
|
||||
|
||||
- Slides with that magnifying glass indicate slides providing extra details
|
||||
|
||||
- Feel free to skip them if you're in a hurry!
|
||||
|
||||
---
|
||||
|
||||
class: title
|
||||
|
||||
*Tell me and I forget.*
|
||||
<br/>
|
||||
*Teach me and I remember.*
|
||||
<br/>
|
||||
*Involve me and I learn.*
|
||||
|
||||
Misattributed to Benjamin Franklin
|
||||
|
||||
[(Probably inspired by Chinese Confucian philosopher Xunzi)](https://www.barrypopik.com/index.php/new_york_city/entry/tell_me_and_i_forget_teach_me_and_i_may_remember_involve_me_and_i_will_lear/)
|
||||
|
||||
---
|
||||
|
||||
## Hands-on sections
|
||||
|
||||
- The whole workshop is hands-on
|
||||
|
||||
- We are going to build, ship, and run containers!
|
||||
|
||||
- You are invited to reproduce all the demos
|
||||
|
||||
- All hands-on sections are clearly identified, like the gray rectangle below
|
||||
|
||||
.exercise[
|
||||
|
||||
- This is the stuff you're supposed to do!
|
||||
|
||||
- Go to [swarm2017.container.training](http://swarm2017.container.training/) to view these slides
|
||||
|
||||
- Join the chat room on @@CHAT@@
|
||||
|
||||
<!-- ```open http://container.training/``` -->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Where are we going to run our containers?
|
||||
|
||||
---
|
||||
|
||||
class: in-person, pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## You get five VMs
|
||||
|
||||
- Each person gets 5 private VMs (not shared with anybody else)
|
||||
|
||||
- They'll remain up for the duration of the workshop
|
||||
|
||||
- You should have a little card with login+password+IP addresses
|
||||
|
||||
- You can automatically SSH from one VM to another
|
||||
|
||||
- The nodes have aliases: `node1`, `node2`, etc.
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Why don't we run containers locally?
|
||||
|
||||
- Installing that stuff can be hard on some machines
|
||||
|
||||
(32 bits CPU or OS... Laptops without administrator access... etc.)
|
||||
|
||||
- *"The whole team downloaded all these container images from the WiFi!
|
||||
<br/>... and it went great!"* (Literally no-one ever)
|
||||
|
||||
- All you need is a computer (or even a phone or tablet!), with:
|
||||
|
||||
- an internet connection
|
||||
|
||||
- a web browser
|
||||
|
||||
- an SSH client
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## SSH clients
|
||||
|
||||
- On Linux, OS X, FreeBSD... you are probably all set
|
||||
|
||||
- On Windows, get one of these:
|
||||
|
||||
- [putty](http://www.putty.org/)
|
||||
- Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH)
|
||||
- [Git BASH](https://git-for-windows.github.io/)
|
||||
- [MobaXterm](http://mobaxterm.mobatek.net/)
|
||||
|
||||
- On Android, [JuiceSSH](https://juicessh.com/)
|
||||
([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh))
|
||||
works pretty well
|
||||
|
||||
- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets
|
||||
<br/>(available with `(apt|yum|brew) install mosh`; then connect with `mosh user@host`)
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Connecting to our lab environment
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into the first VM (`node1`) with SSH or MOSH
|
||||
|
||||
<!--
|
||||
```bash
|
||||
for N in $(seq 1 5); do
|
||||
ssh -o StrictHostKeyChecking=no node$N true
|
||||
done
|
||||
```
|
||||
|
||||
```bash
|
||||
if which kubectl; then
|
||||
kubectl get all -o name | grep -v services/kubernetes | xargs -n1 kubectl delete
|
||||
fi
|
||||
```
|
||||
-->
|
||||
|
||||
- Check that you can SSH (without password) to `node2`:
|
||||
```bash
|
||||
ssh node2
|
||||
```
|
||||
- Type `exit` or `^D` to come back to node1
|
||||
|
||||
<!-- ```bash exit``` -->
|
||||
|
||||
]
|
||||
|
||||
If anything goes wrong — ask for help!
|
||||
|
||||
---
|
||||
|
||||
## Doing or re-doing the workshop on your own?
|
||||
|
||||
- Use something like
|
||||
[Play-With-Docker](http://play-with-docker.com/) or
|
||||
[Play-With-Kubernetes](https://medium.com/@marcosnils/introducing-pwk-play-with-k8s-159fcfeb787b)
|
||||
|
||||
Zero setup effort; but environment are short-lived and
|
||||
might have limited resources
|
||||
|
||||
- Create your own cluster (local or cloud VMs)
|
||||
|
||||
Small setup effort; small cost; flexible environments
|
||||
|
||||
- Create a bunch of clusters for you and your friends
|
||||
([instructions](https://github.com/jpetazzo/container.training/tree/master/prepare-vms))
|
||||
|
||||
Bigger setup effort; ideal for group training
|
||||
|
||||
---
|
||||
|
||||
class: self-paced
|
||||
|
||||
## Get your own Docker nodes
|
||||
|
||||
- If you already have some Docker nodes: great!
|
||||
|
||||
- If not: let's get some thanks to Play-With-Docker
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to http://www.play-with-docker.com/
|
||||
|
||||
- Log in
|
||||
|
||||
- Create your first node
|
||||
|
||||
<!-- ```open http://www.play-with-docker.com/``` -->
|
||||
|
||||
]
|
||||
|
||||
You will need a Docker ID to use Play-With-Docker.
|
||||
|
||||
(Creating a Docker ID is free.)
|
||||
|
||||
---
|
||||
|
||||
## We will (mostly) interact with node1 only
|
||||
|
||||
*These remarks apply only when using multiple nodes, of course.*
|
||||
|
||||
- Unless instructed, **all commands must be run from the first VM, `node1`**
|
||||
|
||||
- We will only checkout/copy the code on `node1`
|
||||
|
||||
- During normal operations, we do not need access to the other nodes
|
||||
|
||||
- If we had to troubleshoot issues, we would use a combination of:
|
||||
|
||||
- SSH (to access system logs, daemon status...)
|
||||
|
||||
- Docker API (to check running containers and container engine status)
|
||||
|
||||
---
|
||||
|
||||
## Terminals
|
||||
|
||||
Once in a while, the instructions will say:
|
||||
<br/>"Open a new terminal."
|
||||
|
||||
There are multiple ways to do this:
|
||||
|
||||
- create a new window or tab on your machine, and SSH into the VM;
|
||||
|
||||
- use screen or tmux on the VM and open a new window from there.
|
||||
|
||||
You are welcome to use the method that you feel the most comfortable with.
|
||||
|
||||
---
|
||||
|
||||
## Tmux cheatsheet
|
||||
|
||||
- Ctrl-b c → creates a new window
|
||||
- Ctrl-b n → go to next window
|
||||
- Ctrl-b p → go to previous window
|
||||
- Ctrl-b " → split window top/bottom
|
||||
- Ctrl-b % → split window left/right
|
||||
- Ctrl-b Alt-1 → rearrange windows in columns
|
||||
- Ctrl-b Alt-2 → rearrange windows in rows
|
||||
- Ctrl-b arrows → navigate to other windows
|
||||
- Ctrl-b d → detach session
|
||||
- tmux attach → reattach to session
|
||||
@@ -1,19 +0,0 @@
|
||||
## Using Play-With-Docker
|
||||
|
||||
- Open a new browser tab to [www.play-with-docker.com](http://www.play-with-docker.com/)
|
||||
|
||||
- Confirm that you're not a robot
|
||||
|
||||
- Click on "ADD NEW INSTANCE": congratulations, you have your first Docker node!
|
||||
|
||||
- When you will need more nodes, just click on "ADD NEW INSTANCE" again
|
||||
|
||||
- Note the countdown in the corner; when it expires, your instances are destroyed
|
||||
|
||||
- If you give your URL to somebody else, they can access your nodes too
|
||||
<br/>
|
||||
(You can use that for pair programming, or to get help from a mentor)
|
||||
|
||||
- Loving it? Not loving it? Tell it to the wonderful authors,
|
||||
[@marcosnils](https://twitter.com/marcosnils) &
|
||||
[@xetorthio](https://twitter.com/xetorthio)!
|
||||
@@ -1,16 +1,16 @@
|
||||
# Our sample application
|
||||
|
||||
- Visit the GitHub repository with all the materials of this workshop:
|
||||
<br/>https://github.com/jpetazzo/container.training
|
||||
<br/>https://github.com/jpetazzo/orchestration-workshop
|
||||
|
||||
- The application is in the [dockercoins](
|
||||
https://github.com/jpetazzo/container.training/tree/master/dockercoins)
|
||||
https://github.com/jpetazzo/orchestration-workshop/tree/master/dockercoins)
|
||||
subdirectory
|
||||
|
||||
- Let's look at the general layout of the source code:
|
||||
|
||||
there is a Compose file [docker-compose.yml](
|
||||
https://github.com/jpetazzo/container.training/blob/master/dockercoins/docker-compose.yml) ...
|
||||
https://github.com/jpetazzo/orchestration-workshop/blob/master/dockercoins/docker-compose.yml) ...
|
||||
|
||||
... and 4 other services, each in its own directory:
|
||||
|
||||
@@ -59,32 +59,25 @@ class: extra-details
|
||||
|
||||
## Example in `worker/worker.py`
|
||||
|
||||
```python
|
||||
redis = Redis("`redis`")
|
||||
|
||||
|
||||
def get_random_bytes():
|
||||
r = requests.get("http://`rng`/32")
|
||||
return r.content
|
||||
|
||||
|
||||
def hash_bytes(data):
|
||||
r = requests.post("http://`hasher`/",
|
||||
data=data,
|
||||
headers={"Content-Type": "application/octet-stream"})
|
||||
```
|
||||
|
||||
(Full source code available [here](
|
||||
https://github.com/jpetazzo/container.training/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17
|
||||
))
|
||||

|
||||
|
||||
---
|
||||
|
||||
## What's this application?
|
||||
|
||||
--
|
||||
---
|
||||
|
||||
- It is a DockerCoin miner! .emoji[💰🐳📦🚢]
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
(DockerCoins 2016 logo courtesy of [@XtlCnslt](https://twitter.com/xtlcnslt) and [@ndeloof](https://twitter.com/ndeloof). Thanks!)
|
||||
|
||||
---
|
||||
|
||||
## What's this application?
|
||||
|
||||
- It is a DockerCoin miner! 💰🐳📦🚢
|
||||
|
||||
--
|
||||
|
||||
@@ -116,15 +109,15 @@ https://github.com/jpetazzo/container.training/blob/8279a3bce9398f7c1a53bdd95187
|
||||
|
||||
<!--
|
||||
```bash
|
||||
if [ -d container.training ]; then
|
||||
mv container.training container.training.$$
|
||||
if [ -d orchestration-workshop ]; then
|
||||
mv orchestration-workshop orchestration-workshop.$$
|
||||
fi
|
||||
```
|
||||
-->
|
||||
|
||||
- Clone the repository on `node1`:
|
||||
```bash
|
||||
git clone git://github.com/jpetazzo/container.training
|
||||
git clone git://github.com/jpetazzo/orchestration-workshop
|
||||
```
|
||||
|
||||
]
|
||||
@@ -141,7 +134,7 @@ Without further ado, let's start our application.
|
||||
|
||||
- Go to the `dockercoins` directory, in the cloned repo:
|
||||
```bash
|
||||
cd ~/container.training/dockercoins
|
||||
cd ~/orchestration-workshop/dockercoins
|
||||
```
|
||||
|
||||
- Use Compose to build and run all containers:
|
||||
@@ -150,7 +143,7 @@ Without further ado, let's start our application.
|
||||
```
|
||||
|
||||
<!--
|
||||
```longwait units of work done```
|
||||
```wait units of work done```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
@@ -270,31 +263,11 @@ class: extra-details
|
||||
|
||||
]
|
||||
|
||||
A drawing area should show up, and after a few seconds, a blue
|
||||
graph will appear.
|
||||
You should see a speed of approximately 4 hashes/second.
|
||||
|
||||
---
|
||||
|
||||
class: self-paced, extra-details
|
||||
|
||||
## If the graph doesn't load
|
||||
|
||||
If you just see a `Page not found` error, it might be because your
|
||||
Docker Engine is running on a different machine. This can be the case if:
|
||||
|
||||
- you are using the Docker Toolbox
|
||||
|
||||
- you are using a VM (local or remote) created with Docker Machine
|
||||
|
||||
- you are controlling a remote Docker Engine
|
||||
|
||||
When you run DockerCoins in development mode, the web UI static files
|
||||
are mapped to the container using a volume. Alas, volumes can only
|
||||
work on a local environment, or when using Docker4Mac or Docker4Windows.
|
||||
|
||||
How to fix this?
|
||||
|
||||
Edit `dockercoins.yml` and comment out the `volumes` section, and try again.
|
||||
More precisely: 4 hashes/second, with regular dips down to zero.
|
||||
<br/>This is because Jérôme is incapable of writing good frontend code.
|
||||
<br/>Don't ask. Seriously, don't ask. This is embarrassing.
|
||||
|
||||
---
|
||||
|
||||
@@ -302,43 +275,19 @@ class: extra-details
|
||||
|
||||
## Why does the speed seem irregular?
|
||||
|
||||
- It *looks like* the speed is approximately 4 hashes/second
|
||||
|
||||
- Or more precisely: 4 hashes/second, with regular dips down to zero
|
||||
|
||||
- Why?
|
||||
|
||||
--
|
||||
|
||||
class: extra-details
|
||||
|
||||
- The app actually has a constant, steady speed: 3.33 hashes/second
|
||||
<br/>
|
||||
(which corresponds to 1 hash every 0.3 seconds, for *reasons*)
|
||||
|
||||
- Yes, and?
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The reason why this graph is *not awesome*
|
||||
|
||||
- The worker doesn't update the counter after every loop, but up to once per second
|
||||
|
||||
- The speed is computed by the browser, checking the counter about once per second
|
||||
|
||||
- Between two consecutive updates, the counter will increase either by 4, or by 0
|
||||
|
||||
- The perceived speed will therefore be 4 - 4 - 4 - 0 - 4 - 4 - 0 etc.
|
||||
- The perceived speed will therefore be 4 - 4 - 4 - 0 - 4 - 4 - etc.
|
||||
|
||||
- What can we conclude from this?
|
||||
|
||||
--
|
||||
|
||||
class: extra-details
|
||||
|
||||
- Jérôme is clearly incapable of writing good frontend code
|
||||
*We told you to not ask!!!*
|
||||
|
||||
---
|
||||
|
||||
@@ -484,12 +433,12 @@ We will use `httping`.
|
||||
|
||||
- Check the latency of `rng`:
|
||||
```bash
|
||||
httping -c 3 localhost:8001
|
||||
httping -c 10 localhost:8001
|
||||
```
|
||||
|
||||
- Check the latency of `hasher`:
|
||||
```bash
|
||||
httping -c 3 localhost:8002
|
||||
httping -c 10 localhost:8002
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
class: title, self-paced
|
||||
|
||||
Thank you!
|
||||
|
||||
---
|
||||
|
||||
class: title, in-person
|
||||
|
||||
That's all folks! <br/> Questions?
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
# Links and resources
|
||||
|
||||
- [Docker Community Slack](https://community.docker.com/registrations/groups/4316)
|
||||
- [Docker Community Forums](https://forums.docker.com/)
|
||||
- [Docker Hub](https://hub.docker.com)
|
||||
- [Docker Blog](http://blog.docker.com/)
|
||||
- [Docker documentation](http://docs.docker.com/)
|
||||
- [Docker on StackOverflow](https://stackoverflow.com/questions/tagged/docker)
|
||||
- [Docker on Twitter](http://twitter.com/docker)
|
||||
- [Play With Docker Hands-On Labs](http://training.play-with-docker.com/)
|
||||
|
||||
.footnote[These slides (and future updates) are on → http://container.training/]
|
||||
@@ -1,19 +0,0 @@
|
||||
class: title, self-paced
|
||||
|
||||
@@TITLE@@
|
||||
|
||||
---
|
||||
|
||||
class: title, in-person
|
||||
|
||||
@@TITLE@@<br/></br>
|
||||
|
||||
.footnote[
|
||||
**Be kind to the WiFi!**<br/>
|
||||
<!-- *Use the 5G network.* -->
|
||||
*Don't use your hotspot.*<br/>
|
||||
*Don't stream videos or download big files during the workshop.*<br/>
|
||||
*Thank you!*
|
||||
|
||||
**Slides: http://container.training/**
|
||||
]
|
||||
@@ -1,4 +0,0 @@
|
||||
|
||||
@@TOC@@
|
||||
|
||||
|
||||
182
slides/dockercon.yml
Normal file
@@ -0,0 +1,182 @@
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7ET1GY4Q)"
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- auto-btp
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
title: "Swarm: from Zero to Hero (DC17EU)"
|
||||
chapters:
|
||||
- |
|
||||
class: title
|
||||
|
||||
.small[
|
||||
|
||||
Swarm: from Zero to Hero
|
||||
|
||||
.small[.small[
|
||||
|
||||
**Be kind to the WiFi!**
|
||||
|
||||
*Use the 5G network*
|
||||
<br/>
|
||||
*Don't use your hotspot*
|
||||
<br/>
|
||||
*Don't stream videos from YouTube, Netflix, etc.
|
||||
<br/>(if you're bored, watch local content instead)*
|
||||
|
||||
Also: share the power outlets
|
||||
<br/>
|
||||
*(with limited power comes limited responsibility?)*
|
||||
<br/>
|
||||
*(or something?)*
|
||||
|
||||
Thank you!
|
||||
|
||||
]
|
||||
]
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Intros
|
||||
|
||||
<!--
|
||||
- Hello! We are
|
||||
AJ ([@s0ulshake](https://twitter.com/s0ulshake))
|
||||
&
|
||||
Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
|
||||
-->
|
||||
|
||||
- Hello! We are Jérôme, Lee, Nicholas, and Scott
|
||||
|
||||
<!--
|
||||
I am
|
||||
Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
|
||||
-->
|
||||
|
||||
--
|
||||
|
||||
- This is our collective Docker knowledge:
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## "From zero to hero"
|
||||
|
||||
--
|
||||
|
||||
- It rhymes, but it's a pretty bad title, to be honest
|
||||
|
||||
--
|
||||
|
||||
- None of you is a "zero"
|
||||
|
||||
--
|
||||
|
||||
- None of us is a "hero"
|
||||
|
||||
--
|
||||
|
||||
- None of us should even try to be a hero
|
||||
|
||||
--
|
||||
|
||||
*The hero syndrome is a phenomenon affecting people who seek heroism or recognition,
|
||||
usually by creating a desperate situation which they can resolve.
|
||||
This can include unlawful acts, such as arson.
|
||||
The phenomenon has been noted to affect civil servants,
|
||||
such as firefighters, nurses, police officers, and security guards.*
|
||||
|
||||
(Wikipedia page on [hero syndrome](https://en.wikipedia.org/wiki/Hero_syndrome))
|
||||
|
||||
---
|
||||
|
||||
## Agenda
|
||||
|
||||
.small[
|
||||
- 09:00-09:10 Hello!
|
||||
- 09:10-10:30 Part 1
|
||||
- 10:30-11:00 coffee break
|
||||
- 11:00-12:30 Part 2
|
||||
- 12:30-13:30 lunch break
|
||||
- 13:30-15:00 Part 3
|
||||
- 15:00-15:30 coffee break
|
||||
- 15:30-17:00 Part 4
|
||||
- 17:00-18:00 Afterhours and Q&A
|
||||
]
|
||||
|
||||
<!--
|
||||
- The tutorial will run from 9:00am to 12:20pm
|
||||
|
||||
- This will be fast-paced, but DON'T PANIC!
|
||||
|
||||
- There will be a coffee break at 10:30am
|
||||
<br/>
|
||||
(please remind me if I forget about it!)
|
||||
-->
|
||||
|
||||
- All the content is publicly available (slides, code samples, scripts)
|
||||
|
||||
Upstream URL: https://github.com/jpetazzo/orchestration-workshop
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
|
||||
- Live feedback, questions, help on [Gitter](chat)
|
||||
|
||||
http://container.training/chat
|
||||
|
||||
- swarm/intro.md
|
||||
- |
|
||||
@@TOC@@
|
||||
- - swarm/prereqs.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
class: title
|
||||
|
||||
All right!
|
||||
<br/>
|
||||
We're all set.
|
||||
<br/>
|
||||
Let's do this.
|
||||
- common/sampleapp.md
|
||||
- swarm/swarmkit.md
|
||||
- swarm/creatingswarm.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- - swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- swarm/end.md
|
||||
- |
|
||||
class: title
|
||||
|
||||
That's all folks! <br/> Questions?
|
||||
|
||||
.small[.small[
|
||||
|
||||
Jérôme ([@jpetazzo](https://twitter.com/jpetazzo)) — [@docker](https://twitter.com/docker)
|
||||
|
||||
]]
|
||||
|
||||
<!--
|
||||
Tiffany ([@tiffanyfayj](https://twitter.com/tiffanyfayj))
|
||||
AJ ([@s0ulshake](https://twitter.com/s0ulshake))
|
||||
-->
|
||||
@@ -1,2 +0,0 @@
|
||||
#!/bin/sh
|
||||
grep --color=auto -P -n "[^\x00-\x80]" */*.md
|
||||
|
Before Width: | Height: | Size: 97 KiB After Width: | Height: | Size: 97 KiB |
|
Before Width: | Height: | Size: 174 KiB After Width: | Height: | Size: 174 KiB |
|
Before Width: | Height: | Size: 927 KiB After Width: | Height: | Size: 927 KiB |
|
Before Width: | Height: | Size: 595 KiB After Width: | Height: | Size: 595 KiB |
|
Before Width: | Height: | Size: 80 KiB After Width: | Height: | Size: 80 KiB |
|
Before Width: | Height: | Size: 64 KiB After Width: | Height: | Size: 64 KiB |
BIN
slides/images/construction2.jpg
Normal file
|
After Width: | Height: | Size: 64 KiB |
|
Before Width: | Height: | Size: 187 KiB |
|
Before Width: | Height: | Size: 101 KiB After Width: | Height: | Size: 101 KiB |
|
Before Width: | Height: | Size: 49 KiB After Width: | Height: | Size: 49 KiB |
|
Before Width: | Height: | Size: 84 KiB After Width: | Height: | Size: 84 KiB |
|
Before Width: | Height: | Size: 203 KiB After Width: | Height: | Size: 203 KiB |
|
Before Width: | Height: | Size: 30 KiB After Width: | Height: | Size: 30 KiB |
|
Before Width: | Height: | Size: 9.8 KiB After Width: | Height: | Size: 9.8 KiB |
|
Before Width: | Height: | Size: 43 KiB After Width: | Height: | Size: 43 KiB |
|
Before Width: | Height: | Size: 25 KiB After Width: | Height: | Size: 25 KiB |
|
Before Width: | Height: | Size: 41 KiB After Width: | Height: | Size: 41 KiB |
|
Before Width: | Height: | Size: 68 KiB After Width: | Height: | Size: 68 KiB |
|
Before Width: | Height: | Size: 62 KiB After Width: | Height: | Size: 62 KiB |
|
Before Width: | Height: | Size: 190 KiB After Width: | Height: | Size: 190 KiB |
|
Before Width: | Height: | Size: 122 KiB After Width: | Height: | Size: 122 KiB |
|
Before Width: | Height: | Size: 350 KiB After Width: | Height: | Size: 350 KiB |
|
Before Width: | Height: | Size: 230 KiB After Width: | Height: | Size: 230 KiB |
|
Before Width: | Height: | Size: 155 KiB After Width: | Height: | Size: 155 KiB |
|
Before Width: | Height: | Size: 49 KiB After Width: | Height: | Size: 49 KiB |
|
Before Width: | Height: | Size: 174 KiB After Width: | Height: | Size: 174 KiB |
|
Before Width: | Height: | Size: 72 KiB |
|
Before Width: | Height: | Size: 85 KiB After Width: | Height: | Size: 85 KiB |
|
Before Width: | Height: | Size: 42 KiB After Width: | Height: | Size: 42 KiB |
|
Before Width: | Height: | Size: 28 KiB After Width: | Height: | Size: 28 KiB |
|
Before Width: | Height: | Size: 25 KiB After Width: | Height: | Size: 25 KiB |
@@ -1,160 +0,0 @@
|
||||
<html>
|
||||
<head>
|
||||
<title>Container Training</title>
|
||||
<style type="text/css">
|
||||
body {
|
||||
background-image: url("images/container-background.jpg");
|
||||
max-width: 1024px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
table {
|
||||
font-size: 20px;
|
||||
font-family: sans-serif;
|
||||
background: white;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
padding: 20px;
|
||||
}
|
||||
.header {
|
||||
font-size: 300%;
|
||||
font-weight: bold;
|
||||
}
|
||||
.title {
|
||||
font-size: 150%;
|
||||
font-weight: bold;
|
||||
}
|
||||
td {
|
||||
padding: 1px;
|
||||
height: 1em;
|
||||
}
|
||||
td.spacer {
|
||||
height: unset;
|
||||
}
|
||||
td.footer {
|
||||
padding-top: 80px;
|
||||
height: 100px;
|
||||
}
|
||||
td.title {
|
||||
border-bottom: thick solid black;
|
||||
padding-bottom: 2px;
|
||||
padding-top: 20px;
|
||||
}
|
||||
a {
|
||||
text-decoration: none;
|
||||
}
|
||||
a:hover {
|
||||
background: yellow;
|
||||
}
|
||||
a.attend:after {
|
||||
content: "📅 attend";
|
||||
}
|
||||
a.slides:after {
|
||||
content: "📚 slides";
|
||||
}
|
||||
a.chat:after {
|
||||
content: "💬 chat";
|
||||
}
|
||||
a.video:after {
|
||||
content: "📺 video";
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="main">
|
||||
<table>
|
||||
<tr><td class="header" colspan="4">Container Training</td></tr>
|
||||
|
||||
<tr><td class="title" colspan="4">Coming soon at a conference near you</td></tr>
|
||||
|
||||
<!--
|
||||
<td><a class="attend" href="https://qconsf.com/sf2017/workshop/orchestrating-microservices-docker-swarm" /></td>
|
||||
-->
|
||||
|
||||
<tr>
|
||||
<td>Nothing for now (stay tuned...)</td>
|
||||
</tr>
|
||||
|
||||
<tr><td class="title" colspan="4">Past workshops</td></tr>
|
||||
|
||||
<tr>
|
||||
<td>Kubernetes enablement at Docker</td>
|
||||
<td><a class="slides" href="http://kube.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>QCON SF: Orchestrating Microservices with Docker Swarm</td>
|
||||
<td><a class="slides" href="http://qconsf2017swarm.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>QCON SF: Introduction to Docker and Containers</td>
|
||||
<td><a class="slides" href="http://qconsf2017intro.container.training/" /></td>
|
||||
<td><a class="video" href="https://www.youtube.com/playlist?list=PLBAFXs0YjviLgqTum8MkspG_8VzGl6C07" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>LISA17 M7: Getting Started with Docker and Containers</td>
|
||||
<td><a class="slides" href="http://lisa17m7.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>LISA17 T9: Build, Ship, and Run Microservices on a Docker Swarm Cluster</td>
|
||||
<td><a class="slides" href="http://lisa17t9.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Deploying and scaling microservices with Docker and Kubernetes</td>
|
||||
<td><a class="slides" href="http://osseu17.container.training/" /></td>
|
||||
<td><a class="video" href="https://www.youtube.com/playlist?list=PLBAFXs0YjviLrsyydCzxWrIP_1-wkcSHS" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>DockerCon Workshop: from Zero to Hero (full day, B3 M1-2)</td>
|
||||
<td><a class="slides" href="http://dc17eu.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>DockerCon Workshop: Orchestration for Advanced Users (afternoon, B4 M5-6)</td>
|
||||
<td><a class="slides" href="https://www.bretfisher.com/dockercon17eu/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>LISA16 T1: Deploying and Scaling Applications with Docker Swarm</td>
|
||||
<td><a class="slides" href="http://lisa16t1.container.training/" /></td>
|
||||
<td><a class="video" href="https://www.youtube.com/playlist?list=PLBAFXs0YjviIDDhr8vIwCN1wkyNGXjbbc" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>PyCon2016: Introduction to Docker and containers</td>
|
||||
<td><a class="slides" href="https://us.pycon.org/2016/site_media/media/tutorial_handouts/DockerSlides.pdf" /></td>
|
||||
<td><a class="video" href="https://www.youtube.com/watch?v=ZVaRK10HBjo" /></td>
|
||||
</tr>
|
||||
|
||||
<tr><td class="title" colspan="4">Self-paced tutorials</td></tr>
|
||||
|
||||
<tr>
|
||||
<td>Introduction to Docker and Containers</td>
|
||||
<td><a class="slides" href="intro-fullday.yml.html" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Container Orchestration with Docker and Swarm</td>
|
||||
<td><a class="slides" href="swarm-selfpaced.yml.html" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Deploying and Scaling Microservices with Docker and Kubernetes</td>
|
||||
<td><a class="slides" href="kube-halfday.yml.html" /></td>
|
||||
</tr>
|
||||
|
||||
<tr><td class="spacer"></td></tr>
|
||||
|
||||
<tr>
|
||||
<td class="footer">
|
||||
Maintained by Jérôme Petazzoni (<a href="https://twitter.com/jpetazzo">@jpetazzo</a>)
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,15 +0,0 @@
|
||||
https://gallant-turing-d0d520.netlify.com/containers/Container-Ship-Freighter-Navigation-Elbe-Romance-1782991.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/ShippingContainerSFBay.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/aerial-view-of-containers.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/blue-containers.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/chinook-helicopter-container.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/container-cranes.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/container-housing.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/containers-by-the-water.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/distillery-containers.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/lots-of-containers.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/plastic-containers.JPG
|
||||
https://gallant-turing-d0d520.netlify.com/containers/train-of-containers-1.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/train-of-containers-2.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/two-containers-on-a-truck.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/wall-of-containers.jpeg
|
||||
@@ -1,42 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Docker and
|
||||
Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
- logistics.md
|
||||
- common/intro.md
|
||||
- common/toc.md
|
||||
- - intro/Docker_Overview.md
|
||||
#- intro/Docker_History.md
|
||||
- intro/Training_Environment.md
|
||||
- intro/Installing_Docker.md
|
||||
- intro/First_Containers.md
|
||||
- intro/Background_Containers.md
|
||||
- intro/Start_And_Attach.md
|
||||
- - intro/Initial_Images.md
|
||||
- intro/Building_Images_Interactively.md
|
||||
- intro/Building_Images_With_Dockerfiles.md
|
||||
- intro/Cmd_And_Entrypoint.md
|
||||
- intro/Copying_Files_During_Build.md
|
||||
- intro/Multi_Stage_Builds.md
|
||||
- intro/Publishing_To_Docker_Hub.md
|
||||
- intro/Dockerfile_Tips.md
|
||||
- - intro/Naming_And_Inspecting.md
|
||||
- intro/Container_Networking_Basics.md
|
||||
- intro/Network_Drivers.md
|
||||
- intro/Container_Network_Model.md
|
||||
#- intro/Connecting_Containers_With_Links.md
|
||||
- intro/Ambassadors.md
|
||||
- - intro/Local_Development_Workflow.md
|
||||
- intro/Working_With_Volumes.md
|
||||
- intro/Compose_For_Dev_Stacks.md
|
||||
- intro/Advanced_Dockerfiles.md
|
||||
- common/thankyou.md
|
||||
@@ -1,42 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Docker and
|
||||
Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
# - common/logistics.md
|
||||
- common/intro.md
|
||||
- common/toc.md
|
||||
- - intro/Docker_Overview.md
|
||||
#- intro/Docker_History.md
|
||||
- intro/Training_Environment.md
|
||||
- intro/Installing_Docker.md
|
||||
- intro/First_Containers.md
|
||||
- intro/Background_Containers.md
|
||||
- intro/Start_And_Attach.md
|
||||
- - intro/Initial_Images.md
|
||||
- intro/Building_Images_Interactively.md
|
||||
- intro/Building_Images_With_Dockerfiles.md
|
||||
- intro/Cmd_And_Entrypoint.md
|
||||
- intro/Copying_Files_During_Build.md
|
||||
- intro/Multi_Stage_Builds.md
|
||||
- intro/Publishing_To_Docker_Hub.md
|
||||
- intro/Dockerfile_Tips.md
|
||||
- - intro/Naming_And_Inspecting.md
|
||||
- intro/Container_Networking_Basics.md
|
||||
- intro/Network_Drivers.md
|
||||
- intro/Container_Network_Model.md
|
||||
#- intro/Connecting_Containers_With_Links.md
|
||||
- intro/Ambassadors.md
|
||||
- - intro/Local_Development_Workflow.md
|
||||
- intro/Working_With_Volumes.md
|
||||
- intro/Compose_For_Dev_Stacks.md
|
||||
- intro/Advanced_Dockerfiles.md
|
||||
- common/thankyou.md
|
||||
@@ -1,6 +1,9 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Advanced Dockerfiles
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ class: title
|
||||
|
||||
# Ambassadors
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -40,7 +40,7 @@ ambassador containers.
|
||||
|
||||
---
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Background containers
|
||||
# Background Containers
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Building images interactively
|
||||
# Building Images Interactively
|
||||
|
||||
In this section, we will create our first container image.
|
||||
|
||||
@@ -16,21 +16,27 @@ We will:
|
||||
|
||||
---
|
||||
|
||||
## The plan
|
||||
## Building Images Interactively
|
||||
|
||||
1. Create a container (with `docker run`) using our base distro of choice.
|
||||
As we have seen, the images on the Docker Hub are sometimes very basic.
|
||||
|
||||
2. Run a bunch of commands to install and set up our software in the container.
|
||||
How do we want to construct our own images?
|
||||
|
||||
3. (Optionally) review changes in the container with `docker diff`.
|
||||
As an example, we will build an image that has `figlet`.
|
||||
|
||||
4. Turn the container into a new image with `docker commit`.
|
||||
First, we will do it manually with `docker commit`.
|
||||
|
||||
5. (Optionally) add tags to the image with `docker tag`.
|
||||
Then, in an upcoming chapter, we will use a `Dockerfile` and `docker build`.
|
||||
|
||||
---
|
||||
|
||||
## Setting up our container
|
||||
## Building from a base
|
||||
|
||||
Our base will be the `ubuntu` image.
|
||||
|
||||
---
|
||||
|
||||
## Create a new container and make some changes
|
||||
|
||||
Start an Ubuntu container:
|
||||
|
||||
@@ -101,7 +107,7 @@ As explained before:
|
||||
|
||||
---
|
||||
|
||||
## Commit our changes into a new image
|
||||
## Commit and run your image
|
||||
|
||||
The `docker commit` command will create a new layer with those changes,
|
||||
and a new image using this new layer.
|
||||
@@ -113,13 +119,7 @@ $ docker commit <yourContainerId>
|
||||
|
||||
The output of the `docker commit` command will be the ID for your newly created image.
|
||||
|
||||
We can use it as an argument to `docker run`.
|
||||
|
||||
---
|
||||
|
||||
## Testing our new image
|
||||
|
||||
Let's run this image:
|
||||
We can run this image:
|
||||
|
||||
```bash
|
||||
$ docker run -it <newImageId>
|
||||
@@ -131,8 +131,6 @@ root@fcfb62f0bfde:/# figlet hello
|
||||
|_| |_|\___|_|_|\___/
|
||||
```
|
||||
|
||||
It works! .emoji[🎉]
|
||||
|
||||
---
|
||||
|
||||
## Tagging images
|
||||
|
||||
@@ -3,7 +3,7 @@ class: title
|
||||
|
||||
# Building Docker images with a Dockerfile
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -188,7 +188,7 @@ root@91f3c974c9a1:/# figlet hello
|
||||
```
|
||||
|
||||
|
||||
Yay! .emoji[🎉]
|
||||
Yay! 🎉
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
class: title
|
||||
|
||||
# `CMD` and `ENTRYPOINT`
|
||||
# CMD and ENTRYPOINT
|
||||
|
||||

|
||||
|
||||
@@ -141,7 +141,7 @@ Why did we use JSON syntax for our `ENTRYPOINT`?
|
||||
|
||||
* When CMD or ENTRYPOINT use string syntax, they get wrapped in `sh -c`.
|
||||
|
||||
* To avoid this wrapping, we can use JSON syntax.
|
||||
* To avoid this wrapping, you must use JSON syntax.
|
||||
|
||||
What if we used `ENTRYPOINT` with string syntax?
|
||||
|
||||
@@ -178,6 +178,8 @@ $ docker run figlet salut
|
||||
\/ \_/|_/|__/ \_/|_/|_/
|
||||
```
|
||||
|
||||
Great success!
|
||||
|
||||
---
|
||||
|
||||
## Using `CMD` and `ENTRYPOINT` together
|
||||
@@ -225,8 +227,9 @@ $ docker build -t figlet .
|
||||
Successfully built 6e0b6a048a07
|
||||
```
|
||||
|
||||
Run it without parameters:
|
||||
And run it:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker run figlet
|
||||
_ _ _ _
|
||||
@@ -234,15 +237,7 @@ $ docker run figlet
|
||||
| | _ | | | | __ __ ,_ | | __|
|
||||
|/ \ |/ |/ |/ / \_ | | |_/ \_/ | |/ / |
|
||||
| |_/|__/|__/|__/\__/ \/ \/ \__/ |_/|__/\_/|_/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Overriding the image default parameters
|
||||
|
||||
Now let's pass extra arguments to the image.
|
||||
|
||||
```bash
|
||||
$ docker run figlet hola mundo
|
||||
_ _
|
||||
| | | | |
|
||||
@@ -250,8 +245,7 @@ $ docker run figlet hola mundo
|
||||
|/ \ / \_|/ / | / |/ |/ | | | / |/ | / | / \_
|
||||
| |_/\__/ |__/\_/|_/ | | |_/ \_/|_/ | |_/\_/|_/\__/
|
||||
```
|
||||
|
||||
We overrode `CMD` but still used `ENTRYPOINT`.
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
# Compose for development stacks
|
||||
|
||||
# Compose For Development Stacks
|
||||
|
||||
Dockerfiles are great to build container images.
|
||||
|
||||
@@ -112,7 +113,6 @@ them.
|
||||
|
||||
Here is the file used in the demo:
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
version: "2"
|
||||
|
||||
@@ -131,7 +131,6 @@ services:
|
||||
redis:
|
||||
image: redis
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Connecting containers with links
|
||||
# Connecting Containers With Links
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ class: title
|
||||
|
||||
# The Container Network Model
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -126,16 +126,13 @@ $ docker run -d --name es --net dev elasticsearch:2
|
||||
|
||||
Now, create another container on this network.
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker run -ti --net dev alpine sh
|
||||
root@0ecccdfa45ef:/#
|
||||
```
|
||||
]
|
||||
|
||||
From this new container, we can resolve and ping the other one, using its assigned name:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
/ # ping es
|
||||
PING es (172.18.0.2) 56(84) bytes of data.
|
||||
@@ -148,7 +145,6 @@ PING es (172.18.0.2) 56(84) bytes of data.
|
||||
rtt min/avg/max/mdev = 0.114/0.149/0.221/0.052 ms
|
||||
root@0ecccdfa45ef:/#
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -159,7 +155,6 @@ class: extra-details
|
||||
In Docker Engine 1.9, name resolution is implemented with `/etc/hosts`, and
|
||||
updating it each time containers are added/removed.
|
||||
|
||||
.small[
|
||||
```bash
|
||||
[root@0ecccdfa45ef /]# cat /etc/hosts
|
||||
172.18.0.3 0ecccdfa45ef
|
||||
@@ -172,7 +167,6 @@ ff02::2 ip6-allrouters
|
||||
172.18.0.2 es
|
||||
172.18.0.2 es.dev
|
||||
```
|
||||
]
|
||||
|
||||
In Docker Engine 1.10, this has been replaced by a dynamic resolver.
|
||||
|
||||
@@ -180,7 +174,7 @@ In Docker Engine 1.10, this has been replaced by a dynamic resolver.
|
||||
|
||||
---
|
||||
|
||||
# Service discovery with containers
|
||||
## Connecting multiple containers together
|
||||
|
||||
* Let's try to run an application that requires two containers.
|
||||
|
||||
@@ -216,7 +210,9 @@ $ docker ps -l
|
||||
|
||||
* If we connect to the application now, we will see an error page:
|
||||
|
||||
.small[
|
||||

|
||||
]
|
||||
|
||||
* This is because the Redis service is not running.
|
||||
* This container tries to resolve the name `redis`.
|
||||
@@ -245,7 +241,9 @@ $ docker run --net dev --name redis -d redis
|
||||
|
||||
* If we connect to the application now, we should see that the app is working correctly:
|
||||
|
||||
.small[
|
||||

|
||||
]
|
||||
|
||||
* When the app tries to resolve `redis`, instead of getting a DNS error, it gets the IP address of our Redis container.
|
||||
|
||||
@@ -364,7 +362,6 @@ Each ElasticSearch instance has a name (generated when it is started). This name
|
||||
|
||||
Try the following command a few times:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker run --rm --net dev centos curl -s es:9200
|
||||
{
|
||||
@@ -372,11 +369,9 @@ $ docker run --rm --net dev centos curl -s es:9200
|
||||
...
|
||||
}
|
||||
```
|
||||
]
|
||||
|
||||
Then try it a few times by replacing `--net dev` with `--net prod`:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker run --rm --net prod centos curl -s es:9200
|
||||
{
|
||||
@@ -384,7 +379,6 @@ $ docker run --rm --net prod centos curl -s es:9200
|
||||
...
|
||||
}
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -492,7 +486,7 @@ Very short instructions:
|
||||
- `docker network create mynet --driver overlay`
|
||||
- `docker service create --network mynet myimage`
|
||||
|
||||
See http://jpetazzo.github.io/container.training for all the deets about clustering!
|
||||
See http://jpetazzo.github.io/orchestration-workshop for all the deets about clustering!
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Container networking basics
|
||||
# Container Networking Basics
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -69,7 +69,7 @@ But first, let's make sure that everything works properly.
|
||||
Point your browser to the IP address of your Docker host, on the port
|
||||
shown by `docker ps` for container port 80.
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -189,6 +189,87 @@ $ ping <ipAddress>
|
||||
|
||||
---
|
||||
|
||||
## The different network drivers
|
||||
|
||||
A container can use one of the following drivers:
|
||||
|
||||
* `bridge` (default)
|
||||
* `none`
|
||||
* `host`
|
||||
* `container`
|
||||
|
||||
The driver is selected with `docker run --net ...`.
|
||||
|
||||
The different drivers are explained with more details on the following slides.
|
||||
|
||||
---
|
||||
|
||||
## The default bridge
|
||||
|
||||
* By default, the container gets a virtual `eth0` interface.
|
||||
<br/>(In addition to its own private `lo` loopback interface.)
|
||||
|
||||
* That interface is provided by a `veth` pair.
|
||||
|
||||
* It is connected to the Docker bridge.
|
||||
<br/>(Named `docker0` by default; configurable with `--bridge`.)
|
||||
|
||||
* Addresses are allocated on a private, internal subnet.
|
||||
<br/>(Docker uses 172.17.0.0/16 by default; configurable with `--bip`.)
|
||||
|
||||
* Outbound traffic goes through an iptables MASQUERADE rule.
|
||||
|
||||
* Inbound traffic goes through an iptables DNAT rule.
|
||||
|
||||
* The container can have its own routes, iptables rules, etc.
|
||||
|
||||
---
|
||||
|
||||
## The null driver
|
||||
|
||||
* Container is started with `docker run --net none ...`
|
||||
|
||||
* It only gets the `lo` loopback interface. No `eth0`.
|
||||
|
||||
* It can't send or receive network traffic.
|
||||
|
||||
* Useful for isolated/untrusted workloads.
|
||||
|
||||
---
|
||||
|
||||
## The host driver
|
||||
|
||||
* Container is started with `docker run --net host ...`
|
||||
|
||||
* It sees (and can access) the network interfaces of the host.
|
||||
|
||||
* It can bind any address, any port (for ill and for good).
|
||||
|
||||
* Network traffic doesn't have to go through NAT, bridge, or veth.
|
||||
|
||||
* Performance = native!
|
||||
|
||||
Use cases:
|
||||
|
||||
* Performance sensitive applications (VOIP, gaming, streaming...)
|
||||
|
||||
* Peer discovery (e.g. Erlang port mapper, Raft, Serf...)
|
||||
|
||||
---
|
||||
|
||||
## The container driver
|
||||
|
||||
* Container is started with `docker run --net container:id ...`
|
||||
|
||||
* It re-uses the network stack of another container.
|
||||
|
||||
* It shares with this other container the same interfaces, IP address(es), routes, iptables rules, etc.
|
||||
|
||||
* Those containers can communicate over their `lo` interface.
|
||||
<br/>(i.e. one can bind to 127.0.0.1 and the others can connect to it.)
|
||||
|
||||
---
|
||||
|
||||
## Section summary
|
||||
|
||||
We've learned how to:
|
||||
|
||||
@@ -3,7 +3,7 @@ class: title
|
||||
|
||||
# Copying files during the build
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
32
slides/intro/Course_Conclusion.md
Normal file
@@ -0,0 +1,32 @@
|
||||
class: title
|
||||
|
||||
# Course Conclusion
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Questions & Next Steps
|
||||
|
||||
A bunch of useful links:
|
||||
|
||||
* Docker homepage - http://www.docker.com/
|
||||
* Docker Hub - https://hub.docker.com
|
||||
* Docker blog - http://blog.docker.com/
|
||||
* Docker documentation - http://docs.docker.com/
|
||||
* Docker code on GitHub - https://github.com/docker/docker
|
||||
* Docker mailing list - https://groups.google.com/forum/#!forum/docker-user
|
||||
* Docker on IRC: irc.freenode.net and channels `#docker` and `#docker-dev`
|
||||
* Docker on Twitter - http://twitter.com/docker
|
||||
* Get Docker help on Stack Overflow - http://stackoverflow.com/search?q=docker
|
||||
* Play With Docker Hands-On Labs - http://training.play-with-docker.com/
|
||||
|
||||
These slides are at: http://container.training/
|
||||
|
||||
---
|
||||
|
||||
class: title
|
||||
|
||||
Thank You!
|
||||
|
||||
.small[http://container.training/]
|
||||
@@ -20,7 +20,7 @@ class: pic
|
||||
|
||||
## The VPS age (until 2007-2008)
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
35
slides/intro/Docker_Hub_Tease.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# Publishing images to the Docker Hub
|
||||
|
||||
We have built our first images.
|
||||
|
||||
If we were so inclined, we could share those images through the Docker Hub.
|
||||
|
||||
We won't do it since we don't want to force everyone to create a Docker Hub account (although it's free, yay!) but the steps would be:
|
||||
|
||||
* have an account on the Docker Hub
|
||||
|
||||
* tag our image accordingly (i.e. `username/imagename`)
|
||||
|
||||
* `docker push username/imagename`
|
||||
|
||||
Anybody can now `docker run username/imagename` from any Docker host.
|
||||
|
||||
Images can be set to be private as well.
|
||||
|
||||
---
|
||||
|
||||
## The goodness of automated builds
|
||||
|
||||
* You can link a Docker Hub repository with a GitHub or BitBucket repository
|
||||
|
||||
* Each push to GitHub or BitBucket will trigger a build on Docker Hub
|
||||
|
||||
* If the build succeeds, the new image is available on Docker Hub
|
||||
|
||||
* You can map tags and branches between source and container images
|
||||
|
||||
* If you work with public repositories, this is free
|
||||
|
||||
* Corollary: this gives you a very simple way to get free, basic CI
|
||||
|
||||
(With the technique presented earlier)
|
||||
@@ -58,7 +58,7 @@ class: pic
|
||||
|
||||
## The deployment problem
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -66,7 +66,7 @@ class: pic
|
||||
|
||||
## The matrix from hell
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -74,7 +74,7 @@ class: pic
|
||||
|
||||
## The parallel with the shipping indsutry
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -82,7 +82,7 @@ class: pic
|
||||
|
||||
## Intermodal shipping containers
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -90,7 +90,7 @@ class: pic
|
||||
|
||||
## A new shipping ecosystem
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -98,7 +98,7 @@ class: pic
|
||||
|
||||
## A shipping container system for applications
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -106,29 +106,17 @@ class: pic
|
||||
|
||||
## Eliminate the matrix from hell
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
## Results
|
||||
|
||||
* [Dev-to-prod reduced from 9 months to 15 minutes (ING)](
|
||||
https://www.docker.com/sites/default/files/CS_ING_01.25.2015_1.pdf)
|
||||
* Dev-to-prod reduced from 9 months to 15 minutes (ING)
|
||||
|
||||
* [Continuous integration job time reduced by more than 60% (BBC)](
|
||||
https://www.docker.com/sites/default/files/CS_BBCNews_01.25.2015_1.pdf)
|
||||
* Continuous integration job time reduced by more than 60% (BBC)
|
||||
|
||||
* [Deploy 100 times a day instead of once a week (GILT)](
|
||||
https://www.docker.com/sites/default/files/CS_Gilt%20Groupe_03.18.2015_0.pdf)
|
||||
|
||||
* [70% infrastructure consolidation (MetLife)](
|
||||
https://www.docker.com/customers/metlife-transforms-customer-experience-legacy-and-microservices-mashup)
|
||||
|
||||
* [60% infrastructure consolidation (Intesa Sanpaolo)](
|
||||
https://blog.docker.com/2017/11/intesa-sanpaolo-builds-resilient-foundation-banking-docker-enterprise-edition/)
|
||||
|
||||
* [14x application density; 60% of legacy datacenter migrated in 4 months (GE Appliances)](
|
||||
https://www.docker.com/customers/ge-uses-docker-enable-self-service-their-developers)
|
||||
* Dev-to-prod reduced from weeks to minutes (GILT)
|
||||
|
||||
* etc.
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Our first containers
|
||||
# Our First Containers
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -51,13 +51,10 @@ root@04c0bb0a6c07:/#
|
||||
```
|
||||
|
||||
* This is a brand new container.
|
||||
|
||||
* It runs a bare-bones, no-frills `ubuntu` system.
|
||||
|
||||
* `-it` is shorthand for `-i -t`.
|
||||
|
||||
* `-i` tells Docker to connect us to the container's stdin.
|
||||
|
||||
* `-t` tells Docker that we want a pseudo-terminal.
|
||||
|
||||
---
|
||||
@@ -75,6 +72,22 @@ Alright, we need to install it.
|
||||
|
||||
---
|
||||
|
||||
## An observation
|
||||
|
||||
Let's check how many packages are installed here.
|
||||
|
||||
```bash
|
||||
root@04c0bb0a6c07:/# dpkg -l | wc -l
|
||||
189
|
||||
```
|
||||
|
||||
* `dpkg -l` lists the packages installed in our container
|
||||
* `wc -l` counts them
|
||||
* If you have a Debian or Ubuntu machine, you can run the same command
|
||||
and compare the results.
|
||||
|
||||
---
|
||||
|
||||
## Install a package in our container
|
||||
|
||||
We want `figlet`, so let's install it:
|
||||
@@ -91,12 +104,6 @@ Reading package lists... Done
|
||||
|
||||
One minute later, `figlet` is installed!
|
||||
|
||||
---
|
||||
|
||||
## Try to run our freshly installed program
|
||||
|
||||
The `figlet` program takes a message as parameter.
|
||||
|
||||
```bash
|
||||
root@04c0bb0a6c07:/# figlet hello
|
||||
_ _ _
|
||||
@@ -106,30 +113,11 @@ root@04c0bb0a6c07:/# figlet hello
|
||||
|_| |_|\___|_|_|\___/
|
||||
```
|
||||
|
||||
Beautiful! .emoji[😍]
|
||||
|
||||
---
|
||||
|
||||
## Counting packages in the container
|
||||
## Exiting our container
|
||||
|
||||
Let's check how many packages are installed there.
|
||||
|
||||
```bash
|
||||
root@04c0bb0a6c07:/# dpkg -l | wc -l
|
||||
190
|
||||
```
|
||||
|
||||
* `dpkg -l` lists the packages installed in our container
|
||||
|
||||
* `wc -l` counts them
|
||||
|
||||
How many packages do we have on our host?
|
||||
|
||||
---
|
||||
|
||||
## Counting packages on the host
|
||||
|
||||
Exit the container by logging out of the shell, like you would usually do.
|
||||
Just exit the shell, like you would usually do.
|
||||
|
||||
(E.g. with `^D` or `exit`)
|
||||
|
||||
@@ -137,36 +125,10 @@ Exit the container by logging out of the shell, like you would usually do.
|
||||
root@04c0bb0a6c07:/# exit
|
||||
```
|
||||
|
||||
Now, try to:
|
||||
|
||||
* run `dpkg -l | wc -l`. How many packages are installed?
|
||||
|
||||
* run `figlet`. Does that work?
|
||||
|
||||
---
|
||||
|
||||
## Host and containers are independent things
|
||||
|
||||
* We ran an `ubuntu` container on an `ubuntu` host.
|
||||
|
||||
* But they have different, independent packages.
|
||||
|
||||
* Installing something on the host doesn't expose it to the container.
|
||||
|
||||
* And vice-versa.
|
||||
|
||||
* We can run *any container* on *any host*.
|
||||
|
||||
---
|
||||
|
||||
## Where's our container?
|
||||
|
||||
* Our container is now in a *stopped* state.
|
||||
|
||||
* It still exists on disk, but all compute resources have been freed up.
|
||||
|
||||
* We will see later how to get back to that container.
|
||||
|
||||
---
|
||||
|
||||
## Starting another container
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Understanding Docker images
|
||||
# Understanding Docker Images
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Installing Docker
|
||||
# Install Docker
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Local development workflow with Docker
|
||||
# Local Development Workflow with Docker
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -17,7 +17,7 @@ At the end of this section, you will be able to:
|
||||
|
||||
---
|
||||
|
||||
## Containerized local development environments
|
||||
## Using a Docker container for local development
|
||||
|
||||
We want to solve the following issues:
|
||||
|
||||
@@ -31,25 +31,28 @@ By using Docker containers, we will get a consistent development environment.
|
||||
|
||||
---
|
||||
|
||||
## Working on the "namer" application
|
||||
## Our "namer" application
|
||||
|
||||
* We have to work on some application whose code is at:
|
||||
* The code is available on https://github.com/jpetazzo/namer.
|
||||
|
||||
https://github.com/jpetazzo/namer.
|
||||
* The image jpetazzo/namer is automatically built by the Docker Hub.
|
||||
|
||||
* What is it? We don't know yet!
|
||||
|
||||
* Let's download the code.
|
||||
Let's run it with:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/jpetazzo/namer
|
||||
$ docker run -dP jpetazzo/namer
|
||||
```
|
||||
|
||||
Check the port number with `docker ps` and open the application.
|
||||
|
||||
---
|
||||
|
||||
## Looking at the code
|
||||
## Let's look at the code
|
||||
|
||||
Let's download our application's source code.
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/jpetazzo/namer
|
||||
$ cd namer
|
||||
$ ls -1
|
||||
company_name_generator.rb
|
||||
@@ -59,13 +62,11 @@ Dockerfile
|
||||
Gemfile
|
||||
```
|
||||
|
||||
--
|
||||
|
||||
Aha, a `Gemfile`! This is Ruby. Probably. We know this. Maybe?
|
||||
|
||||
---
|
||||
|
||||
## Looking at the `Dockerfile`
|
||||
## Where's my code?
|
||||
|
||||
According to the Dockerfile, the code is copied into `/src` :
|
||||
|
||||
```dockerfile
|
||||
FROM ruby
|
||||
@@ -79,85 +80,9 @@ CMD ["rackup", "--host", "0.0.0.0"]
|
||||
EXPOSE 9292
|
||||
```
|
||||
|
||||
* This application is using a base `ruby` image.
|
||||
* The code is copied in `/src`.
|
||||
* Dependencies are installed with `bundler`.
|
||||
* The application is started with `rackup`.
|
||||
* It is listening on port 9292.
|
||||
We want to make changes *inside the container* without rebuilding it each time.
|
||||
|
||||
---
|
||||
|
||||
## Building and running the "namer" application
|
||||
|
||||
* Let's build the application with the `Dockerfile`!
|
||||
|
||||
--
|
||||
|
||||
```bash
|
||||
$ docker build -t namer .
|
||||
```
|
||||
|
||||
--
|
||||
|
||||
* Then run it. *We need to expose its ports.*
|
||||
|
||||
--
|
||||
|
||||
```bash
|
||||
$ docker run -dP namer
|
||||
```
|
||||
|
||||
--
|
||||
|
||||
* Check on which port the container is listening.
|
||||
|
||||
--
|
||||
|
||||
```bash
|
||||
$ docker ps -l
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Connecting to our application
|
||||
|
||||
* Point our browser to our Docker node, on the port allocated to the container.
|
||||
|
||||
--
|
||||
|
||||
* Hit "reload" a few times.
|
||||
|
||||
--
|
||||
|
||||
* This is an enterprise-class, carrier-grade, ISO-compliant company name generator!
|
||||
|
||||
(With 50% more bullshit than the average competition!)
|
||||
|
||||
(Wait, was that 50% more, or 50% less? *Anyway!*)
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Making changes to the code
|
||||
|
||||
Option 1:
|
||||
|
||||
* Edit the code locally
|
||||
* Rebuild the image
|
||||
* Re-run the container
|
||||
|
||||
Option 2:
|
||||
|
||||
* Enter the container (with `docker exec`)
|
||||
* Install an editor
|
||||
* Make changes from within the container
|
||||
|
||||
Option 3:
|
||||
|
||||
* Use a *volume* to mount local files into the container
|
||||
* Make changes locally
|
||||
* Changes are reflected into the container
|
||||
For that, we will use a *volume*.
|
||||
|
||||
---
|
||||
|
||||
@@ -166,16 +91,16 @@ Option 3:
|
||||
We will tell Docker to map the current directory to `/src` in the container.
|
||||
|
||||
```bash
|
||||
$ docker run -d -v $(pwd):/src -P namer
|
||||
$ docker run -d -v $(pwd):/src -p 80:9292 jpetazzo/namer
|
||||
```
|
||||
|
||||
* `-d`: the container should run in detached mode (in the background).
|
||||
|
||||
* `-v`: the following host directory should be mounted inside the container.
|
||||
|
||||
* `-P`: publish all the ports exposed by this image.
|
||||
* `-p`: connections to port 80 on the host should be routed to port 9292 in the container.
|
||||
|
||||
* `namer` is the name of the image we will run.
|
||||
* `jpetazzo/namer` is the name of the image we will run.
|
||||
|
||||
* We don't specify a command to run because it is already set in the Dockerfile.
|
||||
|
||||
@@ -183,15 +108,14 @@ $ docker run -d -v $(pwd):/src -P namer
|
||||
|
||||
## Mounting volumes inside containers
|
||||
|
||||
The `-v` flag mounts a directory from your host into your Docker container.
|
||||
|
||||
The flag structure is:
|
||||
The `-v` flag mounts a directory from your host into your Docker
|
||||
container. The flag structure is:
|
||||
|
||||
```bash
|
||||
[host-path]:[container-path]:[rw|ro]
|
||||
```
|
||||
|
||||
* If `[host-path]` or `[container-path]` doesn't exist it is created.
|
||||
* If [host-path] or [container-path] doesn't exist it is created.
|
||||
|
||||
* You can control the write status of the volume with the `ro` and
|
||||
`rw` options.
|
||||
@@ -204,15 +128,27 @@ There will be a full chapter about volumes!
|
||||
|
||||
## Testing the development container
|
||||
|
||||
* Check the port used by our new container.
|
||||
Now let us see if our new container is running.
|
||||
|
||||
```bash
|
||||
$ docker ps -l
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
045885b68bc5 namer rackup 3 seconds ago Up ... 0.0.0.0:32770->9292/tcp ...
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
045885b68bc5 trai... rackup 3 seconds ago Up ... 0.0.0.0:80->9292/tcp ...
|
||||
```
|
||||
|
||||
* Open the application in your web browser.
|
||||
---
|
||||
|
||||
## Viewing our application
|
||||
|
||||
Now let's browse to our web application on:
|
||||
|
||||
```bash
|
||||
http://<yourHostIP>:80
|
||||
```
|
||||
|
||||
We can see our company naming application.
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -238,121 +174,53 @@ color: red;
|
||||
|
||||
---
|
||||
|
||||
## Viewing our changes
|
||||
## Refreshing our application
|
||||
|
||||
* Reload the application in our browser.
|
||||
Now let's refresh our browser:
|
||||
|
||||
--
|
||||
```bash
|
||||
http://<yourHostIP>:80
|
||||
```
|
||||
|
||||
* The color should have changed.
|
||||
We can see the updated color of our company naming application.
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
## Understanding volumes
|
||||
## Improving the workflow with Compose
|
||||
|
||||
* Volumes are *not* copying or synchronizing files between the host and the container.
|
||||
* You can also start the container with the following command:
|
||||
|
||||
* Volumes are *bind mounts*: a kernel mechanism associating a path to another.
|
||||
```bash
|
||||
$ docker-compose up -d
|
||||
```
|
||||
|
||||
* Bind mounts are *kind of* similar to symbolic links, but at a very different level.
|
||||
* This works thanks to the Compose file, `docker-compose.yml`:
|
||||
|
||||
* Changes made on the host or on the container will be visible on the other side.
|
||||
|
||||
(Since under the hood, it's the same file on both anyway.)
|
||||
```yaml
|
||||
www:
|
||||
build: .
|
||||
volumes:
|
||||
- .:/src
|
||||
ports:
|
||||
- 80:9292
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Trash your servers and burn your code
|
||||
## Why Compose?
|
||||
|
||||
*(This is the title of a
|
||||
[2013 blog post](http://chadfowler.com/2013/06/23/immutable-deployments.html)
|
||||
by Chad Fowler, where he explains the concept of immutable infrastructure.)*
|
||||
* Specifying all those "docker run" parameters is tedious.
|
||||
|
||||
--
|
||||
* And error-prone.
|
||||
|
||||
* Let's mess up majorly with our container.
|
||||
* We can "encode" those parameters in a "Compose file."
|
||||
|
||||
(Remove files or whatever.)
|
||||
|
||||
* Now, how can we fix this?
|
||||
|
||||
--
|
||||
|
||||
* Our old container (with the blue version of the code) is still running.
|
||||
|
||||
* See on which port it is exposed:
|
||||
```bash
|
||||
docker ps
|
||||
```
|
||||
|
||||
* Point our browser to it to confirm that it still works fine.
|
||||
|
||||
---
|
||||
|
||||
## Immutable infrastructure in a nutshell
|
||||
|
||||
* Instead of *updating* a server, we deploy a new one.
|
||||
|
||||
* This might be challenging with classical servers, but it's trivial with containers.
|
||||
|
||||
* In fact, with Docker, the most logical workflow is to build a new image and run it.
|
||||
|
||||
* If something goes wrong with the new image, we can always restart the old one.
|
||||
|
||||
* We can even keep both versions running side by side.
|
||||
|
||||
If this pattern sounds interesting, you might want to read about *blue/green deployment*
|
||||
and *canary deployments*.
|
||||
|
||||
---
|
||||
|
||||
## Improving the workflow
|
||||
|
||||
The workflow that we showed is nice, but it requires us to:
|
||||
|
||||
* keep track of all the `docker run` flags required to run the container,
|
||||
|
||||
* inspect the `Dockerfile` to know which path(s) to mount,
|
||||
|
||||
* write scripts to hide that complexity.
|
||||
|
||||
There has to be a better way!
|
||||
|
||||
---
|
||||
|
||||
## Docker Compose to the rescue
|
||||
|
||||
* Docker Compose allows us to "encode" `docker run` parameters in a YAML file.
|
||||
|
||||
* Here is the `docker-compose.yml` file that we can use for our "namer" app:
|
||||
|
||||
```yaml
|
||||
www:
|
||||
build: .
|
||||
volumes:
|
||||
- .:/src
|
||||
ports:
|
||||
- 80:9292
|
||||
```
|
||||
|
||||
* Try it:
|
||||
```bash
|
||||
$ docker-compose up -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Working with Docker Compose
|
||||
|
||||
* When you see a `docker-compose.yml` file, you can use `docker-compose up`.
|
||||
|
||||
* It can build images and run them with the required parameters.
|
||||
* When you see a `docker-compose.yml` file, you know that you can use `docker-compose up`.
|
||||
|
||||
* Compose can also deal with complex, multi-container apps.
|
||||
|
||||
(More on this later!)
|
||||
<br/>(More on this later.)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@
|
||||
|
||||
---
|
||||
|
||||
## Multi-stage builds for our C program
|
||||
## Implementing multi-stage builds for our C program
|
||||
|
||||
We will change our Dockerfile to:
|
||||
|
||||
@@ -65,7 +65,7 @@ The resulting Dockerfile is on the next slide.
|
||||
|
||||
---
|
||||
|
||||
## Multi-stage build `Dockerfile`
|
||||
## Revised Dockerfile implementing multi-stage build
|
||||
|
||||
Here is the final Dockerfile:
|
||||
|
||||
@@ -89,7 +89,7 @@ docker run hellomultistage
|
||||
|
||||
---
|
||||
|
||||
## Comparing single/multi-stage build image sizes
|
||||
## Comparing single-stage and multi-stage image sizes
|
||||
|
||||
List our images with `docker images`, and check the size of:
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ class: title
|
||||
|
||||
# Naming and inspecting containers
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -85,8 +85,16 @@ The `docker inspect` command will output a very detailed JSON map.
|
||||
```bash
|
||||
$ docker inspect <containerID>
|
||||
[{
|
||||
...
|
||||
(many pages of JSON here)
|
||||
"AppArmorProfile": "",
|
||||
"Args": [],
|
||||
"Config": {
|
||||
"AttachStderr": true,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": true,
|
||||
"Cmd": [
|
||||
"bash"
|
||||
],
|
||||
"CpuShares": 0,
|
||||
...
|
||||
```
|
||||
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
# Container network drivers
|
||||
|
||||
The Docker Engine supports many different network drivers.
|
||||
|
||||
The built-in drivers include:
|
||||
|
||||
* `bridge` (default)
|
||||
|
||||
* `none`
|
||||
|
||||
* `host`
|
||||
|
||||
* `container`
|
||||
|
||||
The driver is selected with `docker run --net ...`.
|
||||
|
||||
The different drivers are explained with more details on the following slides.
|
||||
|
||||
---
|
||||
|
||||
## The default bridge
|
||||
|
||||
* By default, the container gets a virtual `eth0` interface.
|
||||
<br/>(In addition to its own private `lo` loopback interface.)
|
||||
|
||||
* That interface is provided by a `veth` pair.
|
||||
|
||||
* It is connected to the Docker bridge.
|
||||
<br/>(Named `docker0` by default; configurable with `--bridge`.)
|
||||
|
||||
* Addresses are allocated on a private, internal subnet.
|
||||
<br/>(Docker uses 172.17.0.0/16 by default; configurable with `--bip`.)
|
||||
|
||||
* Outbound traffic goes through an iptables MASQUERADE rule.
|
||||
|
||||
* Inbound traffic goes through an iptables DNAT rule.
|
||||
|
||||
* The container can have its own routes, iptables rules, etc.
|
||||
|
||||
---
|
||||
|
||||
## The null driver
|
||||
|
||||
* Container is started with `docker run --net none ...`
|
||||
|
||||
* It only gets the `lo` loopback interface. No `eth0`.
|
||||
|
||||
* It can't send or receive network traffic.
|
||||
|
||||
* Useful for isolated/untrusted workloads.
|
||||
|
||||
---
|
||||
|
||||
## The host driver
|
||||
|
||||
* Container is started with `docker run --net host ...`
|
||||
|
||||
* It sees (and can access) the network interfaces of the host.
|
||||
|
||||
* It can bind any address, any port (for ill and for good).
|
||||
|
||||
* Network traffic doesn't have to go through NAT, bridge, or veth.
|
||||
|
||||
* Performance = native!
|
||||
|
||||
Use cases:
|
||||
|
||||
* Performance sensitive applications (VOIP, gaming, streaming...)
|
||||
|
||||
* Peer discovery (e.g. Erlang port mapper, Raft, Serf...)
|
||||
|
||||
---
|
||||
|
||||
## The container driver
|
||||
|
||||
* Container is started with `docker run --net container:id ...`
|
||||
|
||||
* It re-uses the network stack of another container.
|
||||
|
||||
* It shares with this other container the same interfaces, IP address(es), routes, iptables rules, etc.
|
||||
|
||||
* Those containers can communicate over their `lo` interface.
|
||||
<br/>(i.e. one can bind to 127.0.0.1 and the others can connect to it.)
|
||||
|
||||
@@ -1,102 +0,0 @@
|
||||
# Publishing images to the Docker Hub
|
||||
|
||||
We have built our first images.
|
||||
|
||||
We can now publish it to the Docker Hub!
|
||||
|
||||
*You don't have to do the exercises in this section,
|
||||
because they require an account on the Docker Hub, and we
|
||||
don't want to force anyone to create one.*
|
||||
|
||||
*Note, however, that creating an account on the Docker Hub
|
||||
is free (and doesn't require a credit card), and hosting
|
||||
public images is free as well.*
|
||||
|
||||
---
|
||||
|
||||
## Logging into our Docker Hub account
|
||||
|
||||
* This can be done from the Docker CLI:
|
||||
```bash
|
||||
docker login
|
||||
```
|
||||
|
||||
.warning[When running Docker4Mac, Docker4Windows, or
|
||||
Docker on a Linux workstation, it can (and will when
|
||||
possible) integrate with your system's keyring to
|
||||
store your credentials securely. However, on most Linux
|
||||
servers, it will store your credentials in `~/.docker/config`.]
|
||||
|
||||
---
|
||||
|
||||
## Image tags and registry addresses
|
||||
|
||||
* Docker image tags are like Git tags and branches.
|
||||
|
||||
* They are like *bookmarks* pointing at a specific image ID.
|
||||
|
||||
* Tagging an image doesn't *rename* an image: it adds another tag.
|
||||
|
||||
* When pushing an image to a registry, the registry address is in the tag.
|
||||
|
||||
Example: `registry.example.net:5000/image`
|
||||
|
||||
* What about Docker Hub images?
|
||||
|
||||
--
|
||||
|
||||
* `jpetazzo/clock` is, in fact, `index.docker.io/jpetazzo/clock`
|
||||
|
||||
* `ubuntu` is, in fact, `library/ubuntu`, i.e. `index.docker.io/library/ubuntu`
|
||||
|
||||
---
|
||||
|
||||
## Tagging an image to push it on the Hub
|
||||
|
||||
* Let's tag our `figlet` image (or any other to our liking):
|
||||
```bash
|
||||
docker tag figlet jpetazzo/figlet
|
||||
```
|
||||
|
||||
* And push it to the Hub:
|
||||
```bash
|
||||
docker push jpetazzo/figlet
|
||||
```
|
||||
|
||||
* That's it!
|
||||
|
||||
--
|
||||
|
||||
* Anybody can now `docker run jpetazzo/figlet` anywhere.
|
||||
|
||||
---
|
||||
|
||||
## The goodness of automated builds
|
||||
|
||||
* You can link a Docker Hub repository with a GitHub or BitBucket repository
|
||||
|
||||
* Each push to GitHub or BitBucket will trigger a build on Docker Hub
|
||||
|
||||
* If the build succeeds, the new image is available on Docker Hub
|
||||
|
||||
* You can map tags and branches between source and container images
|
||||
|
||||
* If you work with public repositories, this is free
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Setting up an automated build
|
||||
|
||||
* We need a Dockerized repository!
|
||||
* Let's go to https://github.com/jpetazzo/trainingwheels and fork it.
|
||||
* Go to the Docker Hub (https://hub.docker.com/).
|
||||
* Select "Create" in the top-right bar, and select "Create Automated Build."
|
||||
* Connect your Docker Hub account to your GitHub account.
|
||||
* Select your user and the repository that we just forked.
|
||||
* Create.
|
||||
* Then go to "Build Settings."
|
||||
* Put `/www` in "Dockerfile Location" (or whichever directory the Dockerfile is in).
|
||||
* Click "Trigger" to build the repository immediately (without waiting for a git push).
|
||||
* Subsequent builds will happen automatically, thanks to GitHub hooks.
|
||||
@@ -2,7 +2,7 @@ class: title
|
||||
|
||||
# Our training environment
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Working with volumes
|
||||
# Working with Volumes
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -19,7 +19,7 @@ At the end of this section, you will be able to:
|
||||
|
||||
---
|
||||
|
||||
## Working with volumes
|
||||
## Working with Volumes
|
||||
|
||||
Docker volumes can be used to achieve many things, including:
|
||||
|
||||
@@ -95,12 +95,12 @@ We will see an example in the following slides.
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Sharing app server logs with another container
|
||||
## Sharing web application logs with another container
|
||||
|
||||
Let's start a Tomcat container:
|
||||
|
||||
```bash
|
||||
$ docker run --name webapp -d -p 8080:8080 -v /usr/local/tomcat/logs tomcat
|
||||
$ docker run --name webapp -d -p 8080:8080 -v /usr/local/tomcat/logs tomcat
|
||||
```
|
||||
|
||||
Now, start an `alpine` container accessing the same volume:
|
||||
@@ -311,9 +311,9 @@ QUIT
|
||||
|
||||
---
|
||||
|
||||
## Volumes lifecycle
|
||||
## What happens when you remove containers with volumes?
|
||||
|
||||
* When you remove a container, its volumes are kept around.
|
||||
* Volumes are kept around.
|
||||
|
||||
* You can list them with `docker volume ls`.
|
||||
|
||||
@@ -371,9 +371,9 @@ $ docker inspect <yourContainerID>
|
||||
|
||||
---
|
||||
|
||||
## Sharing a single file
|
||||
## Sharing a single file between the host and a container
|
||||
|
||||
The same `-v` flag can be used to share a single file (instead of a directory).
|
||||
The same `-v` flag can be used to share a single file.
|
||||
|
||||
One of the most interesting examples is to share the Docker control socket.
|
||||
|
||||
@@ -381,11 +381,8 @@ One of the most interesting examples is to share the Docker control socket.
|
||||
$ docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker sh
|
||||
```
|
||||
|
||||
From that container, you can now run `docker` commands communicating with
|
||||
the Docker Engine running on the host. Try `docker ps`!
|
||||
|
||||
.warning[Since that container has access to the Docker socket, it
|
||||
has root-like access to the host.]
|
||||
Warning: when using such mounts, the container gains root-like access to the host.
|
||||
It can potentially do bad things.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Docker and Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
- logistics.md
|
||||
- common/intro.md
|
||||
- common/toc.md
|
||||
- - common/prereqs.md
|
||||
- kube/versions-k8s.md
|
||||
- common/sampleapp.md
|
||||
- - kube/concepts-k8s.md
|
||||
- common/declarative.md
|
||||
- kube/declarative.md
|
||||
- kube/kubenet.md
|
||||
- kube/kubectlget.md
|
||||
- kube/setup-k8s.md
|
||||
- kube/kubectlrun.md
|
||||
- - kube/kubectlexpose.md
|
||||
- kube/ourapponkube.md
|
||||
- kube/dashboard.md
|
||||
- - kube/kubectlscale.md
|
||||
- kube/daemonset.md
|
||||
- kube/rollout.md
|
||||
- kube/whatsnext.md
|
||||
- common/thankyou.md
|
||||
@@ -1,33 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Docker and Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
#- logistics.md
|
||||
- common/intro.md
|
||||
- common/toc.md
|
||||
- - common/prereqs.md
|
||||
- kube/versions-k8s.md
|
||||
- common/sampleapp.md
|
||||
- - kube/concepts-k8s.md
|
||||
- common/declarative.md
|
||||
- kube/declarative.md
|
||||
- kube/kubenet.md
|
||||
- kube/kubectlget.md
|
||||
- kube/setup-k8s.md
|
||||
- kube/kubectlrun.md
|
||||
- - kube/kubectlexpose.md
|
||||
- kube/ourapponkube.md
|
||||
- kube/dashboard.md
|
||||
- - kube/kubectlscale.md
|
||||
- kube/daemonset.md
|
||||
- kube/rollout.md
|
||||
- kube/whatsnext.md
|
||||
- common/thankyou.md
|
||||
@@ -184,7 +184,7 @@ Yes!
|
||||
|
||||
*Probably not (in the future)*
|
||||
|
||||
.footnote[More information about CRI [on the Kubernetes blog](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html)]
|
||||
.footnote[More information about CRI [on the Kubernetes blog](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html).]
|
||||
|
||||
---
|
||||
|
||||
@@ -208,6 +208,89 @@ Yes!
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||

|
||||
|
||||
(Diagram courtesy of Weave Works, used with permission.)
|
||||
|
||||
---
|
||||
|
||||
# Declarative vs imperative
|
||||
|
||||
- Kubernetes puts a very strong emphasis on being *declarative*
|
||||
|
||||
- Declarative:
|
||||
|
||||
*I would like a cup of tea.*
|
||||
|
||||
- Imperative:
|
||||
|
||||
*Boil some water. Pour it in a teapot. Add tea leaves. Steep for a while. Serve in cup.*
|
||||
|
||||
--
|
||||
|
||||
- Declarative seems simpler at first ...
|
||||
|
||||
--
|
||||
|
||||
- ... As long as you know how to brew tea
|
||||
|
||||
---
|
||||
|
||||
## Declarative vs imperative
|
||||
|
||||
- What declarative would really be:
|
||||
|
||||
*I want a cup of tea, obtained by pouring an infusion¹ of tea leaves in a cup.*
|
||||
|
||||
--
|
||||
|
||||
*¹An infusion is obtained by letting the object steep a few minutes in hot² water.*
|
||||
|
||||
--
|
||||
|
||||
*²Hot liquid is obtained by pouring it in an appropriate container³ and setting it on a stove.*
|
||||
|
||||
--
|
||||
|
||||
*³Ah, finally, containers! Something we know about. Let's get to work, shall we?*
|
||||
|
||||
--
|
||||
|
||||
.footnote[Did you know there was an [ISO standard](https://en.wikipedia.org/wiki/ISO_3103)
|
||||
specifying how to brew tea?]
|
||||
|
||||
---
|
||||
|
||||
## Declarative vs imperative
|
||||
|
||||
- Imperative systems:
|
||||
|
||||
- simpler
|
||||
|
||||
- if a task is interrupted, we have to restart from scratch
|
||||
|
||||
- Declarative systems:
|
||||
|
||||
- if a task is interrupted (or if we show up to the party half-way through),
|
||||
we can figure out what's missing and do only what's necessary
|
||||
|
||||
- we need to be able to *observe* the system
|
||||
|
||||
- ... and compute a "diff" between *what we have* and *what we want*
|
||||
|
||||
---
|
||||
|
||||
## Declarative vs imperative in Kubernetes
|
||||
|
||||
- Virtually everything we create in Kubernetes is created from a *spec*
|
||||
|
||||
- Watch for the `spec` fields in the YAML files later!
|
||||
|
||||
- The *spec* describes *how we want the thing to be*
|
||||
|
||||
- Kubernetes will *reconcile* the current state with the spec
|
||||
<br/>(technically, this is done by a number of *controllers*)
|
||||
|
||||
- When we want to change some resource, we update the *spec*
|
||||
|
||||
- Kubernetes will then *converge* that resource
|
||||
|
||||
@@ -336,7 +336,7 @@ Of course, option 2 offers more learning opportunities. Right?
|
||||
|
||||
---
|
||||
|
||||
## We've put resources in your resources
|
||||
## We've put resources in your resources all the way down
|
||||
|
||||
- Reminder: a daemon set is a resource that creates more resources!
|
||||
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
## Declarative vs imperative in Kubernetes
|
||||
|
||||
- Virtually everything we create in Kubernetes is created from a *spec*
|
||||
|
||||
- Watch for the `spec` fields in the YAML files later!
|
||||
|
||||
- The *spec* describes *how we want the thing to be*
|
||||
|
||||
- Kubernetes will *reconcile* the current state with the spec
|
||||
<br/>(technically, this is done by a number of *controllers*)
|
||||
|
||||
- When we want to change some resource, we update the *spec*
|
||||
|
||||
- Kubernetes will then *converge* that resource
|
||||
15
slides/kube/intro-ks.md
Normal file
@@ -0,0 +1,15 @@
|
||||
## About these slides
|
||||
|
||||
- Your one-stop shop to awesomeness:
|
||||
|
||||
http://container.training/
|
||||
|
||||
- The content that you're viewing right now is in a public GitHub repository:
|
||||
|
||||
https://github.com/jpetazzo/orchestration-workshop
|
||||
|
||||
- Typos? Mistakes? Questions? Feel free to hover over the bottom of the slide ...
|
||||
|
||||
--
|
||||
|
||||
.footnote[👇 Try it! The source file will be shown and you can view it on GitHub and fork and edit it.]
|
||||
@@ -42,7 +42,7 @@
|
||||
|
||||
---
|
||||
|
||||
## Obtaining machine-readable output
|
||||
## From human-readable to machine-readable output
|
||||
|
||||
- `kubectl get` can output JSON, YAML, or be directly formatted
|
||||
|
||||
|
||||
@@ -55,7 +55,7 @@ We should see the following things:
|
||||
|
||||
---
|
||||
|
||||
## What are these different things?
|
||||
## Deployments, replica sets, and replication controllers
|
||||
|
||||
- A *deployment* is a high-level construct
|
||||
|
||||
@@ -236,13 +236,14 @@ Unfortunately, `--follow` cannot (yet) be used to stream the logs from multiple
|
||||
|
||||
class: title
|
||||
|
||||
Meanwhile,
|
||||
.small[
|
||||
Meanwhile, at the Google NOC ...
|
||||
|
||||
.small[
|
||||
Why the hell
|
||||
<br/>
|
||||
at the Google NOC ...
|
||||
are we getting 1000 packets per second
|
||||
<br/>
|
||||
<br/>
|
||||
.small[“Why the hell]
|
||||
<br/>
|
||||
.small[are we getting 1000 packets per second]
|
||||
<br/>
|
||||
.small[of ICMP ECHO traffic from EC2 ?!?”]
|
||||
of ICMP ECHO traffic from EC2 ?!?
|
||||
]
|
||||
]
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes network model: the less good
|
||||
## Kubernetes network model: the bad and the ugly
|
||||
|
||||
- Everything can reach everything
|
||||
|
||||
|
||||