Compare commits
163 Commits
srecon2018
...
avril2018
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
051dd13c21 | ||
|
|
8c3d4c2c56 | ||
|
|
817e17a3a8 | ||
|
|
6ad7a285e7 | ||
|
|
e529eaed2d | ||
|
|
4697c6c6ad | ||
|
|
56e47c3550 | ||
|
|
b3a9ba339c | ||
|
|
8d0ce37a59 | ||
|
|
a1bbbd6f7b | ||
|
|
e48016a0de | ||
|
|
39765c9ad0 | ||
|
|
9d4a72a4ba | ||
|
|
19e39aea49 | ||
|
|
ca06269f00 | ||
|
|
da064a6005 | ||
|
|
a12a38a7a9 | ||
|
|
2c3a442a4c | ||
|
|
25d560cf46 | ||
|
|
c3324cf64c | ||
|
|
053bbe7028 | ||
|
|
9876a9aaa6 | ||
|
|
853ba7ec39 | ||
|
|
5ef96a29ac | ||
|
|
f261e7aa96 | ||
|
|
8e44e911ca | ||
|
|
fce69b6bb2 | ||
|
|
1183e2e4bf | ||
|
|
de3082e48f | ||
|
|
3acac34e4b | ||
|
|
3bac124921 | ||
|
|
ba44603d0f | ||
|
|
3d5c89774c | ||
|
|
358f844c88 | ||
|
|
21bb5fa9e1 | ||
|
|
3fe4d730e7 | ||
|
|
74bf2d742c | ||
|
|
acba3d5467 | ||
|
|
056b3a7127 | ||
|
|
292885566d | ||
|
|
cfc066c8ea | ||
|
|
4f69f19866 | ||
|
|
c508f88af2 | ||
|
|
9757fdb42f | ||
|
|
24d57f535b | ||
|
|
e42dfc0726 | ||
|
|
c7198b3538 | ||
|
|
af1347ca17 | ||
|
|
f741cf5b23 | ||
|
|
d3c0a60de9 | ||
|
|
83bba80f3b | ||
|
|
44e0cfb878 | ||
|
|
a58e21e313 | ||
|
|
1131635006 | ||
|
|
c6e477e6ab | ||
|
|
a54287a6bb | ||
|
|
e1fe41b7d7 | ||
|
|
18a81120bc | ||
|
|
17cd67f4d0 | ||
|
|
817e3f9217 | ||
|
|
bb94c6fe76 | ||
|
|
fd05530fff | ||
|
|
38a40d56a0 | ||
|
|
86f2395b2c | ||
|
|
96fd2e26fd | ||
|
|
60f68351c6 | ||
|
|
035d015a61 | ||
|
|
581bbc847d | ||
|
|
83efd145b8 | ||
|
|
da7cbc41d2 | ||
|
|
282e22acb9 | ||
|
|
c6c1a942e7 | ||
|
|
59f5ff7788 | ||
|
|
1fbf7b7dbd | ||
|
|
249947b0dd | ||
|
|
9374eebdf6 | ||
|
|
e9af03e976 | ||
|
|
ab583e2670 | ||
|
|
dcd5c5b39a | ||
|
|
974f8ee244 | ||
|
|
8212aa378a | ||
|
|
403d4c6408 | ||
|
|
142681fa27 | ||
|
|
69c9141817 | ||
|
|
9ed88e7608 | ||
|
|
b216f4d90b | ||
|
|
26ee07d8ba | ||
|
|
a8e5b02fb4 | ||
|
|
80a8912a53 | ||
|
|
1ba6797f25 | ||
|
|
11a2167dea | ||
|
|
af4eeb6e6b | ||
|
|
ea6459e2bd | ||
|
|
2dfa5a9660 | ||
|
|
b86434fbd3 | ||
|
|
223525cc69 | ||
|
|
fd63c079c8 | ||
|
|
ebe4511c57 | ||
|
|
e1a81ef8f3 | ||
|
|
3382c83d6e | ||
|
|
a89430673f | ||
|
|
fcea6dbdb6 | ||
|
|
c744a7d168 | ||
|
|
0256dc8640 | ||
|
|
41819794d7 | ||
|
|
836903cb02 | ||
|
|
7f822d33b5 | ||
|
|
232fdbb1ff | ||
|
|
f3f6111622 | ||
|
|
a8378e7e7f | ||
|
|
eb3165096f | ||
|
|
90ca58cda8 | ||
|
|
5a81526387 | ||
|
|
8df073b8ac | ||
|
|
0f7356b002 | ||
|
|
0c2166fb5f | ||
|
|
d228222fa6 | ||
|
|
e4b7d3244e | ||
|
|
7d0e841a73 | ||
|
|
9859e441e1 | ||
|
|
e1c638439f | ||
|
|
253aaaad97 | ||
|
|
a249ccc12b | ||
|
|
22fb898267 | ||
|
|
e038797875 | ||
|
|
7b9f9e23c0 | ||
|
|
01d062a68f | ||
|
|
a66dfb5faf | ||
|
|
ac1480680a | ||
|
|
13a9b5ca00 | ||
|
|
0cdf6abf0b | ||
|
|
2071694983 | ||
|
|
12e2b18a6f | ||
|
|
28e128756d | ||
|
|
a15109a12c | ||
|
|
e500fb57e8 | ||
|
|
f1849092eb | ||
|
|
f1dbd7e8a6 | ||
|
|
d417f454dd | ||
|
|
d79718d834 | ||
|
|
de9c3a1550 | ||
|
|
90fc7a4ed3 | ||
|
|
09edbc24bc | ||
|
|
92f8701c37 | ||
|
|
c828888770 | ||
|
|
bb7728e7e7 | ||
|
|
5f544f9c78 | ||
|
|
5b6a7d1995 | ||
|
|
b21185dde7 | ||
|
|
deaee0dc82 | ||
|
|
4206346496 | ||
|
|
6658b632b3 | ||
|
|
d9be7160ef | ||
|
|
d56424a287 | ||
|
|
2d397c5cb8 | ||
|
|
08004caa5d | ||
|
|
522358a004 | ||
|
|
e00a6c36e3 | ||
|
|
4664497cbc | ||
|
|
6be424bde5 | ||
|
|
0903438242 | ||
|
|
b874b68e57 | ||
|
|
a3add3d816 |
@@ -296,7 +296,7 @@ If you have attended this workshop and have feedback,
|
||||
or if you want somebody to deliver that workshop at your
|
||||
conference or for your company: you can contact one of us!
|
||||
|
||||
- jerome at docker dot com
|
||||
- jerome dot petazzoni at gmail dot com
|
||||
- bret at bretfisher dot com
|
||||
|
||||
If you are willing and able to deliver such workshops,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Trainer tools to create and prepare VMs for Docker workshops on AWS
|
||||
# Trainer tools to create and prepare VMs for Docker workshops on AWS or Azure
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@@ -14,8 +14,9 @@ And if you want to generate printable cards:
|
||||
## General Workflow
|
||||
|
||||
- fork/clone repo
|
||||
- set required environment variables for AWS
|
||||
- set required environment variables
|
||||
- create your own setting file from `settings/example.yaml`
|
||||
- if necessary, increase allowed open files: `ulimit -Sn 10000`
|
||||
- run `./workshopctl` commands to create instances, install docker, setup each users environment in node1, other management tasks
|
||||
- run `./workshopctl cards` command to generate PDF for printing handouts of each users host IP's and login info
|
||||
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://container.training/" -%}
|
||||
{%- set url = "avril2018.container.training" -%}
|
||||
{%- set pagesize = 12 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set workshop_name = "formation" -%}
|
||||
{%- set cluster_or_machine = "votre VM" -%}
|
||||
{%- set machine_is_or_machines_are = "Votre VM" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "orchestration workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set workshop_name = "formation" -%}
|
||||
{%- set cluster_or_machine = "votre cluster" -%}
|
||||
{%- set machine_is_or_machines_are = "Votre cluster" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_swarm -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
@@ -75,9 +73,9 @@ img {
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
Voici les informations pour vous connecter à
|
||||
{{ cluster_or_machine }} pour cette formation.
|
||||
Vous pouvez vous connecter avec n'importe quel client SSH.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
@@ -90,14 +88,14 @@ img {
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
{{ machine_is_or_machines_are }} :
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
<p>Les slides sont à l'adresse suivante :
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@@ -45,7 +45,7 @@ def system(cmd):
|
||||
|
||||
# On EC2, the ephemeral disk might be mounted on /mnt.
|
||||
# If /mnt is a mountpoint, place Docker workspace on it.
|
||||
system("if mountpoint -q /mnt; then sudo mkdir /mnt/docker && sudo ln -s /mnt/docker /var/lib/docker; fi")
|
||||
system("if mountpoint -q /mnt; then sudo mkdir -p /mnt/docker && sudo ln -sfn /mnt/docker /var/lib/docker; fi")
|
||||
|
||||
# Put our public IP in /tmp/ipv4
|
||||
# ipv4_retrieval_endpoint = "http://169.254.169.254/latest/meta-data/public-ipv4"
|
||||
|
||||
@@ -7,7 +7,7 @@ clustersize: 1
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
@@ -17,8 +17,8 @@ paper_margin: 0.2in
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: test
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.17.1
|
||||
machine_version: 0.13.0
|
||||
compose_version: 1.20.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
@@ -17,8 +17,8 @@ paper_margin: 0.2in
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: test
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.18.0
|
||||
machine_version: 0.13.0
|
||||
compose_version: 1.20.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
@@ -7,7 +7,7 @@ clustersize: 5
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
@@ -17,8 +17,8 @@ paper_margin: 0.2in
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: test
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.17.1
|
||||
machine_version: 0.13.0
|
||||
compose_version: 1.20.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
@@ -19,6 +19,9 @@ logging.basicConfig(level=os.environ.get("LOG_LEVEL", "INFO"))
|
||||
|
||||
TIMEOUT = 60 # 1 minute
|
||||
|
||||
# This one is not a constant. It's an ugly global.
|
||||
IPADDR = None
|
||||
|
||||
|
||||
class State(object):
|
||||
|
||||
@@ -163,6 +166,9 @@ def wait_for_prompt():
|
||||
last_line = output.split('\n')[-1]
|
||||
# Our custom prompt on the VMs has two lines; the 2nd line is just '$'
|
||||
if last_line == "$":
|
||||
# This is a perfect opportunity to grab the node's IP address
|
||||
global IPADDR
|
||||
IPADDR = re.findall("^\[(.*)\]", output, re.MULTILINE)[-1]
|
||||
return
|
||||
# When we are in an alpine container, the prompt will be "/ #"
|
||||
if last_line == "/ #":
|
||||
@@ -397,8 +403,7 @@ while True:
|
||||
elif method == "open":
|
||||
# Cheap way to get node1's IP address
|
||||
screen = capture_pane()
|
||||
ipaddr = re.findall("^\[(.*)\]", screen, re.MULTILINE)[-1]
|
||||
url = data.replace("/node1", "/{}".format(ipaddr))
|
||||
url = data.replace("/node1", "/{}".format(IPADDR))
|
||||
# This should probably be adapted to run on different OS
|
||||
subprocess.check_output(["xdg-open", url])
|
||||
focus_browser()
|
||||
|
||||
@@ -26,3 +26,23 @@
|
||||
```open https://github.com/jpetazzo/container.training/tree/master/slides/common/about-slides.md```
|
||||
]
|
||||
-->
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Extra details
|
||||
|
||||
- This slide has a little magnifying glass in the top left corner
|
||||
|
||||
- This magnifiying glass indicates slides that provide extra details
|
||||
|
||||
- Feel free to skip them if:
|
||||
|
||||
- you are in a hurry
|
||||
|
||||
- you are new to this and want to avoid cognitive overload
|
||||
|
||||
- you want only the most essential information
|
||||
|
||||
- You can review these slides another time if you want, they'll be waiting for you ☺
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
- Imperative:
|
||||
|
||||
*Boil some water. Pour it in a teapot. Add tea leaves. Steep for a while. Serve in cup.*
|
||||
*Boil some water. Pour it in a teapot. Add tea leaves. Steep for a while. Serve in a cup.*
|
||||
|
||||
--
|
||||
|
||||
|
||||
@@ -20,26 +20,6 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Extra details
|
||||
|
||||
- This slide has a little magnifying glass in the top left corner
|
||||
|
||||
- This magnifiying glass indicates slides that provide extra details
|
||||
|
||||
- Feel free to skip them if:
|
||||
|
||||
- you are in a hurry
|
||||
|
||||
- you are new to this and want to avoid cognitive overload
|
||||
|
||||
- you want only the most essential information
|
||||
|
||||
- You can review these slides another time if you want, they'll be waiting for you ☺
|
||||
|
||||
---
|
||||
|
||||
class: title
|
||||
|
||||
*Tell me and I forget.*
|
||||
@@ -145,7 +125,50 @@ class: in-person
|
||||
works pretty well
|
||||
|
||||
- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets
|
||||
<br/>(available with `(apt|yum|brew) install mosh`; then connect with `mosh user@host`)
|
||||
|
||||
---
|
||||
|
||||
class: in-person, extra-details
|
||||
|
||||
## What is this Mosh thing?
|
||||
|
||||
*You don't have to use Mosh or even know about it to follow along.
|
||||
<br/>
|
||||
We're just telling you about it because some of us think it's cool!*
|
||||
|
||||
- Mosh is "the mobile shell"
|
||||
|
||||
- It is essentially SSH over UDP, with roaming features
|
||||
|
||||
- It retransmits packets quickly, so it works great even on lossy connections
|
||||
|
||||
(Like hotel or conference WiFi)
|
||||
|
||||
- It has intelligent local echo, so it works great even in high-latency connections
|
||||
|
||||
(Like hotel or conference WiFi)
|
||||
|
||||
- It supports transparent roaming when your client IP address changes
|
||||
|
||||
(Like when you hop from hotel to conference WiFi)
|
||||
|
||||
---
|
||||
|
||||
class: in-person, extra-details
|
||||
|
||||
## Using Mosh
|
||||
|
||||
- To install it: `(apt|yum|brew) install mosh`
|
||||
|
||||
- It has been pre-installed on the VMs that we are using
|
||||
|
||||
- To connect to a remote machine: `mosh user@host`
|
||||
|
||||
(It is going to establish an SSH connection, then hand off to UDP)
|
||||
|
||||
- It requires UDP ports to be open
|
||||
|
||||
(By default, it uses a UDP port between 60000 and 61000)
|
||||
|
||||
---
|
||||
|
||||
@@ -155,18 +178,18 @@ class: in-person
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into the first VM (`node1`) with SSH or MOSH
|
||||
- Log into the first VM (`node1`) with your SSH client
|
||||
|
||||
<!--
|
||||
```bash
|
||||
for N in $(awk '/node/{print $2}' /etc/hosts); do
|
||||
ssh -o StrictHostKeyChecking=no node$N true
|
||||
for N in $(awk '/\Wnode/{print $2}' /etc/hosts); do
|
||||
ssh -o StrictHostKeyChecking=no $N true
|
||||
done
|
||||
```
|
||||
|
||||
```bash
|
||||
if which kubectl; then
|
||||
kubectl get all -o name | grep -v services/kubernetes | xargs -n1 kubectl delete
|
||||
kubectl get all -o name | grep -v service/kubernetes | xargs -n1 kubectl delete
|
||||
fi
|
||||
```
|
||||
-->
|
||||
@@ -266,6 +289,14 @@ You are welcome to use the method that you feel the most comfortable with.
|
||||
|
||||
## Tmux cheatsheet
|
||||
|
||||
[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.
|
||||
|
||||
*You don't have to use it or even know about it to follow along.
|
||||
<br/>
|
||||
But some of us like to use it to switch between terminals.
|
||||
<br/>
|
||||
It has been preinstalled on your workshop nodes.*
|
||||
|
||||
- Ctrl-b c → creates a new window
|
||||
- Ctrl-b n → go to next window
|
||||
- Ctrl-b p → go to previous window
|
||||
|
||||
@@ -1,5 +1,60 @@
|
||||
# Our sample application
|
||||
|
||||
- We will clone the GitHub repository onto our `node1`
|
||||
|
||||
- The repository also contains scripts and tools that we will use through the workshop
|
||||
|
||||
.exercise[
|
||||
|
||||
<!--
|
||||
```bash
|
||||
if [ -d container.training ]; then
|
||||
mv container.training container.training.$$
|
||||
fi
|
||||
```
|
||||
-->
|
||||
|
||||
- Clone the repository on `node1`:
|
||||
```bash
|
||||
git clone git://github.com/jpetazzo/container.training
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(You can also fork the repository on GitHub and clone your fork if you prefer that.)
|
||||
|
||||
---
|
||||
|
||||
## Downloading and running the application
|
||||
|
||||
Let's start this before we look around, as downloading will take a little time...
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the `dockercoins` directory, in the cloned repo:
|
||||
```bash
|
||||
cd ~/container.training/dockercoins
|
||||
```
|
||||
|
||||
- Use Compose to build and run all containers:
|
||||
```bash
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
<!--
|
||||
```longwait units of work done```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
Compose tells Docker to build all container images (pulling
|
||||
the corresponding base images), then starts all containers,
|
||||
and displays aggregated logs.
|
||||
|
||||
---
|
||||
|
||||
## More detail on our sample application
|
||||
|
||||
- Visit the GitHub repository with all the materials of this workshop:
|
||||
<br/>https://github.com/jpetazzo/container.training
|
||||
|
||||
@@ -120,61 +175,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Getting the application source code
|
||||
|
||||
- We will clone the GitHub repository
|
||||
|
||||
- The repository also contains scripts and tools that we will use through the workshop
|
||||
|
||||
.exercise[
|
||||
|
||||
<!--
|
||||
```bash
|
||||
if [ -d container.training ]; then
|
||||
mv container.training container.training.$$
|
||||
fi
|
||||
```
|
||||
-->
|
||||
|
||||
- Clone the repository on `node1`:
|
||||
```bash
|
||||
git clone git://github.com/jpetazzo/container.training
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(You can also fork the repository on GitHub and clone your fork if you prefer that.)
|
||||
|
||||
---
|
||||
|
||||
# Running the application
|
||||
|
||||
Without further ado, let's start our application.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the `dockercoins` directory, in the cloned repo:
|
||||
```bash
|
||||
cd ~/container.training/dockercoins
|
||||
```
|
||||
|
||||
- Use Compose to build and run all containers:
|
||||
```bash
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
<!--
|
||||
```longwait units of work done```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
Compose tells Docker to build all container images (pulling
|
||||
the corresponding base images), then starts all containers,
|
||||
and displays aggregated logs.
|
||||
|
||||
---
|
||||
|
||||
## Our application at work
|
||||
|
||||
- On the left-hand side, the "rainbow strip" shows the container names
|
||||
@@ -299,5 +299,5 @@ class: extra-details
|
||||
|
||||
Some containers exit immediately, others take longer.
|
||||
|
||||
The containers that do not handle `SIGTERM` end up being killed after a 10s timeout.
|
||||
The containers that do not handle `SIGTERM` end up being killed after a 10s timeout. If we are very impatient, we can hit `^C` a second time!
|
||||
|
||||
|
||||
@@ -11,11 +11,9 @@ class: title, in-person
|
||||
@@TITLE@@<br/></br>
|
||||
|
||||
.footnote[
|
||||
**Be kind to the WiFi!**<br/>
|
||||
<!-- *Use the 5G network.* -->
|
||||
*Don't use your hotspot.*<br/>
|
||||
*Don't stream videos or download big files during the workshop.*<br/>
|
||||
*Thank you!*
|
||||
**WiFI: `ArtyLoft`** ou **`ArtyLoft 5 GHz`**
|
||||
<br/>
|
||||
**Mot de passe: `TFLEVENT5`**
|
||||
|
||||
**Slides: http://container.training/**
|
||||
]
|
||||
**Slides: http://avril2018.container.training/**
|
||||
]
|
||||
|
||||
10
slides/generate-chapter-sizes.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/bin/sh
|
||||
INPUT=$1
|
||||
|
||||
{
|
||||
echo "# Front matter"
|
||||
cat "$INPUT"
|
||||
} |
|
||||
grep -e "^# " -e ^---$ | uniq -c |
|
||||
sed "s/^ *//" | sed s/---// |
|
||||
paste -d "\t" - -
|
||||
BIN
slides/images/binpacking-1d-1.gif
Normal file
|
After Width: | Height: | Size: 9.4 KiB |
BIN
slides/images/binpacking-1d-2.gif
Normal file
|
After Width: | Height: | Size: 7.8 KiB |
BIN
slides/images/binpacking-2d.gif
Normal file
|
After Width: | Height: | Size: 11 KiB |
BIN
slides/images/binpacking-3d.gif
Normal file
|
After Width: | Height: | Size: 15 KiB |
BIN
slides/images/conductor.jpg
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
slides/images/demo.jpg
Normal file
|
After Width: | Height: | Size: 178 KiB |
213
slides/images/docker-con-15-logo.svg
Normal file
@@ -0,0 +1,213 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!-- Generator: Adobe Illustrator 18.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||
viewBox="0 0 445 390" enable-background="new 0 0 445 390" xml:space="preserve">
|
||||
<g>
|
||||
<path fill="#3A4D54" d="M158.8,352.2h-25.9c3.2,0,5.8-2.6,5.8-5.9s-2.6-5.9-5.8-5.9h-19c3.2,0,5.8-2.6,5.8-5.9s-2.6-5.9-5.8-5.9
|
||||
h25.3c3.2,0,5.8-2.6,5.8-5.9c0-3.2-2.6-5.9-5.8-5.9h-15.9c3.2,0,5.8-2.6,5.8-5.9s-2.6-5.9-5.8-5.9h6.8c3.2,0,5.8-2.6,5.8-5.9
|
||||
c0-3.2-2.6-5.9-5.8-5.9H64.9c-0.1,0-0.3,0-0.4,0c3,0.2,5.4,2.7,5.4,5.9c0,3.1-2.4,5.7-5.4,5.9c0.1,0,0.3,0,0.4,0h-0.8h-6.1
|
||||
c-3.2,0-5.8,2.6-5.8,5.9s2.6,5.9,5.8,5.9H74h3.7c3.2,0,5.8,2.6,5.8,5.9c0,3.2-2.6,5.9-5.8,5.9H74H47.9c-3.2,0-5.8,2.6-5.8,5.9
|
||||
s2.6,5.9,5.8,5.9h44.8H93c0,0-0.1,0-0.1,0c3.1,0.1,5.6,2.7,5.6,5.9c0,3.2-2.5,5.8-5.6,5.9c0,0,0.1,0,0.1,0h-0.2
|
||||
c-3.2,0-5.8,2.6-5.8,5.9c0,3.2,2.6,5.9,5.8,5.9h66c3.2,0,5.8-2.6,5.8-5.9C164.6,354.8,162,352.2,158.8,352.2z"/>
|
||||
<circle fill="#FBBF45" stroke="#3A4D54" stroke-width="4" stroke-miterlimit="10" cx="214.6" cy="124.2" r="68.7"/>
|
||||
<circle fill="#3A4D54" cx="367.5" cy="335.5" r="5.9"/>
|
||||
<g>
|
||||
<polygon fill="#E8593A" stroke="#3A4D54" stroke-width="4" stroke-miterlimit="10" points="116.1,199.1 116.1,214.6 302.9,214.5
|
||||
302.9,199.1 "/>
|
||||
<rect x="159.4" y="78.6" fill="#3A4D54" width="4.2" height="50.4"/>
|
||||
<rect x="174.5" y="93.8" fill="#3A4D54" width="4.2" height="35.1"/>
|
||||
<rect x="280.2" y="108.2" fill="#3A4D54" width="4.2" height="20.8"/>
|
||||
<rect x="190.2" y="106.9" fill="#3A4D54" width="4.2" height="22"/>
|
||||
<rect x="143.3" y="59.8" fill="#3A4D54" width="4.2" height="69.1"/>
|
||||
<path fill="#3A4D54" d="M294.3,107.9c3.5-2.3,6.9-4.8,10.4-7.4V87.7c-5.2,4.3-10.6,8.2-15.9,11.6c-7.8,4.9-15.1,8.5-22.4,11
|
||||
c-7.9,2.8-15.7,4.3-23.4,4.7c-7.6,0.3-15.3-0.5-22.8-2.6c-6.9-1.9-13.7-4.7-20.4-8.6C188.8,97.5,178.4,89,168,77.6
|
||||
c-7.7-8.4-14.7-17.7-21.6-28.2c-5-7.8-9.6-15.8-13.6-23.9c-4-8.1-6.1-13.5-6.9-16c-0.7-1.8-1-3.1-1.2-3.8l0-0.1l0.1-2.7l-0.5,0
|
||||
l0-0.1H123l-8.1-0.6l-3.1-0.1l-0.1,3.4l0,0.4c0,1.2,0.2,1.9,0.3,2.5l0,0.1c0.3,1.4,0.9,3.2,1.7,5.3c1.2,3.4,3.6,9.1,7.7,17.2
|
||||
c4.3,8.4,9.2,16.8,14.6,25c7.3,11.1,14.9,20.8,23.2,29.6c11.4,12.1,22.9,21.3,35.1,28.1c7.6,4.2,15.4,7.4,23.2,9.4
|
||||
c7,1.8,14.2,2.7,21.4,2.7c0,0,0,0,0,0c1.6,0,3.2,0,4.7-0.1c8.7-0.5,17.6-2.4,26.4-5.6 M141.1,52.8c-5.2-7.9-10-16.1-14.2-24.4
|
||||
c-4-7.9-6.3-13.4-7.5-16.6c-0.5-1.3-0.8-2.4-1.1-3.3l1,0.1c0.3,0.9,0.6,1.9,1,2.9c1.6,4.5,4.2,10.4,7.2,16.6
|
||||
c4.1,8.3,8.8,16.5,13.9,24.5c5.5,8.5,11.1,16.2,17.1,23.3C152.4,68.9,146.7,61.3,141.1,52.8z"/>
|
||||
<path fill="#E8593A" stroke="#3A4D54" stroke-width="4" stroke-miterlimit="10" d="M340.9,53h-7.9h-4.3v8.2h-19.4V53h-4.3h-7.9
|
||||
h-4.3v8.2v2.7v186.7c0,0.8,0.6,1.4,1.3,1.4h3h42.4h4.3c0.7,0,1.3-0.6,1.3-1.4V62v-0.8V53H340.9z M334.8,206.6h-31.5V152
|
||||
c0-0.4,0.3-0.7,0.6-0.7h30.2c0.4,0,0.6,0.3,0.6,0.7V206.6z M334.8,142.1h-31.5V125c0-0.4,0.3-0.7,0.6-0.7h30.2
|
||||
c0.4,0,0.6,0.3,0.6,0.7V142.1z M334.8,115.1h-31.5V97.9c0-0.4,0.3-0.7,0.6-0.7h30.2c0.4,0,0.6,0.3,0.6,0.7V115.1z M334.8,88h-31.5
|
||||
V70.9c0-0.4,0.3-0.7,0.6-0.7h30.2c0.4,0,0.6,0.3,0.6,0.7V88z"/>
|
||||
<polygon fill="#E8593A" points="272.2,203 286.7,201.1 297.2,201.1 297.2,214.6 271.7,214.6 "/>
|
||||
<path fill="#E8593A" d="M298.7,96.2c-2.7,2-5.5,3.9-8.3,5.7c-7.3,4.6-15,8.5-23,11.3c-7.9,2.8-16.1,4.5-24.3,4.8
|
||||
c-8.1,0.4-16.1-0.6-23.7-2.7c-7.6-2-14.6-5.1-21.1-8.9c-13-7.5-23.7-17.1-32.6-26.8c-8.9-9.8-16-19.6-21.9-28.6
|
||||
c-5.8-9-10.3-17.3-13.7-24.2c-3.4-6.9-5.7-12.5-7.1-16.3c-0.7-1.9-1.1-3.3-1.3-4.2c-0.1-0.4-0.1-0.7-0.1-0.4l0,0.1
|
||||
c0,0,0-0.1,0-0.1c0-0.1,0-0.1,0-0.1c0-0.1,0-0.1,0-0.1l-7-0.5c0,0,0,0,0,0.1c0,0,0,0.1,0,0.1c0,0,0,0.1,0,0.1c0,0.1,0,0.2,0,0.3
|
||||
c0,0.9,0.1,1.4,0.3,2.1c0.3,1.3,0.8,2.9,1.6,5c1.5,4.1,4,9.8,7.6,16.9c3.6,7.1,8.3,15.5,14.4,24.7c6.1,9.2,13.5,19.2,22.9,29.2
|
||||
c9.3,9.9,20.5,19.8,34.3,27.5c6.9,3.8,14.4,7,22.5,9.1c8,2.1,16.6,3,25.2,2.5c8.6-0.5,17.3-2.4,25.5-5.4c8.3-3,16.2-7.2,23.7-12
|
||||
c2-1.3,4.1-2.7,6-4.2V96.2z"/>
|
||||
<path fill="#E8593A" stroke="#3A4D54" stroke-width="4" stroke-miterlimit="10" d="M122.9,4.2h-3.2h-6.6v11.7H66.1V4.2h-4.6h-6.2
|
||||
h-6.6v11.7v3.8v265.1c0,1.1,0.9,2,2,2h4.6h65.7h6.6c1.1,0,2-0.9,2-2V17v-1.1V4.2H122.9z M113.5,204.2H64.7v-59.4c0-0.6,0.4-1,1-1
|
||||
h46.7c0.6,0,1,0.4,1,1V204.2z M113.5,130.8H64.7v-24.3c0-0.6,0.4-1,1-1h46.7c0.6,0,1,0.4,1,1V130.8z M113.5,92.4H64.7V68.1
|
||||
c0-0.6,0.4-1,1-1h46.7c0.6,0,1,0.4,1,1V92.4z M113.5,54H64.7V29.7c0-0.6,0.4-1,1-1h46.7c0.6,0,1,0.4,1,1V54z"/>
|
||||
<g>
|
||||
<g>
|
||||
<path fill="#2BB8EB" stroke="#3A4D54" stroke-width="5" stroke-miterlimit="10" d="M435.8,132.9H364c-1.4,0-2.6,1.3-2.6,3v44.2
|
||||
c0,1.7,1.2,3,2.6,3h71.8c2.5,0,3.6-3.7,1.5-5.4l-11.4-13.5c-3.2-3.3-3.2-9,0-12.3l11.4-13.5
|
||||
C439.3,136.6,438.3,132.9,435.8,132.9z"/>
|
||||
<path fill="#FFFFFF" stroke="#3A4D54" stroke-width="5" stroke-miterlimit="10" d="M9.8,183.1h129.7c1.4,0,2.6-1.3,2.6-3v-44.2
|
||||
c0-1.7-1.2-3-2.6-3H9.8c-2.5,0-3.6,3.7-1.5,5.4l11.4,13.5c3.2,3.3,3.2,9,0,12.3L8.3,177.7C6.2,179.4,7.3,183.1,9.8,183.1z"/>
|
||||
<path fill="#FFFFFF" stroke="#3A4E55" stroke-width="5" stroke-miterlimit="10" d="M402.5,190H42.1c-3.6,0-6.5-1.1-6.5-4.6
|
||||
v-54.7c0-3.6,2.9-6.5,6.5-6.5h360.4c3.6,0,6.5,2.9,6.5,6.5v52.9C409,187.1,406.1,190,402.5,190z"/>
|
||||
<path fill="#2BB8EB" d="M402.5,124.2h-46.3V190h46.3c3.6,0,6.5-2.9,6.5-6.5v-52.9C409,127.1,406.1,124.2,402.5,124.2z"/>
|
||||
<g>
|
||||
<path fill="#FFFFFF" d="M376.2,144.3v21.3c0,1.1-0.9,2-2,2c-1.1,0-2-0.9-2-2v-17.8l-1.4,0.8c-0.3,0.2-0.7,0.3-1,0.3
|
||||
c-0.7,0-1.3-0.4-1.7-1c-0.6-0.9-0.3-2.2,0.7-2.7l4.4-2.6c0,0,0.1,0,0.1-0.1c0.1,0,0.1-0.1,0.2-0.1c0.1,0,0.1,0,0.2,0
|
||||
c0,0,0.1,0,0.1,0c0.1,0,0.2,0,0.3,0c0,0,0.1,0,0.1,0h0c0.1,0,0.2,0,0.3,0c0,0,0.1,0,0.1,0c0.1,0,0.1,0,0.2,0.1c0,0,0.1,0,0.1,0
|
||||
c0.1,0.1,0.1,0.1,0.2,0.1c0,0,0.1,0.1,0.1,0.1c0,0,0.1,0.1,0.1,0.1c0.1,0,0.1,0.1,0.1,0.1c0,0,0.1,0.1,0.1,0.1
|
||||
c0,0,0.1,0.1,0.1,0.1l0,0.1c0,0,0,0.1,0,0.1c0,0.1,0.1,0.1,0.1,0.2c0,0.1,0,0.1,0.1,0.2c0,0.1,0,0.1,0,0.2c0,0.1,0,0.2,0.1,0.3
|
||||
C376.2,144.3,376.2,144.3,376.2,144.3z"/>
|
||||
<path fill="#FFFFFF" d="M393.4,152.3c1.8,1.7,2.6,4.1,2.6,6.4c0,2.3-0.9,4.6-2.6,6.3c-1.7,1.8-4.1,2.6-6.3,2.6
|
||||
c-0.1,0-0.1,0-0.1,0c-2.2,0-4.6-0.9-6.3-2.6c-0.8-0.8-0.8-2.1,0-2.9c0.8-0.8,2.1-0.8,2.9,0c0.9,1,2.2,1.4,3.5,1.4
|
||||
c1.2,0,2.5-0.5,3.4-1.4c0.9-0.9,1.4-2.2,1.4-3.4c0-1.3-0.5-2.5-1.4-3.5c-0.9-1-2.2-1.4-3.4-1.4c-1.2,0-2.5,0.4-3.5,1.4
|
||||
c-0.8,0.8-2.1,0.8-2.9,0c-0.1-0.1-0.3-0.3-0.4-0.5c0-0.1,0-0.1,0-0.1c0-0.1,0-0.1-0.1-0.2c0-0.1,0-0.2,0-0.3c0,0,0,0,0-0.1
|
||||
c0-0.2,0-0.4,0-0.6l1.1-9.4c0.1-0.6,0.4-1.1,0.9-1.4c0.1,0,0.1,0,0.1-0.1c0,0,0.1,0,0.1-0.1c0.3-0.1,0.6-0.2,0.9-0.2h9.2
|
||||
c1.2,0,2.1,0.9,2.1,2.1c0,1.1-0.9,2-2.1,2h-7.4l-0.4,3.6c0.8-0.2,1.6-0.3,2.4-0.3C389.4,149.7,391.7,150.6,393.4,152.3z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill="#3A4D54" d="M157.8,142.1L157.8,142.1l-0.9,0c-0.7,0-2.6,2-3,2.5c-1.7,1.7-3.5,3.4-5.2,5.1v-13.7
|
||||
c0-1.2-0.8-2.2-2-2.2h-0.3c-1.3,0-2,1-2,2.2v29.9c0,1.2,0.8,2.2,2,2.2h0.3c1.3,0,2-1,2-2.2v-5.3l3.4,3.3c1,1,2,2,3,3
|
||||
c0.5,0.5,1.3,1.3,2.1,1.3h0.4c1.1,0,1.8-0.8,2-1.8l0-0.1v-0.5c0-0.4-0.1-0.7-0.3-1c-0.2-0.3-0.5-0.6-0.7-0.8
|
||||
c-0.6-0.7-1.2-1.3-1.9-1.9c-2.3-2.3-4.6-4.6-6.9-6.9l5.3-5.4c1-1.1,2.1-2.1,3.1-3.2c0.5-0.5,1.3-1.4,1.3-2.1V144
|
||||
C159.6,142.9,158.9,142.3,157.8,142.1z"/>
|
||||
<path fill="#3A4D54" d="M138.9,143.9l-0.2-0.1c-1.9-1.3-4.1-2-6.5-2h-0.9c-2.2,0-4.3,0.6-6.2,1.7c-4.1,2.4-6.5,6.2-6.5,11v0.9
|
||||
c0,1.1,0.1,2.2,0.5,3.3c1.9,6.3,6.8,9.9,13.4,9.5c1.9-0.1,6.8-0.7,6.8-3.4v-0.4c0-1.1-0.8-1.7-1.8-1.9l-0.1,0h-0.8l-0.2,0.1
|
||||
c-1.1,0.5-2.7,1.2-3.9,1.2c-1.3,0-2.9-0.1-4.2-0.7c-3.4-1.6-5.4-4.3-5.4-8c0-1.2,0.2-2.4,0.8-3.6c1.6-3.3,4.2-5.3,7.9-5.2
|
||||
c0.7,0,2,0.1,2.6,0.4c0.6,0.3,2.1,1,2.7,1h0.3l0.1,0c1-0.2,1.9-0.8,1.9-1.9v-0.4c0-0.4-0.2-0.8-0.4-1.2L138.9,143.9z"/>
|
||||
<path fill="#3A4D54" d="M85.2,133.7h-0.4c-1.3,0-2,1-2,2.2v9.3c-2.3-2-5.1-3.3-8.3-3.3h-0.9c-2.2,0-4.3,0.6-6.2,1.7
|
||||
c-4.1,2.4-6.5,6.2-6.5,11v0.9c0,2.2,0.6,4.3,1.7,6.2c2.4,4.1,6.2,6.5,11,6.5h0.9c2.2,0,4.3-0.6,6.2-1.7c4.1-2.4,6.5-6.2,6.5-11
|
||||
v-19.6C87.2,134.6,86.5,133.7,85.2,133.7z M81.6,159.3c-1.7,2.9-4.2,4.5-7.6,4.5c-1.4,0-2.7-0.4-3.9-1c-3-1.7-4.7-4.3-4.7-7.7
|
||||
c0-1.2,0.2-2.4,0.8-3.6c1.6-3.3,4.3-5.2,8-5.2c1.8,0,3.4,0.5,4.9,1.6c2.4,1.7,3.8,4.1,3.8,7.1C82.8,156.5,82.4,158,81.6,159.3z
|
||||
"/>
|
||||
<path fill="#3A4D54" d="M103.1,141.9h-0.6c-2.2,0-4.3,0.6-6.2,1.7c-4.1,2.4-6.5,6.2-6.5,11v0.9c0,2.2,0.6,4.3,1.7,6.2
|
||||
c2.4,4.1,6.2,6.5,11,6.5h0.9c2.2,0,4.3-0.6,6.2-1.7c4.1-2.4,6.5-6.2,6.5-11v-0.9c0-2-0.5-4-1.5-5.8
|
||||
C112.1,144.4,108.2,141.9,103.1,141.9z M110.5,159.3c-1.7,2.8-4.2,4.5-7.5,4.5c-1.6,0-3-0.4-4.3-1.2c-2.8-1.7-4.5-4.2-4.5-7.6
|
||||
c0-1.2,0.2-2.4,0.8-3.6c1.6-3.3,4.3-5.2,8-5.2c1.7,0,3.3,0.5,4.7,1.4c2.6,1.7,4.1,4.1,4.1,7.2
|
||||
C111.7,156.5,111.3,158,110.5,159.3z"/>
|
||||
<path fill="#3A4D54" d="M186.4,148c-1.2-2.1-3-3.7-5.2-4.8c-4-2-8.3-2.2-12.2,0.1l-0.6,0.3c-1.6,0.9-3,2.1-4,3.6
|
||||
c-3,4.4-3.4,9.3-0.7,14l0.3,0.5c1.1,2,2.7,3.6,4.6,4.6c4.2,2.3,8.6,2.6,12.8,0.2l0.4-0.2c1.1-0.7,1.4-1.8,0.8-3
|
||||
c-0.2-0.5-0.7-0.8-1.2-1.1l-0.1-0.1l-0.1,0c-0.8-0.1-2.9,0.8-3.8,1.2c-1.6,0.3-3.5,0.4-5.1-0.2c2.9-2.5,5.8-5.1,8.8-7.6
|
||||
c1.3-1.1,2.7-2.4,4.1-3.5c1.2-0.9,2.3-2.2,1.4-3.8L186.4,148z M178.4,152.1c-3.3,2.8-6.5,5.6-9.8,8.4c-0.3-0.4-0.6-0.8-0.9-1.2
|
||||
c-0.7-1.2-1.1-2.5-1.1-3.9c-0.1-3.5,1.2-6.3,4.2-8.1c2.3-1.3,4.8-1.7,7.4-0.7c1.3,0.5,2.7,1.3,3.6,2.4
|
||||
C180.7,150.2,179.5,151.2,178.4,152.1z"/>
|
||||
<path fill="#3A4D54" d="M204.2,142.1h-0.4c-2.6,0-5,0.8-7.1,2.3c-3.5,2.5-5.6,6-5.6,10.4V166c0,1.2,0.8,2.2,2,2.2h0.3
|
||||
c1.3,0,2-1,2-2.2v-10.7c0-2.4,0.7-4.5,2.4-6.2c1.4-1.3,3.3-2.5,5.2-2.5c1.5,0,3.3-0.5,3.3-2.3
|
||||
C206.4,142.9,205.5,142.1,204.2,142.1z"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#2BB8EB" d="M281.3,146.6c-0.7-0.3-1.9-0.4-2.6-0.4
|
||||
c-3.7-0.1-6.4,1.9-7.9,5.2c-0.5,1.1-0.8,2.3-0.8,3.6c0,3.8,2,6.4,5.4,8c1.2,0.6,2.8,0.7,4.2,0.7c1.2,0,2.9-0.7,3.9-1.2l0.2-0.1
|
||||
h0.8l0.1,0c1,0.2,1.8,0.8,1.8,1.9v0.4c0,2.7-4.9,3.3-6.8,3.4c-6.6,0.5-11.6-3.2-13.4-9.5c-0.3-1.1-0.5-2.2-0.5-3.3v-0.9
|
||||
c0-4.8,2.4-8.6,6.5-11c1.9-1.1,4-1.7,6.2-1.7h0.9c2.4,0,4.5,0.7,6.5,2l0.2,0.1l0.1,0.2c0.2,0.3,0.4,0.7,0.4,1.2v0.4
|
||||
c0,1.1-0.8,1.7-1.9,1.9l-0.1,0H284C283.4,147.6,281.9,146.9,281.3,146.6z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#2BB8EB" d="M301.3,141.9h0.6c5.1,0,9,2.5,11.5,6.9c1,1.8,1.5,3.7,1.5,5.8
|
||||
v0.9c0,4.8-2.4,8.6-6.5,11c-1.9,1.1-4,1.7-6.2,1.7h-0.9c-4.8,0-8.6-2.4-11-6.5c-1.1-1.9-1.7-4-1.7-6.2v-0.9
|
||||
c0-4.8,2.4-8.6,6.5-11C297,142.4,299.1,141.9,301.3,141.9z M293,155c0,3.4,1.6,5.8,4.5,7.6c1.3,0.8,2.8,1.2,4.3,1.2
|
||||
c3.3,0,5.8-1.7,7.5-4.5c0.8-1.3,1.2-2.8,1.2-4.4c0-3.1-1.5-5.5-4.1-7.2c-1.4-0.9-3-1.4-4.7-1.4c-3.7,0-6.4,1.9-8,5.2
|
||||
C293.3,152.6,293,153.8,293,155z"/>
|
||||
<path fill="#2BB8EB" d="M344,148.8c-2.5-4.5-6.4-6.9-11.5-6.9h-0.6c-2.2,0-4.3,0.6-6.2,1.7c-4.1,2.4-6.5,6.2-6.5,11v0.3v11
|
||||
c0,1.2,0.8,2.2,2,2.2h0.3c1.3,0,2-1,2-2.2v-11h0c0-1.2,0.3-2.4,0.8-3.5c1.6-3.3,4.3-5.2,8-5.2c1.7,0,3.3,0.5,4.7,1.4
|
||||
c2.6,1.7,4.1,4.1,4.1,7.2v11c0,1.2,0.8,2.2,2,2.2h0.3c1.3,0,2-1,2-2.2v-11v-0.3C345.5,152.6,345,150.6,344,148.8z"/>
|
||||
</g>
|
||||
</g>
|
||||
<path fill="none" stroke="#3A4D54" stroke-width="5" stroke-miterlimit="10" d="M402.5,190H42.1c-3.6,0-6.5-2.9-6.5-6.5v-52.9
|
||||
c0-3.6,2.9-6.5,6.5-6.5h360.4c3.6,0,6.5,2.9,6.5,6.5v52.9C409,187.1,406.1,190,402.5,190z"/>
|
||||
</g>
|
||||
<polygon fill="#E8593A" points="147.8,203 133.3,201.1 122.8,201.1 122.8,214.6 148.3,214.6 "/>
|
||||
<rect x="353.6" y="124.2" fill="#3A4D54" width="5.1" height="55.2"/>
|
||||
</g>
|
||||
<g>
|
||||
<path fill="#3A4D54" d="M91.8,293.4H20.2c-3.2,0-5.8-2.6-5.8-5.9s2.6-5.9,5.8-5.9h71.6c3.2,0,5.8,2.6,5.8,5.9S95,293.4,91.8,293.4
|
||||
z"/>
|
||||
</g>
|
||||
<path fill="#3A4D54" d="M428.9,282.7h-83c-3.2,0-5.8,2.6-5.8,5.9c0,3.2,2.6,5.9,5.8,5.9h-54.7c-3.2,0-5.8,2.6-5.8,5.9
|
||||
c0,3.2,2.6,5.9,5.8,5.9H308c-3.2,0-5.8,2.6-5.8,5.9c0,3.2,2.6,5.9,5.8,5.9h-28.9c-3.2,0-5.8,2.6-5.8,5.9c0,3.2,2.6,5.9,5.8,5.9H262
|
||||
c-3.2,0-5.8,2.6-5.8,5.9s2.6,5.9,5.8,5.9h13.7c-3.2,0-5.8,2.6-5.8,5.9s2.6,5.9,5.8,5.9h-37.8c-3.2,0-5.8,2.6-5.8,5.9
|
||||
c0,3,2.2,5.5,5.1,5.8h-48.8c-0.9-0.6-2-1-3.2-1h-47.1c3.2,0,5.8,2.6,5.8,5.9c0,3.2-2.6,5.9-5.8,5.9h-2.8c-3.2,0-5.8,2.9-5.8,6.4
|
||||
c0,3.5,2.6,6.4,5.8,6.4h58.5h7.5H286c3.2,0,5.8-2.6,5.8-5.9c0-3.2-2.6-5.9-5.8-5.9H286h-2.7c-3.2,0-5.8-2.6-5.8-5.9
|
||||
c0-3.2,2.6-5.9,5.8-5.9h66c0.2,0,0.4,0,0.6,0h6.7c3.2,0,5.8-2.6,5.8-5.9c0-3.2-2.6-5.9-5.8-5.9h-27.2c0,0,0,0,0,0h-0.7
|
||||
c-3.2,0-5.8-2.6-5.8-5.9c0-3.2,2.6-5.9,5.8-5.9h0.7h14.1c3.2,0,5.8-2.6,5.8-5.9s-2.6-5.9-5.8-5.9h0.2c-3.2,0-5.8-2.6-5.8-5.9
|
||||
c0-3.2,2.6-5.9,5.8-5.9h0.7h28.9c3.2,0,5.8-2.6,5.8-5.9c0-3.2-2.6-5.9-5.8-5.9h-16.1h-0.8c0.1,0,0.3,0,0.4,0
|
||||
c-3-0.2-5.4-2.7-5.4-5.9c0-3.1,2.4-5.7,5.4-5.9c-0.1,0-0.3,0-0.4,0h0.8h65.2h6.5c3.2,0,5.8-2.6,5.8-5.9
|
||||
C434.6,285.3,432.1,282.7,428.9,282.7z"/>
|
||||
<g>
|
||||
<path id="outline_3_" fill-rule="evenodd" clip-rule="evenodd" fill="#3A4D54" d="M258,210.8h37v37.8h18.7
|
||||
c8.6,0,17.5-1.5,25.7-4.3c4-1.4,8.5-3.3,12.5-5.6c-5.2-6.8-7.9-15.4-8.7-23.9c-1.1-11.5,1.3-26.5,9.1-35.6l3.9-4.5l4.6,3.7
|
||||
c11.7,9.4,21.5,22.5,23.2,37.4c14-4.1,30.5-3.2,42.9,4l5.1,2.9l-2.7,5.2c-10.5,20.4-32.3,26.7-53.7,25.6
|
||||
C343.5,333.3,273.8,371,189.4,371c-43.6,0-83.7-16.3-106.5-55l-0.4-0.6l-3.3-6.8c-7.7-17-10.3-35.7-8.5-54.4l0.5-5.6h31.6v-37.8
|
||||
h37v-37h73.9v-37H258V210.8z"/>
|
||||
<g id="body_colors_3_">
|
||||
<path fill="#08AADA" d="M377.8,224.8c2.5-19.3-11.9-34.4-20.9-41.6c-10.3,11.9-11.9,43.1,4.3,56.3c-9,8-28,15.3-47.5,15.3H76.8
|
||||
c-1.9,20.3,1.7,39,9.8,55l2.7,4.9c1.7,2.9,3.6,5.7,5.6,8.4h0c9.7,0.6,18.7,0.8,26.9,0.7c0,0,0,0,0,0c16.1-0.4,29.3-2.3,39.3-5.7
|
||||
c1.5-0.5,3.1,0.3,3.6,1.8c0.5,1.5-0.3,3.1-1.8,3.6c-1.3,0.5-2.7,0.9-4.1,1.3c0,0,0,0,0,0c-7.9,2.2-16.3,3.8-27.2,4.4
|
||||
c0.6,0-0.7,0.1-0.7,0.1c-0.4,0-0.8,0.1-1.2,0.1c-4.3,0.2-8.9,0.3-13.6,0.3c-5.2,0-10.3-0.1-15.9-0.4l-0.1,0.1
|
||||
c19.7,22.2,50.6,35.5,89.3,35.5c81.9,0,151.3-36.3,182.1-117.8c21.8,2.2,42.8-3.3,52.3-21.9C408.6,216.4,389,219.2,377.8,224.8z"
|
||||
/>
|
||||
<path fill="#2BB8EB" d="M377.8,224.8c2.5-19.3-11.9-34.4-20.9-41.6c-10.3,11.9-11.9,43.1,4.3,56.3c-9,8-28,15.3-47.5,15.3H90.8
|
||||
c-1,31.1,10.6,54.7,31,69c0,0,0,0,0,0c16.1-0.4,29.3-2.3,39.3-5.7c1.5-0.5,3.1,0.3,3.6,1.8c0.5,1.5-0.3,3.1-1.8,3.6
|
||||
c-1.3,0.5-2.7,0.9-4.1,1.3c0,0,0,0,0,0c-7.9,2.2-17,3.9-27.9,4.6c0,0-0.3-0.3-0.3-0.3c27.9,14.3,68.3,14.2,114.6-3.6
|
||||
c51.9-20,100.3-58,134-101.5C378.8,224.3,378.3,224.6,377.8,224.8z"/>
|
||||
<path fill="#088CB9" d="M76.6,279.5c1.5,10.9,4.7,21.1,9.4,30.4l2.7,4.9c1.7,2.9,3.6,5.7,5.6,8.4c9.7,0.6,18.7,0.8,26.9,0.7
|
||||
c16.1-0.4,29.3-2.3,39.3-5.7c1.5-0.5,3.1,0.3,3.6,1.8c0.5,1.5-0.3,3.1-1.8,3.6c-1.3,0.5-2.7,0.9-4.1,1.3c0,0,0,0,0,0
|
||||
c-7.9,2.2-17,3.9-27.8,4.5c-0.4,0-1,0-1.4,0c-4.3,0.2-8.9,0.4-13.6,0.4c-5.2,0-10.4-0.1-16.1-0.4c19.7,22.2,50.8,35.5,89.5,35.5
|
||||
c70.1,0,131.1-26.6,166.5-85.4H76.6z"/>
|
||||
<path fill="#069BC6" d="M92.9,279.5c4.2,19.1,14.3,34.1,28.9,44.3c16.1-0.4,29.3-2.3,39.3-5.7c1.5-0.5,3.1,0.3,3.6,1.8
|
||||
c0.5,1.5-0.3,3.1-1.8,3.6c-1.3,0.5-2.7,0.9-4.1,1.3c0,0,0,0,0,0c-7.9,2.2-17.2,3.9-28,4.5c27.9,14.3,68.2,14.1,114.5-3.7
|
||||
c28-10.8,55-26.8,79.2-46.1H92.9z"/>
|
||||
</g>
|
||||
<g id="Containers_3_">
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#00ACD2" d="M135.8,219.7h2.5v26.7h-2.5V219.7z M130.9,219.7h2.6v26.7h-2.6
|
||||
V219.7z M126.1,219.7h2.6v26.7h-2.6V219.7z M121.2,219.7h2.6v26.7h-2.6V219.7z M116.3,219.7h2.6v26.7h-2.6V219.7z M111.6,219.7
|
||||
h2.5v26.7h-2.5V219.7z M108.9,217h32v32h-32V217z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#00ACD2" d="M172.7,182.7h2.5v26.7h-2.5V182.7z M167.9,182.7h2.6v26.7h-2.6
|
||||
V182.7z M163,182.7h2.6v26.7H163V182.7z M158.2,182.7h2.6v26.7h-2.6V182.7z M153.3,182.7h2.6v26.7h-2.6V182.7z M148.6,182.7h2.5
|
||||
v26.7h-2.5V182.7z M145.9,180h32v32h-32V180z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#26C2EE" d="M172.7,219.7h2.5v26.7h-2.5V219.7z M167.9,219.7h2.6v26.7h-2.6
|
||||
V219.7z M163,219.7h2.6v26.7H163V219.7z M158.2,219.7h2.6v26.7h-2.6V219.7z M153.3,219.7h2.6v26.7h-2.6V219.7z M148.6,219.7h2.5
|
||||
v26.7h-2.5V219.7z M145.9,217h32v32h-32V217z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#00ACD2" d="M209.7,219.7h2.5v26.7h-2.5V219.7z M204.8,219.7h2.6v26.7h-2.6
|
||||
V219.7z M200,219.7h2.6v26.7H200V219.7z M195.1,219.7h2.6v26.7h-2.6V219.7z M190.3,219.7h2.6v26.7h-2.6V219.7z M185.5,219.7h2.5
|
||||
v26.7h-2.5V219.7z M182.9,217h32v32h-32V217z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#26C2EE" d="M209.7,182.7h2.5v26.7h-2.5V182.7z M204.8,182.7h2.6v26.7h-2.6
|
||||
V182.7z M200,182.7h2.6v26.7H200V182.7z M195.1,182.7h2.6v26.7h-2.6V182.7z M190.3,182.7h2.6v26.7h-2.6V182.7z M185.5,182.7h2.5
|
||||
v26.7h-2.5V182.7z M182.9,180h32v32h-32V180z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#26C2EE" d="M246.7,219.7h2.5v26.7h-2.5V219.7z M241.8,219.7h2.6v26.7h-2.6
|
||||
V219.7z M237,219.7h2.6v26.7H237V219.7z M232.1,219.7h2.6v26.7h-2.6V219.7z M227.3,219.7h2.6v26.7h-2.6V219.7z M222.5,219.7h2.5
|
||||
v26.7h-2.5V219.7z M219.8,217h32v32h-32V217z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#00ACD2" d="M246.7,182.7h2.5v26.7h-2.5V182.7z M241.8,182.7h2.6v26.7h-2.6
|
||||
V182.7z M237,182.7h2.6v26.7H237V182.7z M232.1,182.7h2.6v26.7h-2.6V182.7z M227.3,182.7h2.6v26.7h-2.6V182.7z M222.5,182.7h2.5
|
||||
v26.7h-2.5V182.7z M219.8,180h32v32h-32V180z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#26C2EE" d="M246.7,145.7h2.5v26.7h-2.5V145.7z M241.8,145.7h2.6v26.7h-2.6
|
||||
V145.7z M237,145.7h2.6v26.7H237V145.7z M232.1,145.7h2.6v26.7h-2.6V145.7z M227.3,145.7h2.6v26.7h-2.6V145.7z M222.5,145.7h2.5
|
||||
v26.7h-2.5V145.7z M219.8,143.1h32v32h-32V143.1z"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#00ACD2" d="M283.6,219.7h2.5v26.7h-2.5V219.7z M278.8,219.7h2.6v26.7h-2.6
|
||||
V219.7z M273.9,219.7h2.6v26.7h-2.6V219.7z M269.1,219.7h2.6v26.7h-2.6V219.7z M264.2,219.7h2.6v26.7h-2.6V219.7z M259.5,219.7
|
||||
h2.5v26.7h-2.5V219.7z M256.8,217h32v32h-32V217z"/>
|
||||
</g>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#D4EDF1" d="M175.9,301c4.9,0,8.8,4,8.8,8.8s-4,8.8-8.8,8.8
|
||||
c-4.9,0-8.8-4-8.8-8.8S171,301,175.9,301"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#3A4D54" d="M175.9,303.5c0.8,0,1.6,0.2,2.3,0.4c-0.8,0.4-1.3,1.3-1.3,2.2
|
||||
c0,1.4,1.2,2.6,2.6,2.6c1,0,1.8-0.5,2.3-1.3c0.3,0.7,0.5,1.6,0.5,2.4c0,3.5-2.8,6.3-6.3,6.3c-3.5,0-6.3-2.8-6.3-6.3
|
||||
C169.6,306.3,172.4,303.5,175.9,303.5"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" fill="#3A4D54" d="M19.6,282.7h193.6h23.9h190.5c0.4,0,1.6,0.1,1.2,0
|
||||
c-9.2-2.2-24.9-6.2-23.5-15.8c0.1-0.7-0.2-0.8-0.6-0.3c-16.6,17.5-54.1,12.2-64.3,3.2c-0.2-0.1-0.4-0.1-0.5,0.1
|
||||
c-11.5,15.4-73.3,9.7-79.3-2.3c-0.1-0.2-0.4-0.3-0.6-0.1c-14.1,15.7-55.7,15.7-69.8,0c-0.2-0.2-0.5-0.1-0.6,0.1
|
||||
c-6,12-67.8,17.7-79.3,2.3c-0.1-0.2-0.3-0.2-0.5-0.1c-10.1,8.9-44.5,14.3-61.2-3c-0.3-0.3-0.8-0.1-0.8,0.4
|
||||
C48.9,277.6,28.1,280.5,19.6,282.7"/>
|
||||
<path fill="#C0DBE0" d="M199.4,364.7c-21.9-10.4-33.9-24.5-40.6-39.9c-8.1,2.3-17.9,3.8-29.3,4.4c-4.3,0.2-8.8,0.4-13.5,0.4
|
||||
c-5.4,0-11.2-0.2-17.2-0.5c20.1,20.1,44.8,35.5,90.5,35.8C192.7,364.9,196.1,364.8,199.4,364.7z"/>
|
||||
<path fill="#D4EDF1" d="M167,339c-3-4.1-6-9.3-8.1-14.2c-8.1,2.3-17.9,3.8-29.3,4.4C137.4,333.4,148.5,337.4,167,339z"/>
|
||||
</g>
|
||||
<circle fill="#3A4D54" cx="34.8" cy="311" r="5.9"/>
|
||||
<path fill="#3A4D54" d="M346.8,297.2l-1-2.8c0,0,5.3-11.7-7.4-11.7c-12.7,0,3.5-4.7,3.5-4.7l21.8,2.8l9.6,6.8l-16.1,4.1
|
||||
L346.8,297.2z"/>
|
||||
<path fill="#3A4D54" d="M78.7,297.2l1-2.8c0,0-5.3-11.7,7.4-11.7s-3.5-4.7-3.5-4.7l-21.8,2.8l-9.6,6.8l16.1,4.1L78.7,297.2z"/>
|
||||
<path fill="#3A4D54" d="M361.7,279.5v4.4l15.6,6.7l45.5-4.1l7.3-3.7c0,0-3.8-0.6-7.3-1.7c-3.6-1.1-15.2-1.6-15.2-1.6h-28.3
|
||||
l-13.6,1.8L361.7,279.5z"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 20 KiB |
BIN
slides/images/docker-ecosystem-2015.png
Normal file
|
After Width: | Height: | Size: 1.0 MiB |
2597
slides/images/docker-engine-architecture.svg
Normal file
|
After Width: | Height: | Size: 183 KiB |
BIN
slides/images/dockerd-and-containerd.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
slides/images/fu-face.jpg
Normal file
|
After Width: | Height: | Size: 150 KiB |
BIN
slides/images/getting-inside.png
Normal file
|
After Width: | Height: | Size: 301 KiB |
BIN
slides/images/tangram.gif
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
slides/images/tesla.jpg
Normal file
|
After Width: | Height: | Size: 484 KiB |
BIN
slides/images/tetris-1.png
Normal file
|
After Width: | Height: | Size: 8.8 KiB |
BIN
slides/images/tetris-2.gif
Normal file
|
After Width: | Height: | Size: 730 KiB |
BIN
slides/images/tetris-3.png
Normal file
|
After Width: | Height: | Size: 24 KiB |
BIN
slides/images/traffic-graph.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
slides/images/trollface.png
Normal file
|
After Width: | Height: | Size: 2.9 KiB |
@@ -1,188 +1,29 @@
|
||||
<html>
|
||||
<head>
|
||||
<title>Container Training</title>
|
||||
<style type="text/css">
|
||||
body {
|
||||
background-image: url("images/container-background.jpg");
|
||||
max-width: 1024px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
table {
|
||||
font-size: 20px;
|
||||
font-family: sans-serif;
|
||||
background: white;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
padding: 20px;
|
||||
}
|
||||
.header {
|
||||
font-size: 300%;
|
||||
font-weight: bold;
|
||||
}
|
||||
.title {
|
||||
font-size: 150%;
|
||||
font-weight: bold;
|
||||
}
|
||||
td {
|
||||
padding: 1px;
|
||||
height: 1em;
|
||||
}
|
||||
td.spacer {
|
||||
height: unset;
|
||||
}
|
||||
td.footer {
|
||||
padding-top: 80px;
|
||||
height: 100px;
|
||||
}
|
||||
td.title {
|
||||
border-bottom: thick solid black;
|
||||
padding-bottom: 2px;
|
||||
padding-top: 20px;
|
||||
}
|
||||
a {
|
||||
text-decoration: none;
|
||||
}
|
||||
a:hover {
|
||||
background: yellow;
|
||||
}
|
||||
a.attend:after {
|
||||
content: "📅 attend";
|
||||
}
|
||||
a.slides:after {
|
||||
content: "📚 slides";
|
||||
}
|
||||
a.chat:after {
|
||||
content: "💬 chat";
|
||||
}
|
||||
a.video:after {
|
||||
content: "📺 video";
|
||||
}
|
||||
</style>
|
||||
<link rel="stylesheet" type="text/css" href="theme.css">
|
||||
<title>Formation/workshop containers, orchestration, et Kubernetes à Paris en avril</title>
|
||||
</head>
|
||||
<body>
|
||||
<div class="main">
|
||||
<table>
|
||||
<tr><td class="header" colspan="4">Container Training</td></tr>
|
||||
|
||||
<tr><td class="title" colspan="4">Coming soon near you</td></tr>
|
||||
|
||||
<!--
|
||||
<td>Nothing for now (stay tuned...)</td>
|
||||
thing for now (stay tuned...)</td>
|
||||
-->
|
||||
|
||||
<tr>
|
||||
<td>March 27, 2018: SREcon Americas — Kubernetes 101</td>
|
||||
<td><a class="slides" href="http://srecon2018.container.training/" /></td>
|
||||
<td><a class="attend" href="https://www.usenix.org/conference/srecon18americas/presentation/kromhout" />
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>April 11-12, 2018: Introduction aux conteneurs (in French)</td>
|
||||
<td> </td>
|
||||
<td><a class="attend" href="http://paris.container.training/intro.html" />
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>April 13, 2018: Introduction à l'orchestration (in French)</td>
|
||||
<td> </td>
|
||||
<td><a class="attend" href="http://paris.container.training/kube.html" />
|
||||
</tr>
|
||||
|
||||
<tr><td class="title" colspan="4">Past workshops</td></tr>
|
||||
|
||||
<tr>
|
||||
<td>Boosterconf: Kubernetes 101</td>
|
||||
<td><a class="slides" href="http://boosterconf2018.container.training/" /></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<!-- February 22, 2018 -->
|
||||
<td>IndexConf: Kubernetes 101</td>
|
||||
<td><a class="slides" href="http://indexconf2018.container.training/" /></td>
|
||||
<!--
|
||||
<td><a class="attend" href="https://developer.ibm.com/indexconf/sessions/#!?id=5474" />
|
||||
-->
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Kubernetes enablement at Docker</td>
|
||||
<td><a class="slides" href="http://kube.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>QCON SF: Orchestrating Microservices with Docker Swarm</td>
|
||||
<td><a class="slides" href="http://qconsf2017swarm.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>QCON SF: Introduction to Docker and Containers</td>
|
||||
<td><a class="slides" href="http://qconsf2017intro.container.training/" /></td>
|
||||
<td><a class="video" href="https://www.youtube.com/playlist?list=PLBAFXs0YjviLgqTum8MkspG_8VzGl6C07" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>LISA17 M7: Getting Started with Docker and Containers</td>
|
||||
<td><a class="slides" href="http://lisa17m7.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>LISA17 T9: Build, Ship, and Run Microservices on a Docker Swarm Cluster</td>
|
||||
<td><a class="slides" href="http://lisa17t9.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Deploying and scaling microservices with Docker and Kubernetes</td>
|
||||
<td><a class="slides" href="http://osseu17.container.training/" /></td>
|
||||
<td><a class="video" href="https://www.youtube.com/playlist?list=PLBAFXs0YjviLrsyydCzxWrIP_1-wkcSHS" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>DockerCon Workshop: from Zero to Hero (full day, B3 M1-2)</td>
|
||||
<td><a class="slides" href="http://dc17eu.container.training/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>DockerCon Workshop: Orchestration for Advanced Users (afternoon, B4 M5-6)</td>
|
||||
<td><a class="slides" href="https://www.bretfisher.com/dockercon17eu/" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>LISA16 T1: Deploying and Scaling Applications with Docker Swarm</td>
|
||||
<td><a class="slides" href="http://lisa16t1.container.training/" /></td>
|
||||
<td><a class="video" href="https://www.youtube.com/playlist?list=PLBAFXs0YjviIDDhr8vIwCN1wkyNGXjbbc" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>PyCon2016: Introduction to Docker and containers</td>
|
||||
<td><a class="slides" href="https://us.pycon.org/2016/site_media/media/tutorial_handouts/DockerSlides.pdf" /></td>
|
||||
<td><a class="video" href="https://www.youtube.com/watch?v=ZVaRK10HBjo" /></td>
|
||||
</tr>
|
||||
|
||||
<tr><td class="title" colspan="4">Self-paced tutorials</td></tr>
|
||||
|
||||
<tr>
|
||||
<td>Introduction to Docker and Containers</td>
|
||||
<td><a class="slides" href="intro-fullday.yml.html" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Container Orchestration with Docker and Swarm</td>
|
||||
<td><a class="slides" href="swarm-selfpaced.yml.html" /></td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Deploying and Scaling Microservices with Docker and Kubernetes</td>
|
||||
<td><a class="slides" href="kube-halfday.yml.html" /></td>
|
||||
</tr>
|
||||
|
||||
<tr><td class="spacer"></td></tr>
|
||||
|
||||
<tr>
|
||||
<td class="footer">
|
||||
Maintained by Jérôme Petazzoni (<a href="https://twitter.com/jpetazzo">@jpetazzo</a>)
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
</div>
|
||||
<div class="index">
|
||||
<div class="block">
|
||||
<h4>Introduction aux conteneurs</h4>
|
||||
<h5>De la pratique … aux bonnes pratiques</h5>
|
||||
<h6>(11-12 avril 2018)</h6>
|
||||
<p>
|
||||
<a href="intro.yml.html">SLIDES</a>
|
||||
<a href="https://gitter.im/jpetazzo/training-20180411-paris">CHATROOM</a>
|
||||
</p>
|
||||
</div>
|
||||
<div class="block">
|
||||
<h4>Introduction à l'orchestration</h4>
|
||||
<h5>Kubernetes par l'exemple</h5>
|
||||
<h6>(13 avril 2018)</h6>
|
||||
<p>
|
||||
<a href="kube.yml.html">SLIDES</a>
|
||||
<a href="https://gitter.im/jpetazzo/training-20180413-paris">CHATROOM</a>
|
||||
<a href="https://docs.google.com/spreadsheets/d/1KiuCVduTf3wf-4-vSmcK96I61WYdDP0BppkOx_XZcjM/edit?ts=5acfc2ef#gid=0">FOODMENU</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Docker and
|
||||
Containers
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-20180411-paris)"
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
@@ -16,14 +15,14 @@ chapters:
|
||||
- common/about-slides.md
|
||||
- common/toc.md
|
||||
- - intro/Docker_Overview.md
|
||||
#- intro/Docker_History.md
|
||||
- intro/Docker_History.md
|
||||
- intro/Training_Environment.md
|
||||
- intro/Installing_Docker.md
|
||||
- intro/First_Containers.md
|
||||
- - intro/First_Containers.md
|
||||
- intro/Background_Containers.md
|
||||
- intro/Start_And_Attach.md
|
||||
- - intro/Initial_Images.md
|
||||
- intro/Building_Images_Interactively.md
|
||||
- intro/Initial_Images.md
|
||||
- - intro/Building_Images_Interactively.md
|
||||
- intro/Building_Images_With_Dockerfiles.md
|
||||
- intro/Cmd_And_Entrypoint.md
|
||||
- intro/Copying_Files_During_Build.md
|
||||
@@ -31,6 +30,8 @@ chapters:
|
||||
- intro/Publishing_To_Docker_Hub.md
|
||||
- intro/Dockerfile_Tips.md
|
||||
- - intro/Naming_And_Inspecting.md
|
||||
- intro/Labels.md
|
||||
- intro/Getting_Inside.md
|
||||
- intro/Container_Networking_Basics.md
|
||||
- intro/Network_Drivers.md
|
||||
- intro/Container_Network_Model.md
|
||||
@@ -39,6 +40,17 @@ chapters:
|
||||
- - intro/Local_Development_Workflow.md
|
||||
- intro/Working_With_Volumes.md
|
||||
- intro/Compose_For_Dev_Stacks.md
|
||||
- intro/Docker_Machine.md
|
||||
- - intro/CI_Pipeline.md
|
||||
- intro/Advanced_Dockerfiles.md
|
||||
- intro/Application_Configuration.md
|
||||
- intro/Dockerfile_Samples.md
|
||||
- intro/Logging.md
|
||||
- - intro/Namespaces_Cgroups.md
|
||||
- intro/Copy_On_Write.md
|
||||
#- intro/Containers_From_Scratch.md
|
||||
- - intro/Container_Engines.md
|
||||
- intro/Ecosystem.md
|
||||
- intro/Orchestration_Overview.md
|
||||
- common/thankyou.md
|
||||
- intro/links.md
|
||||
|
||||
@@ -16,7 +16,7 @@ chapters:
|
||||
- common/about-slides.md
|
||||
- common/toc.md
|
||||
- - intro/Docker_Overview.md
|
||||
#- intro/Docker_History.md
|
||||
- intro/Docker_History.md
|
||||
- intro/Training_Environment.md
|
||||
- intro/Installing_Docker.md
|
||||
- intro/First_Containers.md
|
||||
@@ -31,6 +31,8 @@ chapters:
|
||||
- intro/Publishing_To_Docker_Hub.md
|
||||
- intro/Dockerfile_Tips.md
|
||||
- - intro/Naming_And_Inspecting.md
|
||||
- intro/Labels.md
|
||||
- intro/Getting_Inside.md
|
||||
- intro/Container_Networking_Basics.md
|
||||
- intro/Network_Drivers.md
|
||||
- intro/Container_Network_Model.md
|
||||
@@ -39,6 +41,15 @@ chapters:
|
||||
- - intro/Local_Development_Workflow.md
|
||||
- intro/Working_With_Volumes.md
|
||||
- intro/Compose_For_Dev_Stacks.md
|
||||
- intro/Docker_Machine.md
|
||||
- intro/Advanced_Dockerfiles.md
|
||||
- intro/Application_Configuration.md
|
||||
- intro/Logging.md
|
||||
- - intro/Namespaces_Cgroups.md
|
||||
- intro/Copy_On_Write.md
|
||||
#- intro/Containers_From_Scratch.md
|
||||
- intro/Container_Engines.md
|
||||
- intro/Ecosystem.md
|
||||
- intro/Orchestration_Overview.md
|
||||
- common/thankyou.md
|
||||
- intro/links.md
|
||||
|
||||
1
slides/intro.yml
Symbolic link
@@ -0,0 +1 @@
|
||||
intro-fullday.yml
|
||||
201
slides/intro/Application_Configuration.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# Application Configuration
|
||||
|
||||
There are many ways to provide configuration to containerized applications.
|
||||
|
||||
There is no "best way" — it depends on factors like:
|
||||
|
||||
* configuration size,
|
||||
|
||||
* mandatory and optional parameters,
|
||||
|
||||
* scope of configuration (per container, per app, per customer, per site, etc),
|
||||
|
||||
* frequency of changes in the configuration.
|
||||
|
||||
---
|
||||
|
||||
## Command-line parameters
|
||||
|
||||
```bash
|
||||
docker run jpetazzo/hamba 80 www1:80 www2:80
|
||||
```
|
||||
|
||||
* Configuration is provided through command-line parameters.
|
||||
|
||||
* In the above example, the `ENTRYPOINT` is a script that will:
|
||||
|
||||
- parse the parameters,
|
||||
|
||||
- generate a configuration file,
|
||||
|
||||
- start the actual service.
|
||||
|
||||
---
|
||||
|
||||
## Command-line parameters pros and cons
|
||||
|
||||
* Appropriate for mandatory parameters (without which the service cannot start).
|
||||
|
||||
* Convenient for "toolbelt" services instantiated many times.
|
||||
|
||||
(Because there is no extra step: just run it!)
|
||||
|
||||
* Not great for dynamic configurations or bigger configurations.
|
||||
|
||||
(These things are still possible, but more cumbersome.)
|
||||
|
||||
---
|
||||
|
||||
## Environment variables
|
||||
|
||||
```bash
|
||||
docker run -e ELASTICSEARCH_URL=http://es42:9201/ kibana
|
||||
```
|
||||
|
||||
* Configuration is provided through environment variables.
|
||||
|
||||
* The environment variable can be used straight by the program,
|
||||
<br/>or by a script generating a configuration file.
|
||||
|
||||
---
|
||||
|
||||
## Environment variables pros and cons
|
||||
|
||||
* Appropriate for optional parameters (since the image can provide default values).
|
||||
|
||||
* Also convenient for services instantiated many times.
|
||||
|
||||
(It's as easy as command-line parameters.)
|
||||
|
||||
* Great for services with lots of parameters, but you only want to specify a few.
|
||||
|
||||
(And use default values for everything else.)
|
||||
|
||||
* Ability to introspect possible parameters and their default values.
|
||||
|
||||
* Not great for dynamic configurations.
|
||||
|
||||
---
|
||||
|
||||
## Baked-in configuration
|
||||
|
||||
```
|
||||
FROM prometheus
|
||||
COPY prometheus.conf /etc
|
||||
```
|
||||
|
||||
* The configuration is added to the image.
|
||||
|
||||
* The image may have a default configuration; the new configuration can:
|
||||
|
||||
- replace the default configuration,
|
||||
|
||||
- extend it (if the code can read multiple configuration files).
|
||||
|
||||
---
|
||||
|
||||
## Baked-in configuration pros and cons
|
||||
|
||||
* Allows arbitrary customization and complex configuration files.
|
||||
|
||||
* Requires to write a configuration file. (Obviously!)
|
||||
|
||||
* Requires to build an image to start the service.
|
||||
|
||||
* Requires to rebuild the image to reconfigure the service.
|
||||
|
||||
* Requires to rebuild the image to upgrade the service.
|
||||
|
||||
* Configured images can be stored in registries.
|
||||
|
||||
(Which is great, but requires a registry.)
|
||||
|
||||
---
|
||||
|
||||
## Configuration volume
|
||||
|
||||
```bash
|
||||
docker run -v appconfig:/etc/appconfig myapp
|
||||
```
|
||||
|
||||
* The configuration is stored in a volume.
|
||||
|
||||
* The volume is attached to the container.
|
||||
|
||||
* The image may have a default configuration.
|
||||
|
||||
(But this results in a less "obvious" setup, that needs more documentation.)
|
||||
|
||||
---
|
||||
|
||||
## Configuration volume pros and cons
|
||||
|
||||
* Allows arbitrary customization and complex configuration files.
|
||||
|
||||
* Requires to create a volume for each different configuration.
|
||||
|
||||
* Services with identical configurations can use the same volume.
|
||||
|
||||
* Doesn't require to build / rebuild an image when upgrading / reconfiguring.
|
||||
|
||||
* Configuration can be generated or edited through another container.
|
||||
|
||||
---
|
||||
|
||||
## Dynamic configuration volume
|
||||
|
||||
* This is a powerful pattern for dynamic, complex configurations.
|
||||
|
||||
* The configuration is stored in a volume.
|
||||
|
||||
* The configuration is generated / updated by a special container.
|
||||
|
||||
* The application container detects when the configuration is changed.
|
||||
|
||||
(And automatically reloads the configuration when necessary.)
|
||||
|
||||
* The configuration can be shared between multiple services if needed.
|
||||
|
||||
---
|
||||
|
||||
## Dynamic configuration volume example
|
||||
|
||||
In a first terminal, start a load balancer with an initial configuration:
|
||||
|
||||
```bash
|
||||
$ docker run --name loadbalancer jpetazzo/hamba \
|
||||
80 goo.gl:80
|
||||
```
|
||||
|
||||
In another terminal, reconfigure that load balancer:
|
||||
|
||||
```bash
|
||||
$ docker run --rm --volumes-from loadbalancer jpetazzo/hamba reconfigure \
|
||||
80 google.com:80
|
||||
```
|
||||
|
||||
The configuration could also be updated through e.g. a REST API.
|
||||
|
||||
(The REST API being itself served from another container.)
|
||||
|
||||
---
|
||||
|
||||
## Keeping secrets
|
||||
|
||||
.warning[Ideally, you should not put secrets (passwords, tokens...) in:]
|
||||
|
||||
* command-line or environment variables (anyone with Docker API access can get them),
|
||||
|
||||
* images, especially stored in a registry.
|
||||
|
||||
Secrets management is better handled with an orchestrator (like Swarm or Kubernetes).
|
||||
|
||||
Orchestrators will allow to pass secrets in a "one-way" manner.
|
||||
|
||||
Managing secrets securely without an orchestrator can be contrived.
|
||||
|
||||
E.g.:
|
||||
|
||||
- read the secret on stdin when the service starts,
|
||||
|
||||
- pass the secret using an API endpoint.
|
||||
@@ -93,20 +93,22 @@ The output of `docker build` looks like this:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker build -t figlet .
|
||||
Sending build context to Docker daemon 2.048 kB
|
||||
Sending build context to Docker daemon
|
||||
Step 0 : FROM ubuntu
|
||||
---> e54ca5efa2e9
|
||||
Step 1 : RUN apt-get update
|
||||
---> Running in 840cb3533193
|
||||
---> 7257c37726a1
|
||||
Removing intermediate container 840cb3533193
|
||||
Step 2 : RUN apt-get install figlet
|
||||
---> Running in 2b44df762a2f
|
||||
---> f9e8f1642759
|
||||
Removing intermediate container 2b44df762a2f
|
||||
Successfully built f9e8f1642759
|
||||
docker build -t figlet .
|
||||
Sending build context to Docker daemon 2.048kB
|
||||
Step 1/3 : FROM ubuntu
|
||||
---> f975c5035748
|
||||
Step 2/3 : RUN apt-get update
|
||||
---> Running in e01b294dbffd
|
||||
(...output of the RUN command...)
|
||||
Removing intermediate container e01b294dbffd
|
||||
---> eb8d9b561b37
|
||||
Step 3/3 : RUN apt-get install figlet
|
||||
---> Running in c29230d70f9b
|
||||
(...output of the RUN command...)
|
||||
Removing intermediate container c29230d70f9b
|
||||
---> 0dfd7a253f21
|
||||
Successfully built 0dfd7a253f21
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
]
|
||||
|
||||
@@ -134,20 +136,20 @@ Sending build context to Docker daemon 2.048 kB
|
||||
## Executing each step
|
||||
|
||||
```bash
|
||||
Step 1 : RUN apt-get update
|
||||
---> Running in 840cb3533193
|
||||
Step 2/3 : RUN apt-get update
|
||||
---> Running in e01b294dbffd
|
||||
(...output of the RUN command...)
|
||||
---> 7257c37726a1
|
||||
Removing intermediate container 840cb3533193
|
||||
Removing intermediate container e01b294dbffd
|
||||
---> eb8d9b561b37
|
||||
```
|
||||
|
||||
* A container (`840cb3533193`) is created from the base image.
|
||||
* A container (`e01b294dbffd`) is created from the base image.
|
||||
|
||||
* The `RUN` command is executed in this container.
|
||||
|
||||
* The container is committed into an image (`7257c37726a1`).
|
||||
* The container is committed into an image (`eb8d9b561b37`).
|
||||
|
||||
* The build container (`840cb3533193`) is removed.
|
||||
* The build container (`e01b294dbffd`) is removed.
|
||||
|
||||
* The output of this step will be the base image for the next one.
|
||||
|
||||
|
||||
3
slides/intro/CI_Pipeline.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Building a CI pipeline
|
||||
|
||||
.center[]
|
||||
@@ -64,6 +64,7 @@ Let's build it:
|
||||
$ docker build -t figlet .
|
||||
...
|
||||
Successfully built 042dff3b4a8d
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
|
||||
And run it:
|
||||
@@ -165,6 +166,7 @@ Let's build it:
|
||||
$ docker build -t figlet .
|
||||
...
|
||||
Successfully built 36f588918d73
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
|
||||
And run it:
|
||||
@@ -223,6 +225,7 @@ Let's build it:
|
||||
$ docker build -t figlet .
|
||||
...
|
||||
Successfully built 6e0b6a048a07
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
|
||||
Run it without parameters:
|
||||
|
||||
177
slides/intro/Container_Engines.md
Normal file
@@ -0,0 +1,177 @@
|
||||
# Docker Engine and other container engines
|
||||
|
||||
* We are going to cover the architecture of the Docker Engine.
|
||||
|
||||
* We will also present other container engines.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Docker Engine external architecture
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Docker Engine external architecture
|
||||
|
||||
* The Engine is a daemon (service running in the background).
|
||||
|
||||
* All interaction is done through a REST API exposed over a socket.
|
||||
|
||||
* On Linux, the default socket is a UNIX socket: `/var/run/docker.sock`.
|
||||
|
||||
* We can also use a TCP socket, with optional mutual TLS authentication.
|
||||
|
||||
* The `docker` CLI communicates with the Engine over the socket.
|
||||
|
||||
Note: strictly speaking, the Docker API is not fully REST.
|
||||
|
||||
Some operations (e.g. dealing with interactive containers
|
||||
and log streaming) don't fit the REST model.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Docker Engine internal architecture
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Docker Engine internal architecture
|
||||
|
||||
* Up to Docker 1.10: the Docker Engine is one single monolithic binary.
|
||||
|
||||
* Starting with Docker 1.11, the Engine is split into multiple parts:
|
||||
|
||||
- `dockerd` (REST API, auth, networking, storage)
|
||||
|
||||
- `containerd` (container lifecycle, controlled over a gRPC API)
|
||||
|
||||
- `containerd-shim` (per-container; does almost nothing but allows to restart the Engine without restarting the containers)
|
||||
|
||||
- `runc` (per-container; does the actual heavy lifting to start the container)
|
||||
|
||||
* Some features (like image and snapshot management) are progressively being pushed from `dockerd` to `containerd`.
|
||||
|
||||
For more details, check [this short presentation by Phil Estes](https://www.slideshare.net/PhilEstes/diving-through-the-layers-investigating-runc-containerd-and-the-docker-engine-architecture).
|
||||
|
||||
---
|
||||
|
||||
## Other container engines
|
||||
|
||||
The following list is not exhaustive.
|
||||
|
||||
Furthermore, we limited the scope to Linux containers.
|
||||
|
||||
Containers also exist (sometimes with other names) on Windows, macOS, Solaris, FreeBSD ...
|
||||
|
||||
---
|
||||
|
||||
## LXC
|
||||
|
||||
* The venerable ancestor (first released in 2008).
|
||||
|
||||
* Docker initially relied on it to execute containers.
|
||||
|
||||
* No daemon; no central API.
|
||||
|
||||
* Each container is managed by a `lxc-start` process.
|
||||
|
||||
* Each `lxc-start` process exposes a custom API over a local UNIX socket, allowing to interact with the container.
|
||||
|
||||
* No notion of image (container filesystems have to be managed manually).
|
||||
|
||||
* Networking has to be setup manually.
|
||||
|
||||
---
|
||||
|
||||
## LXD
|
||||
|
||||
* Re-uses LXC code (through liblxc).
|
||||
|
||||
* Builds on top of LXC to offer a more modern experience.
|
||||
|
||||
* Daemon exposing a REST API.
|
||||
|
||||
* Can manage images, snapshots, migrations, networking, storage.
|
||||
|
||||
* "offers a user experience similar to virtual machines but using Linux containers instead."
|
||||
|
||||
---
|
||||
|
||||
## rkt
|
||||
|
||||
* Compares to `runc`.
|
||||
|
||||
* No daemon or API.
|
||||
|
||||
* Strong emphasis on security (through privilege separation).
|
||||
|
||||
* Networking has to be setup separately (e.g. through CNI plugins).
|
||||
|
||||
* Partial image management (pull, but no push).
|
||||
|
||||
(Image build is handled by separate tools.)
|
||||
|
||||
---
|
||||
|
||||
## CRI-O
|
||||
|
||||
* Designed to be used with Kubernetes as a simple, basic runtime.
|
||||
|
||||
* Compares to `containerd`.
|
||||
|
||||
* Daemon exposing a gRPC interface.
|
||||
|
||||
* Controlled using the CRI API (Container Runtime Interface defined by Kubernetes).
|
||||
|
||||
* Needs an underlying OCI runtime (e.g. runc).
|
||||
|
||||
* Handles storage, images, networking (through CNI plugins).
|
||||
|
||||
We're not aware of anyone using it directly (i.e. outside of Kubernetes).
|
||||
|
||||
---
|
||||
|
||||
## systemd
|
||||
|
||||
* "init" system (PID 1) in most modern Linux distributions.
|
||||
|
||||
* Offers tools like `systemd-nspawn` and `machinectl` to manage containers.
|
||||
|
||||
* `systemd-nspawn` is "In many ways it is similar to chroot(1), but more powerful".
|
||||
|
||||
* `machinectl` can interact with VMs and containers managed by systemd.
|
||||
|
||||
* Exposes a DBUS API.
|
||||
|
||||
* Basic image support (tar archives and raw disk images).
|
||||
|
||||
* Network has to be setup manually.
|
||||
|
||||
---
|
||||
|
||||
## Overall ...
|
||||
|
||||
* The Docker Engine is very developer-centric:
|
||||
|
||||
- easy to install
|
||||
|
||||
- easy to use
|
||||
|
||||
- no manual setup
|
||||
|
||||
- first-class image build and transfer
|
||||
|
||||
* As a result, it is a fantastic tool in development environments.
|
||||
|
||||
* On servers:
|
||||
|
||||
- Docker is a good default choice
|
||||
|
||||
- If you use Kubernetes, the engine doesn't matter
|
||||
|
||||
@@ -49,14 +49,14 @@ We will use `docker ps`:
|
||||
|
||||
```bash
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE ... PORTS ...
|
||||
e40ffb406c9e nginx ... 0.0.0.0:32769->80/tcp, 0.0.0.0:32768->443/tcp ...
|
||||
CONTAINER ID IMAGE ... PORTS ...
|
||||
e40ffb406c9e nginx ... 0.0.0.0:32768->80/tcp ...
|
||||
```
|
||||
|
||||
|
||||
* The web server is running on ports 80 and 443 inside the container.
|
||||
* The web server is running on port 80 inside the container.
|
||||
|
||||
* Those ports are mapped to ports 32769 and 32768 on our Docker host.
|
||||
* This port is mapped to port 32768 on our Docker host.
|
||||
|
||||
We will explain the whys and hows of this port mapping.
|
||||
|
||||
@@ -81,7 +81,7 @@ Make sure to use the right port number if it is different
|
||||
from the example below:
|
||||
|
||||
```bash
|
||||
$ curl localhost:32769
|
||||
$ curl localhost:32768
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
@@ -91,6 +91,31 @@ $ curl localhost:32769
|
||||
|
||||
---
|
||||
|
||||
## How does Docker know which port to map?
|
||||
|
||||
* There is metadata in the image telling "this image has something on port 80".
|
||||
|
||||
* We can see that metadata with `docker inspect`:
|
||||
|
||||
```bash
|
||||
$ docker inspect nginx --format {{.Config.ExposedPorts}}
|
||||
map[80/tcp:{}]
|
||||
```
|
||||
|
||||
* This metadata was set in the Dockerfile, with the `EXPOSE` keyword.
|
||||
|
||||
* We can see that with `docker history`:
|
||||
|
||||
```bash
|
||||
$ docker history nginx
|
||||
IMAGE CREATED CREATED BY
|
||||
7f70b30f2cc6 11 days ago /bin/sh -c #(nop) CMD ["nginx" "-g" "…
|
||||
<missing> 11 days ago /bin/sh -c #(nop) STOPSIGNAL [SIGTERM]
|
||||
<missing> 11 days ago /bin/sh -c #(nop) EXPOSE 80/tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Why are we mapping ports?
|
||||
|
||||
* We are out of IPv4 addresses.
|
||||
@@ -113,7 +138,7 @@ There is a command to help us:
|
||||
|
||||
```bash
|
||||
$ docker port <containerID> 80
|
||||
32769
|
||||
32768
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
3
slides/intro/Containers_From_Scratch.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Building containers from scratch
|
||||
|
||||
(This is a "bonus section" done if time permits.)
|
||||
339
slides/intro/Copy_On_Write.md
Normal file
@@ -0,0 +1,339 @@
|
||||
# Copy-on-write filesystems
|
||||
|
||||
Container engines rely on copy-on-write to be able
|
||||
to start containers quickly, regardless of their size.
|
||||
|
||||
We will explain how that works, and review some of
|
||||
the copy-on-write storage systems available on Linux.
|
||||
|
||||
---
|
||||
|
||||
## What is copy-on-write?
|
||||
|
||||
- Copy-on-write is a mechanism allowing to share data.
|
||||
|
||||
- The data appears to be a copy, but is only
|
||||
a link (or reference) to the original data.
|
||||
|
||||
- The actual copy happens only when someone
|
||||
tries to change the shared data.
|
||||
|
||||
- Whoever changes the shared data ends up
|
||||
using their own copy instead of the shared data.
|
||||
|
||||
---
|
||||
|
||||
## A few metaphors
|
||||
|
||||
--
|
||||
|
||||
- First metaphor:
|
||||
<br/>white board and tracing paper
|
||||
|
||||
--
|
||||
|
||||
- Second metaphor:
|
||||
<br/>magic books with shadowy pages
|
||||
|
||||
--
|
||||
|
||||
- Third metaphor:
|
||||
<br/>just-in-time house building
|
||||
|
||||
---
|
||||
|
||||
## Copy-on-write is *everywhere*
|
||||
|
||||
- Process creation with `fork()`.
|
||||
|
||||
- Consistent disk snapshots.
|
||||
|
||||
- Efficient VM provisioning.
|
||||
|
||||
- And, of course, containers.
|
||||
|
||||
---
|
||||
|
||||
## Copy-on-write and containers
|
||||
|
||||
Copy-on-write is essential to give us "convenient" containers.
|
||||
|
||||
- Creating a new container (from an existing image) is "free".
|
||||
|
||||
(Otherwise, we would have to copy the image first.)
|
||||
|
||||
- Customizing a container (by tweaking a few files) is cheap.
|
||||
|
||||
(Adding a 1 KB configuration file to a 1 GB container takes 1 KB, not 1 GB.)
|
||||
|
||||
- We can take snapshots, i.e. have "checkpoints" or "save points"
|
||||
when building images.
|
||||
|
||||
---
|
||||
|
||||
## AUFS overview
|
||||
|
||||
- The original (legacy) copy-on-write filesystem used by first versions of Docker.
|
||||
|
||||
- Combine multiple *branches* in a specific order.
|
||||
|
||||
- Each branch is just a normal directory.
|
||||
|
||||
- You generally have:
|
||||
|
||||
- at least one read-only branch (at the bottom),
|
||||
|
||||
- exactly one read-write branch (at the top).
|
||||
|
||||
(But other fun combinations are possible too!)
|
||||
|
||||
---
|
||||
|
||||
## AUFS operations: opening a file
|
||||
|
||||
- With `O_RDONLY` - read-only access:
|
||||
|
||||
- look it up in each branch, starting from the top
|
||||
|
||||
- open the first one we find
|
||||
|
||||
- With `O_WRONLY` or `O_RDWR` - write access:
|
||||
|
||||
- if the file exists on the top branch: open it
|
||||
|
||||
- if the file exists on another branch: "copy up"
|
||||
<br/>
|
||||
(i.e. copy the file to the top branch and open the copy)
|
||||
|
||||
- if the file doesn't exist on any branch: create it on the top branch
|
||||
|
||||
That "copy-up" operation can take a while if the file is big!
|
||||
|
||||
---
|
||||
|
||||
## AUFS operations: deleting a file
|
||||
|
||||
- A *whiteout* file is created.
|
||||
|
||||
- This is similar to the concept of "tombstones" used in some data systems.
|
||||
|
||||
```
|
||||
# docker run ubuntu rm /etc/shadow
|
||||
|
||||
# ls -la /var/lib/docker/aufs/diff/$(docker ps --no-trunc -lq)/etc
|
||||
total 8
|
||||
drwxr-xr-x 2 root root 4096 Jan 27 15:36 .
|
||||
drwxr-xr-x 5 root root 4096 Jan 27 15:36 ..
|
||||
-r--r--r-- 2 root root 0 Jan 27 15:36 .wh.shadow
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## AUFS performance
|
||||
|
||||
- AUFS `mount()` is fast, so creation of containers is quick.
|
||||
|
||||
- Read/write access has native speeds.
|
||||
|
||||
- But initial `open()` is expensive in two scenarios:
|
||||
|
||||
- when writing big files (log files, databases ...),
|
||||
|
||||
- when searching many directories (PATH, classpath, etc.) over many layers.
|
||||
|
||||
- Protip: when we built dotCloud, we ended up putting
|
||||
all important data on *volumes*.
|
||||
|
||||
- When starting the same container multiple times:
|
||||
|
||||
- the data is loaded only once from disk, and cached only once in memory;
|
||||
|
||||
- but `dentries` will be duplicated.
|
||||
|
||||
---
|
||||
|
||||
## Device Mapper
|
||||
|
||||
Device Mapper is a rich subsystem with many features.
|
||||
|
||||
It can be used for: RAID, encrypted devices, snapshots, and more.
|
||||
|
||||
In the context of containers (and Docker in particular), "Device Mapper"
|
||||
means:
|
||||
|
||||
"the Device Mapper system + its *thin provisioning target*"
|
||||
|
||||
If you see the abbreviation "thinp" it stands for "thin provisioning".
|
||||
|
||||
---
|
||||
|
||||
## Device Mapper principles
|
||||
|
||||
- Copy-on-write happens on the *block* level
|
||||
(instead of the *file* level).
|
||||
|
||||
- Each container and each image get their own block device.
|
||||
|
||||
- At any given time, it is possible to take a snapshot:
|
||||
|
||||
- of an existing container (to create a frozen image),
|
||||
|
||||
- of an existing image (to create a container from it).
|
||||
|
||||
- If a block has never been written to:
|
||||
|
||||
- it's assumed to be all zeros,
|
||||
|
||||
- it's not allocated on disk.
|
||||
|
||||
(That last property is the reason for the name "thin" provisioning.)
|
||||
|
||||
---
|
||||
|
||||
## Device Mapper operational details
|
||||
|
||||
- Two storage areas are needed:
|
||||
one for *data*, another for *metadata*.
|
||||
|
||||
- "data" is also called the "pool"; it's just a big pool of blocks.
|
||||
|
||||
(Docker uses the smallest possible block size, 64 KB.)
|
||||
|
||||
- "metadata" contains the mappings between virtual offsets (in the
|
||||
snapshots) and physical offsets (in the pool).
|
||||
|
||||
- Each time a new block (or a copy-on-write block) is written,
|
||||
a block is allocated from the pool.
|
||||
|
||||
- When there are no more blocks in the pool, attempts to write
|
||||
will stall until the pool is increased (or the write operation
|
||||
aborted).
|
||||
|
||||
- In other words: when running out of space, containers are
|
||||
frozen, but operations will resume as soon as space is available.
|
||||
|
||||
---
|
||||
|
||||
## Device Mapper performance
|
||||
|
||||
- By default, Docker puts data and metadata on a loop device
|
||||
backed by a sparse file.
|
||||
|
||||
- This is great from a usability point of view,
|
||||
since zero configuration is needed.
|
||||
|
||||
- But it is terrible from a performance point of view:
|
||||
|
||||
- each time a container writes to a new block,
|
||||
- a block has to be allocated from the pool,
|
||||
- and when it's written to,
|
||||
- a block has to be allocated from the sparse file,
|
||||
- and sparse file performance isn't great anyway.
|
||||
|
||||
- If you use Device Mapper, make sure to put data (and metadata)
|
||||
on devices!
|
||||
|
||||
---
|
||||
|
||||
## BTRFS principles
|
||||
|
||||
- BTRFS is a filesystem (like EXT4, XFS, NTFS...) with built-in snapshots.
|
||||
|
||||
- The "copy-on-write" happens at the filesystem level.
|
||||
|
||||
- BTRFS integrates the snapshot and block pool management features
|
||||
at the filesystem level.
|
||||
|
||||
(Instead of the block level for Device Mapper.)
|
||||
|
||||
- In practice, we create a "subvolume" and
|
||||
later take a "snapshot" of that subvolume.
|
||||
|
||||
Imagine: `mkdir` with Super Powers and `cp -a` with Super Powers.
|
||||
|
||||
- These operations can be executed with the `btrfs` CLI tool.
|
||||
|
||||
---
|
||||
|
||||
## BTRFS in practice with Docker
|
||||
|
||||
- Docker can use BTRFS and its snapshotting features to store container images.
|
||||
|
||||
- The only requirement is that `/var/lib/docker` is on a BTRFS filesystem.
|
||||
|
||||
(Or, the directory specified with the `--data-root` flag when starting the engine.)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## BTRFS quirks
|
||||
|
||||
- BTRFS works by dividing its storage in *chunks*.
|
||||
|
||||
- A chunk can contain data or metadata.
|
||||
|
||||
- You can run out of chunks (and get `No space left on device`)
|
||||
even though `df` shows space available.
|
||||
|
||||
(Because chunks are only partially allocated.)
|
||||
|
||||
- Quick fix:
|
||||
|
||||
```
|
||||
# btrfs filesys balance start -dusage=1 /var/lib/docker
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Overlay2
|
||||
|
||||
- Overlay2 is very similar to AUFS.
|
||||
|
||||
- However, it has been merged in "upstream" kernel.
|
||||
|
||||
- It is therefore available on all modern kernels.
|
||||
|
||||
(AUFS was available on Debian and Ubuntu, but required custom kernels on other distros.)
|
||||
|
||||
- It is simpler than AUFS (it can only have two branches, called "layers").
|
||||
|
||||
- The container engine abstracts this detail, so this is not a concern.
|
||||
|
||||
- Overlay2 storage drivers generally use hard links between layers.
|
||||
|
||||
- This improves `stat()` and `open()` performance, at the expense of inode usage.
|
||||
|
||||
---
|
||||
|
||||
## ZFS
|
||||
|
||||
- ZFS is similar to BTRFS (at least from a container user's perspective).
|
||||
|
||||
- Pros:
|
||||
|
||||
- high performance
|
||||
- high reliability (with e.g. data checksums)
|
||||
- optional data compression and deduplication
|
||||
|
||||
- Cons:
|
||||
|
||||
- high memory usage
|
||||
- not in upstream kernel
|
||||
|
||||
- It is available as a kernel module or through FUSE.
|
||||
|
||||
---
|
||||
|
||||
## Which one is the best?
|
||||
|
||||
- Eventually, overlay2 should be the best option.
|
||||
|
||||
- It is available on all modern systems.
|
||||
|
||||
- Its memory usage is better than Device Mapper, BTRFS, or ZFS.
|
||||
|
||||
- The remarks about *write performance* shouldn't bother you:
|
||||
<br/>
|
||||
data should always be stored in volumes anyway!
|
||||
|
||||
81
slides/intro/Docker_Machine.md
Normal file
@@ -0,0 +1,81 @@
|
||||
# Managing hosts with Docker Machine
|
||||
|
||||
- Docker Machine is a tool to provision and manage Docker hosts.
|
||||
|
||||
- It automates the creation of a virtual machine:
|
||||
|
||||
- locally, with a tool like VirtualBox or VMware;
|
||||
|
||||
- on a public cloud like AWS EC2, Azure, Digital Ocean, GCP, etc.;
|
||||
|
||||
- on a private cloud like OpenStack.
|
||||
|
||||
- It can also configure existing machines through an SSH connection.
|
||||
|
||||
- It can manage as many hosts as you want, with as many "drivers" as you want.
|
||||
|
||||
---
|
||||
|
||||
## Docker Machine workflow
|
||||
|
||||
1) Prepare the environment: setup VirtualBox, obtain cloud credentials ...
|
||||
|
||||
2) Create hosts with `docker-machine create -d drivername machinename`.
|
||||
|
||||
3) Use a specific machine with `eval $(docker-machine env machinename)`.
|
||||
|
||||
4) Profit!
|
||||
|
||||
---
|
||||
|
||||
## Environment variables
|
||||
|
||||
- Most of the tools (CLI, libraries...) connecting to the Docker API can use environment variables.
|
||||
|
||||
- These variables are:
|
||||
|
||||
- `DOCKER_HOST` (indicates address+port to connect to, or path of UNIX socket)
|
||||
|
||||
- `DOCKER_TLS_VERIFY` (indicates that TLS mutual auth should be used)
|
||||
|
||||
- `DOCKER_CERT_PATH` (path to the keypair and certificate to use for auth)
|
||||
|
||||
- `docker-machine env ...` will generate the variables needed to connect to a host.
|
||||
|
||||
- `eval $(docker-machine env ...)` sets these variables in the current shell.
|
||||
|
||||
---
|
||||
|
||||
## Host management features
|
||||
|
||||
With `docker-machine`, we can:
|
||||
|
||||
- upgrade a host to the latest version of the Docker Engine,
|
||||
|
||||
- start/stop/restart hosts,
|
||||
|
||||
- get a shell on a remote machine (with SSH),
|
||||
|
||||
- copy files to/from remote machines (with SCP),
|
||||
|
||||
- mount a remote host's directory on the local machine (with SSHFS),
|
||||
|
||||
- ...
|
||||
|
||||
---
|
||||
|
||||
## The `generic` driver
|
||||
|
||||
When provisioning a new host, `docker-machine` executes these steps:
|
||||
|
||||
1) Create the host using a cloud or hypervisor API.
|
||||
|
||||
2) Connect to the host over SSH.
|
||||
|
||||
3) Install and configure Docker on the host.
|
||||
|
||||
With the `generic` driver, we provide the IP address of an existing host
|
||||
(instead of e.g. cloud credentials) and we omit the first step.
|
||||
|
||||
This allows to provision physical machines, or VMs provided by a 3rd
|
||||
party, or use a cloud for which we don't have a provisioning API.
|
||||
@@ -72,7 +72,7 @@ class: pic
|
||||
|
||||
class: pic
|
||||
|
||||
## The parallel with the shipping indsutry
|
||||
## The parallel with the shipping industry
|
||||
|
||||

|
||||
|
||||
|
||||
5
slides/intro/Dockerfile_Samples.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Dockerfile Samples
|
||||
|
||||
---
|
||||
|
||||
## (Demo in terminal)
|
||||
173
slides/intro/Ecosystem.md
Normal file
@@ -0,0 +1,173 @@
|
||||
# The container ecosystem
|
||||
|
||||
In this chapter, we will talk about a few actors of the container ecosystem.
|
||||
|
||||
We have (arbitrarily) decided to focus on two groups:
|
||||
|
||||
- the Docker ecosystem,
|
||||
|
||||
- the Cloud Native Computing Foundation (CNCF) and its projects.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## The Docker ecosystem
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Moby vs. Docker
|
||||
|
||||
- Docker Inc. (the company) started Docker (the open source project).
|
||||
|
||||
- At some point, it became necessary to differentiate between:
|
||||
|
||||
- the open source project (code base, contributors...),
|
||||
|
||||
- the product that we use to run containers (the engine),
|
||||
|
||||
- the platform that we use to manage containerized applications,
|
||||
|
||||
- the brand.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Exercise in brand management
|
||||
|
||||
Questions:
|
||||
|
||||
--
|
||||
|
||||
- What is the brand of the car on the previous slide?
|
||||
|
||||
--
|
||||
|
||||
- What kind of engine does it have?
|
||||
|
||||
--
|
||||
|
||||
- Would you say that it's a safe or unsafe car?
|
||||
|
||||
--
|
||||
|
||||
- Harder question: can you drive from the US West to East coasts with it?
|
||||
|
||||
--
|
||||
|
||||
The answers to these questions are part of the Tesla brand.
|
||||
|
||||
---
|
||||
|
||||
## What if ...
|
||||
|
||||
- The blueprints for Tesla cars were available for free.
|
||||
|
||||
- You could legally build your own Tesla.
|
||||
|
||||
- You were allowed to customize it entirely.
|
||||
|
||||
(Put a combustion engine, drive it with a game pad ...)
|
||||
|
||||
- You could even sell the customized versions.
|
||||
|
||||
--
|
||||
|
||||
- ... And call your customized version "Tesla".
|
||||
|
||||
--
|
||||
|
||||
Would we give the same answers to the questions on the previous slide?
|
||||
|
||||
---
|
||||
|
||||
## From Docker to Moby
|
||||
|
||||
- Docker Inc. decided to split the brand.
|
||||
|
||||
- Moby is the open source project.
|
||||
|
||||
(= Components and libraries that you can use, reuse, customize, sell ...)
|
||||
|
||||
- Docker is the product.
|
||||
|
||||
(= Software that you can use, buy support contracts ...)
|
||||
|
||||
- Docker is made with Moby.
|
||||
|
||||
- When Docker Inc. improves the Docker products, it improves Moby.
|
||||
|
||||
(And vice versa.)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Other examples
|
||||
|
||||
- *Read the Docs* is an open source project to generate and host documentation.
|
||||
|
||||
- You can host it yourself (on your own servers).
|
||||
|
||||
- You can also get hosted on readthedocs.org.
|
||||
|
||||
- The maintainers of the open source project often receive
|
||||
support requests from users of the hosted product ...
|
||||
|
||||
- ... And the maintainers of the hosted product often
|
||||
receive support requests from users of self-hosted instances.
|
||||
|
||||
- Another example:
|
||||
|
||||
*WordPress.com is a blogging platform that is owned and hosted online by
|
||||
Automattic. It is run on WordPress, an open source piece of software used by
|
||||
bloggers. (Wikipedia)*
|
||||
|
||||
---
|
||||
|
||||
## Docker CE vs Docker EE
|
||||
|
||||
- Docker CE = Community Edition.
|
||||
|
||||
- Available on most Linux distros, Mac, Windows.
|
||||
|
||||
- Optimized for developers and ease of use.
|
||||
|
||||
- Docker EE = Enterprise Edition.
|
||||
|
||||
- Available only on a subset of Linux distros + Windows servers.
|
||||
|
||||
(Only available when there is a strong partnership to offer enterprise-class support.)
|
||||
|
||||
- Optimized for production use.
|
||||
|
||||
- Comes with additional components: security scanning, RBAC ...
|
||||
|
||||
---
|
||||
|
||||
## The CNCF
|
||||
|
||||
- Non-profit, part of the Linux Foundation; founded in December 2015.
|
||||
|
||||
*The Cloud Native Computing Foundation builds sustainable ecosystems and fosters
|
||||
a community around a constellation of high-quality projects that orchestrate
|
||||
containers as part of a microservices architecture.*
|
||||
|
||||
*CNCF is an open source software foundation dedicated to making cloud-native computing universal and sustainable.*
|
||||
|
||||
- Home of Kubernetes (and many other projects now).
|
||||
|
||||
- Funded by corporate memberships.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
227
slides/intro/Getting_Inside.md
Normal file
@@ -0,0 +1,227 @@
|
||||
class: title
|
||||
|
||||
# Getting inside a container
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Objectives
|
||||
|
||||
On a traditional server or VM, we sometimes need to:
|
||||
|
||||
* log into the machine (with SSH or on the console),
|
||||
|
||||
* analyze the disks (by removing them or rebooting with a rescue system).
|
||||
|
||||
In this chapter, we will see how to do that with containers.
|
||||
|
||||
---
|
||||
|
||||
## Getting a shell
|
||||
|
||||
Every once in a while, we want to log into a machine.
|
||||
|
||||
In a perfect world, this shouldn't be necessary.
|
||||
|
||||
* You need to install or update packages (and their configuration)?
|
||||
|
||||
Use configuration management. (e.g. Ansible, Chef, Puppet, Salt...)
|
||||
|
||||
* You need to view logs and metrics?
|
||||
|
||||
Collect and access them through a centralized platform.
|
||||
|
||||
In the real world, though ... we often need shell access!
|
||||
|
||||
---
|
||||
|
||||
## Not getting a shell
|
||||
|
||||
Even without a perfect deployment system, we can do many operations without getting a shell.
|
||||
|
||||
* Installing packages can (and should) be done in the container image.
|
||||
|
||||
* Configuration can be done at the image level, or when the container starts.
|
||||
|
||||
* Dynamic configuration can be stored in a volume (shared with another container).
|
||||
|
||||
* Logs written to stdout are automatically collected by the Docker Engine.
|
||||
|
||||
* Other logs can be written to a shared volume.
|
||||
|
||||
* Process information and metrics are visible from the host.
|
||||
|
||||
_Let's save logging, volumes ... for later, but let's have a look at process information!_
|
||||
|
||||
---
|
||||
|
||||
## Viewing container processes from the host
|
||||
|
||||
If you run Docker on Linux, container processes are visible on the host.
|
||||
|
||||
```bash
|
||||
$ ps faux | less
|
||||
```
|
||||
|
||||
* Scroll around the output of this command.
|
||||
|
||||
* You should see the `jpetazzo/clock` container.
|
||||
|
||||
* A containerized process is just like any other process on the host.
|
||||
|
||||
* We can use tools like `lsof`, `strace`, `gdb` ... to analyze them.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's the difference between a container process and a host process?
|
||||
|
||||
* Each process (containerized or not) belongs to *namespaces* and *cgroups*.
|
||||
|
||||
* The namespaces and cgroups determine what a process can "see" and "do".
|
||||
|
||||
* Analogy: each process (containerized or not) runs with a specific UID (user ID).
|
||||
|
||||
* UID=0 is root, and has elevated privileges. Other UIDs are normal users.
|
||||
|
||||
_We will give more details about namespaces and cgroups later._
|
||||
|
||||
---
|
||||
|
||||
## Getting a shell in a running container
|
||||
|
||||
* Sometimes, we need to get a shell anyway.
|
||||
|
||||
* We _could_ run some SSH server in the container ...
|
||||
|
||||
* But it is easier to use `docker exec`.
|
||||
|
||||
```bash
|
||||
$ docker exec -ti ticktock sh
|
||||
```
|
||||
|
||||
* This creates a new process (running `sh`) _inside_ the container.
|
||||
|
||||
* This can also be done "manually" with the tool `nsenter`.
|
||||
|
||||
---
|
||||
|
||||
## Caveats
|
||||
|
||||
* The tool that you want to run needs to exist in the container.
|
||||
|
||||
* Some tools (like `ip netns exec`) let you attach to _one_ namespace at a time.
|
||||
|
||||
  (This lets you e.g. set up network interfaces, even if you don't have `ifconfig` or `ip` in the container.)
|
||||
|
||||
* Most importantly: the container needs to be running.
|
||||
|
||||
* What if the container is stopped or crashed?
|
||||
|
||||
---
|
||||
|
||||
## Getting a shell in a stopped container
|
||||
|
||||
* A stopped container is only _storage_ (like a disk drive).
|
||||
|
||||
* We cannot SSH into a disk drive or USB stick!
|
||||
|
||||
* We need to connect the disk to a running machine.
|
||||
|
||||
* How does that translate into the container world?
|
||||
|
||||
---
|
||||
|
||||
## Analyzing a stopped container
|
||||
|
||||
As an exercise, we are going to try to find out what's wrong with `jpetazzo/crashtest`.
|
||||
|
||||
```bash
|
||||
docker run jpetazzo/crashtest
|
||||
```
|
||||
|
||||
The container starts, but then stops immediately, without any output.
|
||||
|
||||
What would MacGyver do?
|
||||
|
||||
First, let's check the status of that container.
|
||||
|
||||
```bash
|
||||
docker ps -l
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Viewing filesystem changes
|
||||
|
||||
* We can use `docker diff` to see files that were added / changed / removed.
|
||||
|
||||
```bash
|
||||
docker diff <container_id>
|
||||
```
|
||||
|
||||
* The container ID was shown by `docker ps -l`.
|
||||
|
||||
* We can also see it with `docker ps -lq`.
|
||||
|
||||
* The output of `docker diff` shows some interesting log files!
|
||||
|
||||
---
|
||||
|
||||
## Accessing files
|
||||
|
||||
* We can extract files with `docker cp`.
|
||||
|
||||
```bash
|
||||
docker cp <container_id>:/var/log/nginx/error.log .
|
||||
```
|
||||
|
||||
* Then we can look at that log file.
|
||||
|
||||
```bash
|
||||
cat error.log
|
||||
```
|
||||
|
||||
(The directory `/run/nginx` doesn't exist.)
|
||||
|
||||
---
|
||||
|
||||
## Exploring a crashed container
|
||||
|
||||
* We can restart a container with `docker start` ...
|
||||
|
||||
* ... But it will probably crash again immediately!
|
||||
|
||||
* We cannot specify a different program to run with `docker start`
|
||||
|
||||
* But we can create a new image from the crashed container
|
||||
|
||||
```bash
|
||||
docker commit <container_id> debugimage
|
||||
```
|
||||
|
||||
* Then we can run a new container from that image, with a custom entrypoint
|
||||
|
||||
```bash
|
||||
docker run -ti --entrypoint sh debugimage
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Obtaining a complete dump
|
||||
|
||||
* We can also dump the entire filesystem of a container.
|
||||
|
||||
* This is done with `docker export`.
|
||||
|
||||
* It generates a tar archive.
|
||||
|
||||
```bash
|
||||
docker export <container_id> | tar tv
|
||||
```
|
||||
|
||||
This will give a detailed listing of the content of the container.
|
||||
@@ -29,7 +29,7 @@ We can arbitrarily distinguish:
|
||||
|
||||
* Installing Docker on an existing Linux machine (physical or VM)
|
||||
|
||||
* Installing Docker on MacOS or Windows
|
||||
* Installing Docker on macOS or Windows
|
||||
|
||||
* Installing Docker on a fleet of cloud VMs
|
||||
|
||||
@@ -55,9 +55,31 @@ We can arbitrarily distinguish:
|
||||
|
||||
---
|
||||
|
||||
## Installing Docker on MacOS and Windows
|
||||
class: extra-details
|
||||
|
||||
* On MacOS, the recommended method is to use Docker4Mac:
|
||||
## Docker Inc. packages vs distribution packages
|
||||
|
||||
* Docker Inc. releases new versions monthly (edge) and quarterly (stable)
|
||||
|
||||
* Releases are immediately available on Docker Inc.'s package repositories
|
||||
|
||||
* Linux distros don't always update to the latest Docker version
|
||||
|
||||
(Sometimes, updating would break their guidelines for major/minor upgrades)
|
||||
|
||||
* Sometimes, some distros have carried packages with custom patches
|
||||
|
||||
* Sometimes, these patches added critical security bugs ☹
|
||||
|
||||
* Installing through Docker Inc.'s repositories is a bit of extra work …
|
||||
|
||||
… but it is generally worth it!
|
||||
|
||||
---
|
||||
|
||||
## Installing Docker on macOS and Windows
|
||||
|
||||
* On macOS, the recommended method is to use Docker4Mac:
|
||||
|
||||
https://docs.docker.com/docker-for-mac/install/
|
||||
|
||||
@@ -71,7 +93,7 @@ We can arbitrarily distinguish:
|
||||
|
||||
---
|
||||
|
||||
## Running Docker on MacOS and Windows
|
||||
## Running Docker on macOS and Windows
|
||||
|
||||
When you execute `docker version` from the terminal:
|
||||
|
||||
|
||||
82
slides/intro/Labels.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Labels
|
||||
|
||||
* Labels allow us to attach arbitrary metadata to containers.
|
||||
|
||||
* Labels are key/value pairs.
|
||||
|
||||
* They are specified at container creation.
|
||||
|
||||
* You can query them with `docker inspect`.
|
||||
|
||||
* They can also be used as filters with some commands (e.g. `docker ps`).
|
||||
|
||||
---
|
||||
|
||||
## Using labels
|
||||
|
||||
Let's create a few containers with a label `owner`.
|
||||
|
||||
```bash
|
||||
docker run -d -l owner=alice nginx
|
||||
docker run -d -l owner=bob nginx
|
||||
docker run -d -l owner nginx
|
||||
```
|
||||
|
||||
We didn't specify a value for the `owner` label in the last example.
|
||||
|
||||
This is equivalent to setting the value to be an empty string.
|
||||
|
||||
---
|
||||
|
||||
## Querying labels
|
||||
|
||||
We can view the labels with `docker inspect`.
|
||||
|
||||
```bash
|
||||
$ docker inspect $(docker ps -lq) | grep -A3 Labels
|
||||
"Labels": {
|
||||
"maintainer": "NGINX Docker Maintainers <docker-maint@nginx.com>",
|
||||
"owner": ""
|
||||
},
|
||||
```
|
||||
|
||||
We can use the `--format` flag to list the value of a label.
|
||||
|
||||
```bash
|
||||
$ docker inspect $(docker ps -q) --format 'OWNER={{.Config.Labels.owner}}'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using labels to select containers
|
||||
|
||||
We can list containers having a specific label.
|
||||
|
||||
```bash
|
||||
$ docker ps --filter label=owner
|
||||
```
|
||||
|
||||
Or we can list containers having a specific label with a specific value.
|
||||
|
||||
```bash
|
||||
$ docker ps --filter label=owner=alice
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Use-cases for labels
|
||||
|
||||
|
||||
* HTTP vhost of a web app or web service.
|
||||
|
||||
(The label is used to generate the configuration for NGINX, HAProxy, etc.)
|
||||
|
||||
* Backup schedule for a stateful service.
|
||||
|
||||
(The label is used by a cron job to determine if/when to backup container data.)
|
||||
|
||||
* Service ownership.
|
||||
|
||||
(To determine internal cross-billing, or who to page in case of outage.)
|
||||
|
||||
* etc.
|
||||
273
slides/intro/Logging.md
Normal file
@@ -0,0 +1,273 @@
|
||||
# Logging
|
||||
|
||||
In this chapter, we will explain the different ways to send logs from containers.
|
||||
|
||||
We will then show one particular method in action, using ELK and Docker's logging drivers.
|
||||
|
||||
---
|
||||
|
||||
## There are many ways to send logs
|
||||
|
||||
- The simplest method is to write on the standard output and error.
|
||||
|
||||
- Applications can write their logs to local files.
|
||||
|
||||
(The files are usually periodically rotated and compressed.)
|
||||
|
||||
- It is also very common (on UNIX systems) to use syslog.
|
||||
|
||||
(The logs are collected by syslogd or an equivalent like journald.)
|
||||
|
||||
- In large applications with many components, it is common to use a logging service.
|
||||
|
||||
(The code uses a library to send messages to the logging service.)
|
||||
|
||||
*All these methods are available with containers.*
|
||||
|
||||
---
|
||||
|
||||
## Writing on stdout/stderr
|
||||
|
||||
- The standard output and error of containers is managed by the container engine.
|
||||
|
||||
- This means that each line written by the container is received by the engine.
|
||||
|
||||
- The engine can then do "whatever" with these log lines.
|
||||
|
||||
- With Docker, the default configuration is to write the logs to local files.
|
||||
|
||||
- The files can then be queried with e.g. `docker logs` (and the equivalent API request).
|
||||
|
||||
- This can be customized, as we will see later.
|
||||
|
||||
---
|
||||
|
||||
## Writing to local files
|
||||
|
||||
- If we write to files, it is possible to access them but cumbersome.
|
||||
|
||||
(We have to use `docker exec` or `docker cp`.)
|
||||
|
||||
- Furthermore, if the container is stopped, we cannot use `docker exec`.
|
||||
|
||||
- If the container is deleted, the logs disappear.
|
||||
|
||||
- What should we do for programs that can only log to local files?
|
||||
|
||||
--
|
||||
|
||||
- There are multiple solutions.
|
||||
|
||||
---
|
||||
|
||||
## Using a volume or bind mount
|
||||
|
||||
- Instead of writing logs to a normal directory, we can place them on a volume.
|
||||
|
||||
- The volume can be accessed by other containers.
|
||||
|
||||
- We can run a program like `filebeat` in another container accessing the same volume.
|
||||
|
||||
(`filebeat` reads local log files continuously, like `tail -f`, and sends them
|
||||
to a centralized system like ElasticSearch.)
|
||||
|
||||
- We can also use a bind mount, e.g. `-v /var/log/containers/www:/var/log/tomcat`.
|
||||
|
||||
- The container will write log files to a directory mapped to a host directory.
|
||||
|
||||
- The log files will appear on the host and be consumable directly from the host.
|
||||
|
||||
---
|
||||
|
||||
## Using logging services
|
||||
|
||||
- We can use logging frameworks (like log4j or the Python `logging` package).
|
||||
|
||||
- These frameworks require some code and/or configuration in our application code.
|
||||
|
||||
- These mechanisms can be used identically inside or outside of containers.
|
||||
|
||||
- Sometimes, we can leverage containerized networking to simplify their setup.
|
||||
|
||||
- For instance, our code can send log messages to a server named `log`.
|
||||
|
||||
- The name `log` will resolve to different addresses in development, production, etc.
|
||||
|
||||
---
|
||||
|
||||
## Using syslog
|
||||
|
||||
- What if our code (or the program we are running in containers) uses syslog?
|
||||
|
||||
- One possibility is to run a syslog daemon in the container.
|
||||
|
||||
- Then that daemon can be setup to write to local files or forward to the network.
|
||||
|
||||
- Under the hood, syslog clients connect to a local UNIX socket, `/dev/log`.
|
||||
|
||||
- We can expose a syslog socket to the container (by using a volume or bind-mount).
|
||||
|
||||
- Then just create a symlink from `/dev/log` to the syslog socket.
|
||||
|
||||
- Voilà!
|
||||
|
||||
---
|
||||
|
||||
## Using logging drivers
|
||||
|
||||
- If we log to stdout and stderr, the container engine receives the log messages.
|
||||
|
||||
- The Docker Engine has a modular logging system with many plugins, including:
|
||||
|
||||
- json-file (the default one)
|
||||
- syslog
|
||||
- journald
|
||||
- gelf
|
||||
- fluentd
|
||||
- splunk
|
||||
- etc.
|
||||
|
||||
- Each plugin can process and forward the logs to another process or system.
|
||||
|
||||
---
|
||||
|
||||
## Demo: sending logs to ELK
|
||||
|
||||
- We are going to deploy an ELK stack.
|
||||
|
||||
- It will accept logs over a GELF socket.
|
||||
|
||||
- We will run a few containers with the `gelf` logging driver.
|
||||
|
||||
- We will then see our logs in Kibana, the web interface provided by ELK.
|
||||
|
||||
*Important foreword: this is not an "official" or "recommended"
|
||||
setup; it is just an example. We used ELK in this demo because
|
||||
it's a popular setup and we keep being asked about it; but you
|
||||
will have equal success with Fluent or other logging stacks!*
|
||||
|
||||
---
|
||||
|
||||
## What's in an ELK stack?
|
||||
|
||||
- ELK is three components:
|
||||
|
||||
- ElasticSearch (to store and index log entries)
|
||||
|
||||
- Logstash (to receive log entries from various
|
||||
sources, process them, and forward them to various
|
||||
destinations)
|
||||
|
||||
- Kibana (to view/search log entries with a nice UI)
|
||||
|
||||
- The only component that we will configure is Logstash
|
||||
|
||||
- We will accept log entries using the GELF protocol
|
||||
|
||||
- Log entries will be stored in ElasticSearch,
|
||||
<br/>and displayed on Logstash's stdout for debugging
|
||||
|
||||
---
|
||||
|
||||
## Running ELK
|
||||
|
||||
- We are going to use a Compose file describing the ELK stack.
|
||||
|
||||
```bash
|
||||
$ cd ~/container.training/stacks
|
||||
$ docker-compose -f elk.yml up -d
|
||||
```
|
||||
|
||||
- Let's have a look at the Compose file while it's deploying.
|
||||
|
||||
---
|
||||
|
||||
## Our basic ELK deployment
|
||||
|
||||
- We are using images from the Docker Hub: `elasticsearch`, `logstash`, `kibana`.
|
||||
|
||||
- We don't need to change the configuration of ElasticSearch.
|
||||
|
||||
- We need to tell Kibana the address of ElasticSearch:
|
||||
|
||||
- it is set with the `ELASTICSEARCH_URL` environment variable,
|
||||
|
||||
  - by default it is `localhost:9200`, we change it to `elasticsearch:9200`.
|
||||
|
||||
- We need to configure Logstash:
|
||||
|
||||
- we pass the entire configuration file through command-line arguments,
|
||||
|
||||
- this is a hack so that we don't have to create an image just for the config.
|
||||
|
||||
---
|
||||
|
||||
## Sending logs to ELK
|
||||
|
||||
- The ELK stack accepts log messages through a GELF socket.
|
||||
|
||||
- The GELF socket listens on UDP port 12201.
|
||||
|
||||
- To send a message, we need to change the logging driver used by Docker.
|
||||
|
||||
- This can be done globally (by reconfiguring the Engine) or on a per-container basis.
|
||||
|
||||
- Let's override the logging driver for a single container:
|
||||
|
||||
```bash
|
||||
$ docker run --log-driver=gelf --log-opt=gelf-address=udp://localhost:12201 \
|
||||
alpine echo hello world
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Viewing the logs in ELK
|
||||
|
||||
- Connect to the Kibana interface.
|
||||
|
||||
- It is exposed on port 5601.
|
||||
|
||||
- Browse http://X.X.X.X:5601.
|
||||
|
||||
---
|
||||
|
||||
## "Configuring" Kibana
|
||||
|
||||
- Kibana should offer you to "Configure an index pattern":
|
||||
<br/>in the "Time-field name" drop down, select "@timestamp", and hit the
|
||||
"Create" button.
|
||||
|
||||
- Then:
|
||||
|
||||
- click "Discover" (in the top-left corner),
|
||||
- click "Last 15 minutes" (in the top-right corner),
|
||||
- click "Last 1 hour" (in the list in the middle),
|
||||
- click "Auto-refresh" (top-right corner),
|
||||
- click "5 seconds" (top-left of the list).
|
||||
|
||||
- You should see a series of green bars (with one new green bar every minute).
|
||||
|
||||
- Our 'hello world' message should be visible there.
|
||||
|
||||
---
|
||||
|
||||
## Important afterword
|
||||
|
||||
**This is not a "production-grade" setup.**
|
||||
|
||||
It is just an educational example. Since we have only
|
||||
one node, we set up a single
|
||||
ElasticSearch instance and a single Logstash instance.
|
||||
|
||||
In a production setup, you need an ElasticSearch cluster
|
||||
(both for capacity and availability reasons). You also
|
||||
need multiple Logstash instances.
|
||||
|
||||
And if you want to withstand
|
||||
bursts of logs, you need some kind of message queue:
|
||||
Redis if you're cheap, Kafka if you want to make sure
|
||||
that you don't drop messages on the floor. Good luck.
|
||||
|
||||
If you want to learn more about the GELF driver,
|
||||
have a look at [this blog post](
|
||||
http://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).
|
||||
1037
slides/intro/Namespaces_Cgroups.md
Normal file
427
slides/intro/Orchestration_Overview.md
Normal file
@@ -0,0 +1,427 @@
|
||||
# Orchestration, an overview
|
||||
|
||||
In this chapter, we will:
|
||||
|
||||
* Explain what is orchestration and why we would need it.
|
||||
|
||||
* Present (from a high-level perspective) some orchestrators.
|
||||
|
||||
* Show one orchestrator (Kubernetes) in action.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## What's orchestration?
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## What's orchestration?
|
||||
|
||||
According to Wikipedia:
|
||||
|
||||
*Orchestration describes the __automated__ arrangement,
|
||||
coordination, and management of complex computer systems,
|
||||
middleware, and services.*
|
||||
|
||||
--
|
||||
|
||||
*[...] orchestration is often discussed in the context of
|
||||
__service-oriented architecture__, __virtualization__, provisioning,
|
||||
Converged Infrastructure and __dynamic datacenter__ topics.*
|
||||
|
||||
--
|
||||
|
||||
What does that really mean?
|
||||
|
||||
---
|
||||
|
||||
## Example 1: dynamic cloud instances
|
||||
|
||||
--
|
||||
|
||||
- Q: do we always use 100% of our servers?
|
||||
|
||||
--
|
||||
|
||||
- A: obviously not!
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## Example 1: dynamic cloud instances
|
||||
|
||||
- Every night, scale down
|
||||
|
||||
(by shutting down extraneous replicated instances)
|
||||
|
||||
- Every morning, scale up
|
||||
|
||||
(by deploying new copies)
|
||||
|
||||
- "Pay for what you use"
|
||||
|
||||
(i.e. save big $$$ here)
|
||||
|
||||
---
|
||||
|
||||
## Example 1: dynamic cloud instances
|
||||
|
||||
How do we implement this?
|
||||
|
||||
- Crontab
|
||||
|
||||
- Autoscaling (save even bigger $$$)
|
||||
|
||||
That's *relatively* easy.
|
||||
|
||||
Now, how are things for our IAAS provider?
|
||||
|
||||
---
|
||||
|
||||
## Example 2: dynamic datacenter
|
||||
|
||||
- Q: what's the #1 cost in a datacenter?
|
||||
|
||||
--
|
||||
|
||||
- A: electricity!
|
||||
|
||||
--
|
||||
|
||||
- Q: what uses electricity?
|
||||
|
||||
--
|
||||
|
||||
- A: servers, obviously
|
||||
|
||||
- A: ... and associated cooling
|
||||
|
||||
--
|
||||
|
||||
- Q: do we always use 100% of our servers?
|
||||
|
||||
--
|
||||
|
||||
- A: obviously not!
|
||||
|
||||
---
|
||||
|
||||
## Example 2: dynamic datacenter
|
||||
|
||||
- If only we could turn off unused servers during the night...
|
||||
|
||||
- Problem: we can only turn off a server if it's totally empty!
|
||||
|
||||
(i.e. all VMs on it are stopped/moved)
|
||||
|
||||
- Solution: *migrate* VMs and shutdown empty servers
|
||||
|
||||
(e.g. combine two hypervisors with 40% load into 80%+0%,
|
||||
<br/>and shutdown the one at 0%)
|
||||
|
||||
---
|
||||
|
||||
## Example 2: dynamic datacenter
|
||||
|
||||
How do we implement this?
|
||||
|
||||
- Shutdown empty hosts (but keep some spare capacity)
|
||||
|
||||
- Start hosts again when capacity gets low
|
||||
|
||||
- Ability to "live migrate" VMs
|
||||
|
||||
(Xen already did this 10+ years ago)
|
||||
|
||||
- Rebalance VMs on a regular basis
|
||||
|
||||
- what if a VM is stopped while we move it?
|
||||
- should we allow provisioning on hosts involved in a migration?
|
||||
|
||||
*Scheduling* becomes more complex.
|
||||
|
||||
---
|
||||
|
||||
## What is scheduling?
|
||||
|
||||
According to Wikipedia (again):
|
||||
|
||||
*In computing, scheduling is the method by which threads,
|
||||
processes or data flows are given access to system resources.*
|
||||
|
||||
The scheduler is concerned mainly with:
|
||||
|
||||
- throughput (total amount of work done per time unit);
|
||||
- turnaround time (between submission and completion);
|
||||
- response time (between submission and start);
|
||||
- waiting time (between job readiness and execution);
|
||||
- fairness (appropriate times according to priorities).
|
||||
|
||||
In practice, these goals often conflict.
|
||||
|
||||
**"Scheduling" = decide which resources to use.**
|
||||
|
||||
---
|
||||
|
||||
## Exercise 1
|
||||
|
||||
- You have:
|
||||
|
||||
- 5 hypervisors (physical machines)
|
||||
|
||||
- Each server has:
|
||||
|
||||
- 16 GB RAM, 8 cores, 1 TB disk
|
||||
|
||||
- Each week, your team asks:
|
||||
|
||||
- one VM with X RAM, Y CPU, Z disk
|
||||
|
||||
Scheduling = deciding which hypervisor to use for each VM.
|
||||
|
||||
Difficulty: easy!
|
||||
|
||||
---
|
||||
|
||||
<!-- Warning, two almost identical slides (for img effect) -->
|
||||
|
||||
## Exercise 2
|
||||
|
||||
- You have:
|
||||
|
||||
- 1000+ hypervisors (and counting!)
|
||||
|
||||
- Each server has different resources:
|
||||
|
||||
- 8-500 GB of RAM, 4-64 cores, 1-100 TB disk
|
||||
|
||||
- Multiple times a day, a different team asks for:
|
||||
|
||||
- up to 50 VMs with different characteristics
|
||||
|
||||
Scheduling = deciding which hypervisor to use for each VM.
|
||||
|
||||
Difficulty: ???
|
||||
|
||||
---
|
||||
|
||||
<!-- Warning, two almost identical slides (for img effect) -->
|
||||
|
||||
## Exercise 2
|
||||
|
||||
- You have:
|
||||
|
||||
- 1000+ hypervisors (and counting!)
|
||||
|
||||
- Each server has different resources:
|
||||
|
||||
- 8-500 GB of RAM, 4-64 cores, 1-100 TB disk
|
||||
|
||||
- Multiple times a day, a different team asks for:
|
||||
|
||||
- up to 50 VMs with different characteristics
|
||||
|
||||
Scheduling = deciding which hypervisor to use for each VM.
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Exercise 3
|
||||
|
||||
- You have machines (physical and/or virtual)
|
||||
|
||||
- You have containers
|
||||
|
||||
- You are trying to put the containers on the machines
|
||||
|
||||
- Sounds familiar?
|
||||
|
||||
---
|
||||
|
||||
## Scheduling with one resource
|
||||
|
||||
.center[]
|
||||
|
||||
Can we do better?
|
||||
|
||||
---
|
||||
|
||||
## Scheduling with one resource
|
||||
|
||||
.center[]
|
||||
|
||||
Yup!
|
||||
|
||||
---
|
||||
|
||||
## Scheduling with two resources
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## Scheduling with three resources
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## You need to be good at this
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## But also, you must be quick!
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## And be web scale!
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## And think outside (?) of the box!
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## Good luck!
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## TL,DR
|
||||
|
||||
* Scheduling with multiple resources (dimensions) is hard.
|
||||
|
||||
* Don't expect to solve the problem with a Tiny Shell Script.
|
||||
|
||||
* There are literally tons of research papers written on this.
|
||||
|
||||
---
|
||||
|
||||
## But our orchestrator also needs to manage ...
|
||||
|
||||
* Network connectivity (or filtering) between containers.
|
||||
|
||||
* Load balancing (external and internal).
|
||||
|
||||
* Failure recovery (if a node or a whole datacenter fails).
|
||||
|
||||
* Rolling out new versions of our applications.
|
||||
|
||||
(Canary deployments, blue/green deployments...)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Some orchestrators
|
||||
|
||||
We are going to present briefly a few orchestrators.
|
||||
|
||||
There is no "absolute best" orchestrator.
|
||||
|
||||
It depends on:
|
||||
|
||||
- your applications,
|
||||
|
||||
- your requirements,
|
||||
|
||||
- your pre-existing skills...
|
||||
|
||||
---
|
||||
|
||||
## Nomad
|
||||
|
||||
- Open Source project by Hashicorp.
|
||||
|
||||
- Arbitrary scheduler (not just for containers).
|
||||
|
||||
- Great if you want to schedule mixed workloads.
|
||||
|
||||
(VMs, containers, processes...)
|
||||
|
||||
- Less integration with the rest of the container ecosystem.
|
||||
|
||||
---
|
||||
|
||||
## Mesos
|
||||
|
||||
- Open Source project in the Apache Foundation.
|
||||
|
||||
- Arbitrary scheduler (not just for containers).
|
||||
|
||||
- Two-level scheduler.
|
||||
|
||||
- Top-level scheduler acts as a resource broker.
|
||||
|
||||
- Second-level schedulers (aka "frameworks") obtain resources from top-level.
|
||||
|
||||
- Frameworks implement various strategies.
|
||||
|
||||
(Marathon = long running processes; Chronos = run at intervals; ...)
|
||||
|
||||
- Commercial offering through DC/OS by Mesosphere.
|
||||
|
||||
---
|
||||
|
||||
## Rancher
|
||||
|
||||
- Rancher 1 offered a simple interface for Docker hosts.
|
||||
|
||||
- Rancher 2 is a complete management platform for Docker and Kubernetes.
|
||||
|
||||
- Technically not an orchestrator, but it's a popular option.
|
||||
|
||||
---
|
||||
|
||||
## Swarm
|
||||
|
||||
- Tightly integrated with the Docker Engine.
|
||||
|
||||
- Extremely simple to deploy and setup, even in multi-manager (HA) mode.
|
||||
|
||||
- Secure by default.
|
||||
|
||||
- Strongly opinionated:
|
||||
|
||||
- smaller set of features,
|
||||
|
||||
- easier to operate.
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes
|
||||
|
||||
- Open Source project initiated by Google.
|
||||
|
||||
- Contributions from many other actors.
|
||||
|
||||
- *De facto* standard for container orchestration.
|
||||
|
||||
- Many deployment options; some of them very complex.
|
||||
|
||||
- Reputation: steep learning curve.
|
||||
|
||||
- Reality:
|
||||
|
||||
- true, if we try to understand *everything*;
|
||||
|
||||
- false, if we focus on what matters.
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes in action
|
||||
|
||||
.center[]
|
||||
@@ -38,6 +38,42 @@ individual Docker VM.*
|
||||
|
||||
---
|
||||
|
||||
## What *is* Docker?
|
||||
|
||||
- "Installing Docker" really means "Installing the Docker Engine and CLI".
|
||||
|
||||
- The Docker Engine is a daemon (a service running in the background).
|
||||
|
||||
- This daemon manages containers, the same way that a hypervisor manages VMs.
|
||||
|
||||
- We interact with the Docker Engine by using the Docker CLI.
|
||||
|
||||
- The Docker CLI and the Docker Engine communicate through an API.
|
||||
|
||||
- There are many other programs, and many client libraries, to use that API.
|
||||
|
||||
---
|
||||
|
||||
## Why don't we run Docker locally?
|
||||
|
||||
- We are going to download container images and distribution packages.
|
||||
|
||||
- This could put a bit of stress on the local WiFi and slow us down.
|
||||
|
||||
- Instead, we use a remote VM that has a good connectivity
|
||||
|
||||
- In some rare cases, installing Docker locally is challenging:
|
||||
|
||||
- no administrator/root access (computer managed by strict corp IT)
|
||||
|
||||
- 32-bit CPU or OS
|
||||
|
||||
- old OS version (e.g. CentOS 6, OSX pre-Yosemite, Windows 7)
|
||||
|
||||
- It's better to spend time learning containers than fiddling with the installer!
|
||||
|
||||
---
|
||||
|
||||
## Connecting to your Virtual Machine
|
||||
|
||||
You need an SSH client.
|
||||
@@ -66,21 +102,24 @@ Once logged in, make sure that you can run a basic Docker command:
|
||||
```bash
|
||||
$ docker version
|
||||
Client:
|
||||
Version: 17.09.0-ce
|
||||
API version: 1.32
|
||||
Go version: go1.8.3
|
||||
Git commit: afdb6d4
|
||||
Built: Tue Sep 26 22:40:09 2017
|
||||
OS/Arch: darwin/amd64
|
||||
Version: 18.03.0-ce
|
||||
API version: 1.37
|
||||
Go version: go1.9.4
|
||||
Git commit: 0520e24
|
||||
Built: Wed Mar 21 23:10:06 2018
|
||||
OS/Arch: linux/amd64
|
||||
Experimental: false
|
||||
Orchestrator: swarm
|
||||
|
||||
Server:
|
||||
Version: 17.09.0-ce
|
||||
API version: 1.32 (minimum version 1.12)
|
||||
Go version: go1.8.3
|
||||
Git commit: afdb6d4
|
||||
Built: Tue Sep 26 22:45:38 2017
|
||||
OS/Arch: linux/amd64
|
||||
Experimental: true
|
||||
Engine:
|
||||
Version: 18.03.0-ce
|
||||
API version: 1.37 (minimum version 1.12)
|
||||
Go version: go1.9.4
|
||||
Git commit: 0520e24
|
||||
Built: Wed Mar 21 23:08:35 2018
|
||||
OS/Arch: linux/amd64
|
||||
Experimental: false
|
||||
```
|
||||
]
|
||||
|
||||
|
||||
@@ -195,7 +195,7 @@ Let's start another container using the `webapps` volume.
|
||||
$ docker run -v webapps:/webapps -w /webapps -ti alpine vi ROOT/index.jsp
|
||||
```
|
||||
|
||||
Vandalize the page, save, exit.
|
||||
Where `-w` sets the working directory inside the container. Vandalize the page, save and exit.
|
||||
|
||||
Then run `curl localhost:1234` again to see your changes.
|
||||
|
||||
@@ -259,7 +259,7 @@ $ docker run -d --name redis28 redis:2.8
|
||||
Connect to the Redis container and set some data.
|
||||
|
||||
```bash
|
||||
$ docker run -ti --link redis28:redis alpine telnet redis 6379
|
||||
$ docker run -ti --link redis28:redis alpine:3.6 telnet redis 6379
|
||||
```
|
||||
|
||||
Issue the following commands:
|
||||
@@ -298,7 +298,7 @@ class: extra-details
|
||||
Connect to the Redis container and see our data.
|
||||
|
||||
```bash
|
||||
docker run -ti --link redis30:redis alpine telnet redis 6379
|
||||
docker run -ti --link redis30:redis alpine:3.6 telnet redis 6379
|
||||
```
|
||||
|
||||
Issue a few commands.
|
||||
@@ -401,6 +401,47 @@ or providing extra features. For instance:
|
||||
|
||||
---
|
||||
|
||||
## Volumes vs. Mounts
|
||||
|
||||
* Since Docker 17.06, a new option is available: `--mount`.
|
||||
|
||||
* It offers a new, richer syntax to manipulate data in containers.
|
||||
|
||||
* It makes an explicit difference between:
|
||||
|
||||
- volumes (identified with a unique name, managed by a storage plugin),
|
||||
|
||||
- bind mounts (identified with a host path, not managed).
|
||||
|
||||
* The former `-v` / `--volume` option is still usable.
|
||||
|
||||
---
|
||||
|
||||
## `--mount` syntax
|
||||
|
||||
Binding a host path to a container path:
|
||||
|
||||
```bash
|
||||
$ docker run \
|
||||
--mount type=bind,source=/path/on/host,target=/path/in/container alpine
|
||||
```
|
||||
|
||||
Mounting a volume to a container path:
|
||||
|
||||
```bash
|
||||
$ docker run \
|
||||
--mount source=myvolume,target=/path/in/container alpine
|
||||
```
|
||||
|
||||
Mounting a tmpfs (in-memory, for temporary files):
|
||||
|
||||
```bash
|
||||
$ docker run \
|
||||
--mount type=tmpfs,destination=/path/in/container,tmpfs-size=1000000 alpine
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Section summary
|
||||
|
||||
We've learned how to:
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
Introduction to Orchestration
|
||||
with Kubernetes
|
||||
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
|
||||
#chat: "In person!"
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
@@ -19,21 +18,25 @@ chapters:
|
||||
- - common/prereqs.md
|
||||
- kube/versions-k8s.md
|
||||
- common/sampleapp.md
|
||||
#- common/composescale.md
|
||||
- common/composescale.md
|
||||
- common/composedown.md
|
||||
- - kube/concepts-k8s.md
|
||||
- kube/concepts-k8s.md
|
||||
- common/declarative.md
|
||||
- kube/declarative.md
|
||||
- kube/kubenet.md
|
||||
- kube/kubectlget.md
|
||||
- kube/setup-k8s.md
|
||||
- kube/kubectlrun.md
|
||||
- - kube/kubectlexpose.md
|
||||
- - kube/kubectlrun.md
|
||||
- kube/kubectlexpose.md
|
||||
- kube/ourapponkube.md
|
||||
- kube/dashboard.md
|
||||
- - kube/kubectlscale.md
|
||||
- - kube/dashboard.md
|
||||
- kube/kubectlscale.md
|
||||
- kube/daemonset.md
|
||||
- kube/rollout.md
|
||||
- - kube/logs-cli.md
|
||||
- kube/logs-centralized.md
|
||||
- kube/helm.md
|
||||
- kube/namespaces.md
|
||||
- kube/whatsnext.md
|
||||
- common/thankyou.md
|
||||
- kube/links.md
|
||||
- common/thankyou.md
|
||||
@@ -32,6 +32,10 @@ chapters:
|
||||
- - kube/kubectlscale.md
|
||||
- kube/daemonset.md
|
||||
- kube/rollout.md
|
||||
- kube/logs-cli.md
|
||||
- kube/logs-centralized.md
|
||||
- kube/helm.md
|
||||
- kube/namespaces.md
|
||||
- kube/whatsnext.md
|
||||
- common/thankyou.md
|
||||
- kube/links.md
|
||||
- common/thankyou.md
|
||||
|
||||
1
slides/kube.yml
Symbolic link
@@ -0,0 +1 @@
|
||||
kube-fullday.yml
|
||||
@@ -98,39 +98,76 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes architecture: the master
|
||||
|
||||
- The Kubernetes logic (its "brains") is a collection of services:
|
||||
|
||||
- the API server (our point of entry to everything!)
|
||||
- core services like the scheduler and controller manager
|
||||
- `etcd` (a highly available key/value store; the "database" of Kubernetes)
|
||||
|
||||
- Together, these services form what is called the "master"
|
||||
|
||||
- These services can run straight on a host, or in containers
|
||||
<br/>
|
||||
(that's an implementation detail)
|
||||
|
||||
- `etcd` can be run on separate machines (first schema) or co-located (second schema)
|
||||
|
||||
- We need at least one master, but we can have more (for high availability)
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes architecture: the nodes
|
||||
|
||||
- The nodes executing our containers run another collection of services:
|
||||
- The nodes executing our containers run a collection of services:
|
||||
|
||||
- a container Engine (typically Docker)
|
||||
|
||||
- kubelet (the "node agent")
|
||||
|
||||
- kube-proxy (a necessary but not sufficient network component)
|
||||
|
||||
- Nodes were formerly called "minions"
|
||||
|
||||
- It is customary to *not* run apps on the node(s) running master components
|
||||
(You might see that word in older articles or documentation)
|
||||
|
||||
(Except when using small development clusters)
|
||||
---
|
||||
|
||||
## Kubernetes architecture: the control plane
|
||||
|
||||
- The Kubernetes logic (its "brains") is a collection of services:
|
||||
|
||||
- the API server (our point of entry to everything!)
|
||||
|
||||
- core services like the scheduler and controller manager
|
||||
|
||||
- `etcd` (a highly available key/value store; the "database" of Kubernetes)
|
||||
|
||||
- Together, these services form the control plane of our cluster
|
||||
|
||||
- The control plane is also called the "master"
|
||||
|
||||
---
|
||||
|
||||
## Running the control plane on special nodes
|
||||
|
||||
- It is common to reserve a dedicated node for the control plane
|
||||
|
||||
(Except for single-node development clusters, like when using minikube)
|
||||
|
||||
- This node is then called a "master"
|
||||
|
||||
(Yes, this is ambiguous: is the "master" a node, or the whole control plane?)
|
||||
|
||||
- Normal applications are restricted from running on this node
|
||||
|
||||
(By using a mechanism called ["taints"](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/))
|
||||
|
||||
- When high availability is required, each service of the control plane must be resilient
|
||||
|
||||
- The control plane is then replicated on multiple nodes
|
||||
|
||||
(This is sometimes called a "multi-master" setup)
|
||||
|
||||
---
|
||||
|
||||
## Running the control plane outside containers
|
||||
|
||||
- The services of the control plane can run in or out of containers
|
||||
|
||||
- For instance: since `etcd` is a critical service, some people
|
||||
deploy it directly on a dedicated cluster (without containers)
|
||||
|
||||
(This is illustrated on the first "super complicated" schema)
|
||||
|
||||
- In some hosted Kubernetes offerings (e.g. GKE), the control plane is invisible
|
||||
|
||||
(We only "see" a Kubernetes API endpoint)
|
||||
|
||||
- In that case, there is no "master node"
|
||||
|
||||
*For this reason, it is more accurate to say "control plane" rather than "master".*
|
||||
|
||||
---
|
||||
|
||||
@@ -184,7 +221,7 @@ Yes!
|
||||
|
||||
*Probably not (in the future)*
|
||||
|
||||
.footnote[More information about CRI [on the Kubernetes blog](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html)]
|
||||
.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -142,7 +142,7 @@ We all knew this couldn't be that easy, right!
|
||||
|
||||
- We could also tell Kubernetes to ignore these errors and try anyway
|
||||
|
||||
- The `--force` flag actual name is `--validate=false`
|
||||
- The `--force` flag's actual name is `--validate=false`
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -416,8 +416,83 @@ The timestamps should give us a hint about how many pods are currently receiving
|
||||
|
||||
---
|
||||
|
||||
## More labels, more selectors, more problems?
|
||||
## Cleaning up
|
||||
|
||||
- Bonus exercise 1: clean up the pods of the "old" daemon set
|
||||
- The pods of the "old" daemon set are still running
|
||||
|
||||
- Bonus exercise 2: how could we have done this to avoid creating new pods?
|
||||
- We are going to identify them programmatically
|
||||
|
||||
.exercise[
|
||||
|
||||
- List the pods with `run=rng` but without `isactive=yes`:
|
||||
```bash
|
||||
kubectl get pods -l run=rng,isactive!=yes
|
||||
```
|
||||
|
||||
- Remove these pods:
|
||||
```bash
|
||||
kubectl get pods -l run=rng,isactive!=yes -o name |
|
||||
xargs kubectl delete
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Avoiding extra pods
|
||||
|
||||
- When we changed the definition of the daemon set, it immediately created new pods
|
||||
|
||||
- How could we have avoided this?
|
||||
|
||||
--
|
||||
|
||||
- By adding the `isactive: "yes"` label to the pods before changing the daemon set!
|
||||
|
||||
- This can be done programmatically with `kubectl patch`:
|
||||
|
||||
```bash
|
||||
PATCH='
|
||||
metadata:
|
||||
labels:
|
||||
isactive: "yes"
|
||||
'
|
||||
kubectl get pods -l run=rng -o name |
|
||||
xargs kubectl patch -p "$PATCH"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Labels and debugging
|
||||
|
||||
- When a pod is misbehaving, we can delete it: another one will be recreated
|
||||
|
||||
- But we can also change its labels
|
||||
|
||||
- It will be removed from the load balancer (it won't receive traffic anymore)
|
||||
|
||||
- Another pod will be recreated immediately
|
||||
|
||||
- But the problematic pod is still here, and we can inspect and debug it
|
||||
|
||||
- We can even re-add it to the rotation if necessary
|
||||
|
||||
(Very useful to troubleshoot intermittent and elusive bugs)
|
||||
|
||||
---
|
||||
|
||||
## Labels and advanced rollout control
|
||||
|
||||
- Conversely, we can add pods matching a service's selector
|
||||
|
||||
- These pods will then receive requests and serve traffic
|
||||
|
||||
- Examples:
|
||||
|
||||
- one-shot pod with all debug flags enabled, to collect logs
|
||||
|
||||
- pods created automatically, but added to rotation in a second step
|
||||
<br/>
|
||||
(by setting their label accordingly)
|
||||
|
||||
- This gives us building blocks for canary and blue/green deployments
|
||||
|
||||
212
slides/kube/helm.md
Normal file
@@ -0,0 +1,212 @@
|
||||
# Managing stacks with Helm
|
||||
|
||||
- We created our first resources with `kubectl run`, `kubectl expose` ...
|
||||
|
||||
- We have also created resources by loading YAML files with `kubectl apply -f`
|
||||
|
||||
- For larger stacks, managing thousands of lines of YAML is unreasonable
|
||||
|
||||
- These YAML bundles need to be customized with variable parameters
|
||||
|
||||
(E.g.: number of replicas, image version to use ...)
|
||||
|
||||
- It would be nice to have an organized, versioned collection of bundles
|
||||
|
||||
- It would be nice to be able to upgrade/rollback these bundles carefully
|
||||
|
||||
- [Helm](https://helm.sh/) is an open source project offering all these things!
|
||||
|
||||
---
|
||||
|
||||
## Helm concepts
|
||||
|
||||
- `helm` is a CLI tool
|
||||
|
||||
- `tiller` is its companion server-side component
|
||||
|
||||
- A "chart" is an archive containing templatized YAML bundles
|
||||
|
||||
- Charts are versioned
|
||||
|
||||
- Charts can be stored on private or public repositories
|
||||
|
||||
---
|
||||
|
||||
## Installing Helm
|
||||
|
||||
- We need to install the `helm` CLI; then use it to deploy `tiller`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the `helm` CLI:
|
||||
```bash
|
||||
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
|
||||
```
|
||||
|
||||
- Deploy `tiller`:
|
||||
```bash
|
||||
helm init
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Fix account permissions
|
||||
|
||||
- Helm permission model requires us to tweak permissions
|
||||
|
||||
- In a more realistic deployment, you might create per-user or per-team
|
||||
service accounts, roles, and role bindings
|
||||
|
||||
.exercise[
|
||||
|
||||
- Grant `cluster-admin` role to `kube-system:default` service account:
|
||||
```bash
|
||||
kubectl create clusterrolebinding add-on-cluster-admin \
|
||||
--clusterrole=cluster-admin --serviceaccount=kube-system:default
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(Defining the exact roles and permissions on your cluster requires
|
||||
a deeper knowledge of Kubernetes' RBAC model. The command above is
|
||||
fine for personal and development clusters.)
|
||||
|
||||
---
|
||||
|
||||
## View available charts
|
||||
|
||||
- A public repo is pre-configured when installing Helm
|
||||
|
||||
- We can view available charts with `helm search` (and an optional keyword)
|
||||
|
||||
.exercise[
|
||||
|
||||
- View all available charts:
|
||||
```bash
|
||||
helm search
|
||||
```
|
||||
|
||||
- View charts related to `prometheus`:
|
||||
```bash
|
||||
helm search prometheus
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Install a chart
|
||||
|
||||
- Most charts use `LoadBalancer` service types by default
|
||||
|
||||
- Most charts require persistent volumes to store data
|
||||
|
||||
- We need to relax these requirements a bit
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the Prometheus metrics collector on our cluster:
|
||||
```bash
|
||||
helm install stable/prometheus \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.persistentVolume.enabled=false
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Where do these `--set` options come from?
|
||||
|
||||
---
|
||||
|
||||
## Inspecting a chart
|
||||
|
||||
- `helm inspect` shows details about a chart (including available options)
|
||||
|
||||
.exercise[
|
||||
|
||||
- See the metadata and all available options for `stable/prometheus`:
|
||||
```bash
|
||||
helm inspect stable/prometheus
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The chart's metadata includes a URL to the project's home page.
|
||||
|
||||
(Sometimes it conveniently points to the documentation for the chart.)
|
||||
|
||||
---
|
||||
|
||||
## Creating a chart
|
||||
|
||||
- We are going to show a way to create a *very simplified* chart
|
||||
|
||||
- In a real chart, *lots of things* would be templatized
|
||||
|
||||
(Resource names, service types, number of replicas...)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a sample chart:
|
||||
```bash
|
||||
helm create dockercoins
|
||||
```
|
||||
|
||||
- Move away the sample templates and create an empty template directory:
|
||||
```bash
|
||||
mv dockercoins/templates dockercoins/default-templates
|
||||
mkdir dockercoins/templates
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Exporting the YAML for our application
|
||||
|
||||
- The following section assumes that DockerCoins is currently running
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create one YAML file for each resource that we need:
|
||||
.small[
|
||||
```bash
|
||||
|
||||
while read kind name; do
|
||||
kubectl get -o yaml --export $kind $name > dockercoins/templates/$name-$kind.yaml
|
||||
done <<EOF
|
||||
deployment worker
|
||||
deployment hasher
|
||||
daemonset rng
|
||||
deployment webui
|
||||
deployment redis
|
||||
service hasher
|
||||
service rng
|
||||
service webui
|
||||
service redis
|
||||
EOF
|
||||
```
|
||||
]
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing our helm chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's install our helm chart! (`dockercoins` is the path to the chart)
|
||||
```bash
|
||||
helm install dockercoins
|
||||
```
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
- Since the application is already deployed, this will fail:<br>
|
||||
`Error: release loitering-otter failed: services "hasher" already exists`
|
||||
|
||||
- To avoid naming conflicts, we will deploy the application in another *namespace*
|
||||
@@ -137,4 +137,116 @@ Note: please DO NOT call the service `search`. It would collide with the TLD.
|
||||
|
||||
--
|
||||
|
||||
Our requests are load balanced across multiple pods.
|
||||
We may see `curl: (7) Failed to connect to _IP_ port 9200: Connection refused`.
|
||||
|
||||
This is normal while the service starts up.
|
||||
|
||||
--
|
||||
|
||||
Once it's running, our requests are load balanced across multiple pods.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## If we don't need a load balancer
|
||||
|
||||
- Sometimes, we want to access our scaled services directly:
|
||||
|
||||
- if we want to save a tiny little bit of latency (typically less than 1ms)
|
||||
|
||||
- if we need to connect over arbitrary ports (instead of a few fixed ones)
|
||||
|
||||
- if we need to communicate over another protocol than UDP or TCP
|
||||
|
||||
- if we want to decide how to balance the requests client-side
|
||||
|
||||
- ...
|
||||
|
||||
- In that case, we can use a "headless service"
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Headless services
|
||||
|
||||
- A headless service is obtained by setting the `clusterIP` field to `None`
|
||||
|
||||
(Either with `--cluster-ip=None`, or by providing a custom YAML)
|
||||
|
||||
- As a result, the service doesn't have a virtual IP address
|
||||
|
||||
- Since there is no virtual IP address, there is no load balancer either
|
||||
|
||||
- `kube-dns` will return the pods' IP addresses as multiple `A` records
|
||||
|
||||
- This gives us an easy way to discover all the replicas for a deployment
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Services and endpoints
|
||||
|
||||
- A service has a number of "endpoints"
|
||||
|
||||
- Each endpoint is a host + port where the service is available
|
||||
|
||||
- The endpoints are maintained and updated automatically by Kubernetes
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the endpoints that Kubernetes has associated with our `elastic` service:
|
||||
```bash
|
||||
kubectl describe service elastic
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
In the output, there will be a line starting with `Endpoints:`.
|
||||
|
||||
That line will list a bunch of addresses in `host:port` format.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Viewing endpoint details
|
||||
|
||||
- When we have many endpoints, our display commands truncate the list
|
||||
```bash
|
||||
kubectl get endpoints
|
||||
```
|
||||
|
||||
- If we want to see the full list, we can use one of the following commands:
|
||||
```bash
|
||||
kubectl describe endpoints elastic
|
||||
kubectl get endpoints elastic -o yaml
|
||||
```
|
||||
|
||||
- These commands will show us a list of IP addresses
|
||||
|
||||
- These IP addresses should match the addresses of the corresponding pods:
|
||||
```bash
|
||||
kubectl get pods -l run=elastic -o wide
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `endpoints` not `endpoint`
|
||||
|
||||
- `endpoints` is the only resource that cannot be singular
|
||||
|
||||
```bash
|
||||
$ kubectl get endpoint
|
||||
error: the server doesn't have a resource type "endpoint"
|
||||
```
|
||||
|
||||
- This is because the type itself is plural (unlike every other resource)
|
||||
|
||||
- There is no `endpoint` object: `type Endpoints struct`
|
||||
|
||||
- The type doesn't represent a single endpoint, but a list of endpoints
|
||||
|
||||
@@ -265,4 +265,4 @@ The `kube-system` namespace is used for the control plane.
|
||||
]
|
||||
--
|
||||
|
||||
- `kube-public` is created by kubeadm & [used for security bootstrapping](http://blog.kubernetes.io/2017/01/stronger-foundation-for-creating-and-managing-kubernetes-clusters.html)
|
||||
- `kube-public` is created by kubeadm & [used for security bootstrapping](https://kubernetes.io/blog/2017/01/stronger-foundation-for-creating-and-managing-kubernetes-clusters)
|
||||
|
||||
@@ -41,7 +41,8 @@ OK, what just happened?
|
||||
|
||||
- List most resource types:
|
||||
```bash
|
||||
kubectl get all
|
||||
kubectl get all # This was broken in Kubernetes 1.10, so ...
|
||||
kubectl get all -o custom-columns=KIND:.kind,NAME:.metadata.name
|
||||
```
|
||||
|
||||
]
|
||||
@@ -49,9 +50,9 @@ OK, what just happened?
|
||||
--
|
||||
|
||||
We should see the following things:
|
||||
- `deploy/pingpong` (the *deployment* that we just created)
|
||||
- `rs/pingpong-xxxx` (a *replica set* created by the deployment)
|
||||
- `po/pingpong-yyyy` (a *pod* created by the replica set)
|
||||
- A `Deployment` named `pingpong` (the thing that we just created)
|
||||
- A `ReplicaSet` named `pingpong-xxxx` (created by the deployment)
|
||||
- A `Pod` named `pingpong-yyyy` (created by the replica set)
|
||||
|
||||
---
|
||||
|
||||
@@ -137,9 +138,8 @@ We should see the following things:
|
||||
```
|
||||
|
||||
<!--
|
||||
```keys
|
||||
^C
|
||||
```
|
||||
```wait seq=3```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
@@ -181,9 +181,8 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
```
|
||||
|
||||
<!--
|
||||
```keys
|
||||
^C
|
||||
```
|
||||
```wait Running```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
- Destroy a pod:
|
||||
|
||||
@@ -52,9 +52,9 @@
|
||||
|
||||
(15 are listed in the Kubernetes documentation)
|
||||
|
||||
- It *looks like* you have a level 3 network, but it's only level 4
|
||||
- Pods have level 3 (IP) connectivity, but *services* are level 4
|
||||
|
||||
(The spec requires UDP and TCP, but not port ranges or arbitrary IP packets)
|
||||
(Services map to a single UDP or TCP port; no port ranges or arbitrary IP packets)
|
||||
|
||||
- `kube-proxy` is on the data path when connecting to a pod or container,
|
||||
<br/>and it's not particularly fast (relies on userland proxying or iptables)
|
||||
@@ -72,10 +72,32 @@
|
||||
- Unless you:
|
||||
|
||||
- routinely saturate 10G network interfaces
|
||||
|
||||
- count packet rates in millions per second
|
||||
|
||||
- run high-traffic VOIP or gaming platforms
|
||||
|
||||
- do weird things that involve millions of simultaneous connections
|
||||
<br/>(in which case you're already familiar with kernel tuning)
|
||||
|
||||
- If necessary, there are alternatives to `kube-proxy`; e.g.
|
||||
[`kube-router`](https://www.kube-router.io)
|
||||
|
||||
---
|
||||
|
||||
## The Container Network Interface (CNI)
|
||||
|
||||
- The CNI has a well-defined [specification](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration) for network plugins
|
||||
|
||||
- When a pod is created, Kubernetes delegates the network setup to CNI plugins
|
||||
|
||||
- Typically, a CNI plugin will:
|
||||
|
||||
- allocate an IP address (by calling an IPAM plugin)
|
||||
|
||||
- add a network interface into the pod's network namespace
|
||||
|
||||
- configure the interface as well as required routes etc.
|
||||
|
||||
- Using multiple plugins can be done with "meta-plugins" like CNI-Genie or Multus
|
||||
|
||||
- Not all CNI plugins are equal
|
||||
|
||||
(e.g. they don't all implement network policies, which are required to isolate pods)
|
||||
|
||||
144
slides/kube/logs-centralized.md
Normal file
@@ -0,0 +1,144 @@
|
||||
# Centralized logging
|
||||
|
||||
- Using `kubectl` or `stern` is simple; but it has drawbacks:
|
||||
|
||||
- when a node goes down, its logs are not available anymore
|
||||
|
||||
- we can only dump or stream logs; we want to search/index/count...
|
||||
|
||||
- We want to send all our logs to a single place
|
||||
|
||||
- We want to parse them (e.g. for HTTP logs) and index them
|
||||
|
||||
- We want a nice web dashboard
|
||||
|
||||
--
|
||||
|
||||
- We are going to deploy an EFK stack
|
||||
|
||||
---
|
||||
|
||||
## What is EFK?
|
||||
|
||||
- EFK is three components:
|
||||
|
||||
- ElasticSearch (to store and index log entries)
|
||||
|
||||
- Fluentd (to get container logs, process them, and put them in ElasticSearch)
|
||||
|
||||
- Kibana (to view/search log entries with a nice UI)
|
||||
|
||||
- The only component that we need to access from outside the cluster will be Kibana
|
||||
|
||||
---
|
||||
|
||||
## Deploying EFK on our cluster
|
||||
|
||||
- We are going to use a YAML file describing all the required resources
|
||||
|
||||
.exercise[
|
||||
|
||||
- Load the YAML file into our cluster:
|
||||
```bash
|
||||
kubectl apply -f https://goo.gl/MUZhE4
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If we [look at the YAML file](https://goo.gl/MUZhE4), we see that
|
||||
it creates a daemon set, two deployments, two services,
|
||||
and a few roles and role bindings (to give fluentd the required permissions).
|
||||
|
||||
---
|
||||
|
||||
## The itinerary of a log line (before Fluentd)
|
||||
|
||||
- A container writes a line on stdout or stderr
|
||||
|
||||
- Both are typically piped to the container engine (Docker or otherwise)
|
||||
|
||||
- The container engine reads the line, and sends it to a logging driver
|
||||
|
||||
- The timestamp and stream (stdout or stderr) is added to the log line
|
||||
|
||||
- With the default configuration for Kubernetes, the line is written to a JSON file
|
||||
|
||||
(`/var/log/containers/pod-name_namespace_container-id.log`)
|
||||
|
||||
- That file is read when we invoke `kubectl logs`; we can access it directly too
|
||||
|
||||
---
|
||||
|
||||
## The itinerary of a log line (with Fluentd)
|
||||
|
||||
- Fluentd runs on each node (thanks to a daemon set)
|
||||
|
||||
- It bind-mounts `/var/log/containers` from the host (to access these files)
|
||||
|
||||
- It continuously scans this directory for new files; reads them; parses them
|
||||
|
||||
- Each log line becomes a JSON object, fully annotated with extra information:
|
||||
<br/>container id, pod name, Kubernetes labels ...
|
||||
|
||||
- These JSON objects are stored in ElasticSearch
|
||||
|
||||
- ElasticSearch indexes the JSON objects
|
||||
|
||||
- We can access the logs through Kibana (and perform searches, counts, etc.)
|
||||
|
||||
---
|
||||
|
||||
## Accessing Kibana
|
||||
|
||||
- Kibana offers a web interface that is relatively straightforward
|
||||
|
||||
- Let's check it out!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check which `NodePort` was allocated to Kibana:
|
||||
```bash
|
||||
kubectl get svc kibana
|
||||
```
|
||||
|
||||
- With our web browser, connect to Kibana
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using Kibana
|
||||
|
||||
*Note: this is not a Kibana workshop! So this section is deliberately very terse.*
|
||||
|
||||
- The first time you connect to Kibana, you must "configure an index pattern"
|
||||
|
||||
- Just use the one that is suggested, `@timestamp`
|
||||
|
||||
- Then click "Discover" (in the top-left corner)
|
||||
|
||||
- You should see container logs
|
||||
|
||||
- Advice: in the left column, select a few fields to display, e.g.:
|
||||
|
||||
`kubernetes.host`, `kubernetes.pod_name`, `stream`, `log`
|
||||
|
||||
---
|
||||
|
||||
## Caveat emptor
|
||||
|
||||
We are using EFK because it is relatively straightforward
|
||||
to deploy on Kubernetes, without having to redeploy or reconfigure
|
||||
our cluster. But it doesn't mean that it will always be the best
|
||||
option for your use-case. If you are running Kubernetes in the
|
||||
cloud, you might consider using the cloud provider's logging
|
||||
infrastructure (if it can be integrated with Kubernetes).
|
||||
|
||||
The deployment method that we will use here has been simplified:
|
||||
there is only one ElasticSearch node. In a real deployment, you
|
||||
might use a cluster, both for performance and reliability reasons.
|
||||
But this is outside of the scope of this chapter.
|
||||
|
||||
The YAML file that we used creates all the resources in the
|
||||
`default` namespace, for simplicity. In a real scenario, you will
|
||||
create the resources in the `kube-system` namespace or in a dedicated namespace.
|
||||
127
slides/kube/logs-cli.md
Normal file
@@ -0,0 +1,127 @@
|
||||
# Accessing logs from the CLI
|
||||
|
||||
- The `kubectl logs` command has limitations:
|
||||
|
||||
- it cannot stream logs from multiple pods at a time
|
||||
|
||||
- when showing logs from multiple pods, it mixes them all together
|
||||
|
||||
- We are going to see how to do it better
|
||||
|
||||
---
|
||||
|
||||
## Doing it manually
|
||||
|
||||
- We *could* (if we were so inclined), write a program or script that would:
|
||||
|
||||
- take a selector as an argument
|
||||
|
||||
- enumerate all pods matching that selector (with `kubectl get -l ...`)
|
||||
|
||||
- fork one `kubectl logs --follow ...` command per container
|
||||
|
||||
- annotate the logs (the output of each `kubectl logs ...` process) with their origin
|
||||
|
||||
- preserve ordering by using `kubectl logs --timestamps ...` and merge the output
|
||||
|
||||
--
|
||||
|
||||
- We *could* do it, but thankfully, others did it for us already!
|
||||
|
||||
---
|
||||
|
||||
## Stern
|
||||
|
||||
[Stern](https://github.com/wercker/stern) is an open source project
|
||||
by [Wercker](http://www.wercker.com/).
|
||||
|
||||
From the README:
|
||||
|
||||
*Stern allows you to tail multiple pods on Kubernetes and multiple containers within the pod. Each result is color coded for quicker debugging.*
|
||||
|
||||
*The query is a regular expression so the pod name can easily be filtered and you don't need to specify the exact id (for instance omitting the deployment id). If a pod is deleted it gets removed from tail and if a new pod is added it automatically gets tailed.*
|
||||
|
||||
Exactly what we need!
|
||||
|
||||
---
|
||||
|
||||
## Installing Stern
|
||||
|
||||
- For simplicity, let's just grab a binary release
|
||||
|
||||
.exercise[
|
||||
|
||||
- Download a binary release from GitHub:
|
||||
```bash
|
||||
sudo curl -L -o /usr/local/bin/stern \
|
||||
https://github.com/wercker/stern/releases/download/1.6.0/stern_linux_amd64
|
||||
sudo chmod +x /usr/local/bin/stern
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
These installation instructions will work on our clusters, since they are Linux amd64 VMs.
|
||||
|
||||
However, you will have to adapt them if you want to install Stern on your local machine.
|
||||
|
||||
---
|
||||
|
||||
## Using Stern
|
||||
|
||||
- There are two ways to specify the pods for which we want to see the logs:
|
||||
|
||||
- `-l` followed by a selector expression (like with many `kubectl` commands)
|
||||
|
||||
- with a "pod query", i.e. a regex used to match pod names
|
||||
|
||||
- These two ways can be combined if necessary
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the logs for all the rng containers:
|
||||
```bash
|
||||
stern rng
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Stern convenient options
|
||||
|
||||
- The `--tail N` flag shows the last `N` lines for each container
|
||||
|
||||
(Instead of showing the logs since the creation of the container)
|
||||
|
||||
- The `-t` / `--timestamps` flag shows timestamps
|
||||
|
||||
- The `--all-namespaces` flag is self-explanatory
|
||||
|
||||
.exercise[
|
||||
|
||||
- View what's up with the `weave` system containers:
|
||||
```bash
|
||||
stern --tail 1 --timestamps --all-namespaces weave
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using Stern with a selector
|
||||
|
||||
- When specifying a selector, we can omit the value for a label
|
||||
|
||||
- This will match all objects having that label (regardless of the value)
|
||||
|
||||
- Everything created with `kubectl run` has a label `run`
|
||||
|
||||
- We can use that property to view the logs of all the pods created with `kubectl run`
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the logs for all the things started with `kubectl run`:
|
||||
```bash
|
||||
stern -l run
|
||||
```
|
||||
|
||||
]
|
||||
@@ -1,236 +1,195 @@
|
||||
class: namespaces
|
||||
name: namespaces
|
||||
# Namespaces
|
||||
|
||||
# Improving isolation with User Namespaces
|
||||
- We cannot have two resources with the same name
|
||||
|
||||
- *Namespaces* are kernel mechanisms to compartmentalize the system
|
||||
|
||||
- There are different kinds of namespaces: `pid`, `net`, `mnt`, `ipc`, `uts`, and `user`
|
||||
|
||||
- For a primer, see "Anatomy of a Container"
|
||||
([video](https://www.youtube.com/watch?v=sK5i-N34im8))
|
||||
([slides](https://www.slideshare.net/jpetazzo/cgroups-namespaces-and-beyond-what-are-containers-made-from-dockercon-europe-2015))
|
||||
|
||||
- The *user namespace* allows to map UIDs between the containers and the host
|
||||
|
||||
- As a result, `root` in a container can map to a non-privileged user on the host
|
||||
|
||||
Note: even without user namespaces, `root` in a container cannot go wild on the host.
|
||||
<br/>
|
||||
It is mediated by capabilities, cgroups, namespaces, seccomp, LSMs...
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## User Namespaces in Docker
|
||||
|
||||
- Optional feature added in Docker Engine 1.10
|
||||
|
||||
- Not enabled by default
|
||||
|
||||
- Has to be enabled at Engine startup, and affects all containers
|
||||
|
||||
- When enabled, `UID:GID` in containers are mapped to a different range on the host
|
||||
|
||||
- Safer than switching to a non-root user (with `-u` or `USER`) in the container
|
||||
<br/>
|
||||
(Since with user namespaces, root escalation maps to a non-privileged user)
|
||||
|
||||
- Can be selectively disabled per container by starting them with `--userns=host`
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## User Namespaces Caveats
|
||||
|
||||
When user namespaces are enabled, containers cannot:
|
||||
|
||||
- Use the host's network namespace (with `docker run --network=host`)
|
||||
|
||||
- Use the host's PID namespace (with `docker run --pid=host`)
|
||||
|
||||
- Run in privileged mode (with `docker run --privileged`)
|
||||
|
||||
... Unless user namespaces are disabled for the container, with flag `--userns=host`
|
||||
|
||||
External volume and graph drivers that don't support user mapping might not work.
|
||||
|
||||
All containers are currently mapped to the same UID:GID range.
|
||||
|
||||
Some of these limitations might be lifted in the future!
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Filesystem ownership details
|
||||
|
||||
When enabling user namespaces:
|
||||
|
||||
- the UID:GID on disk (in the images and containers) has to match the *mapped* UID:GID
|
||||
|
||||
- existing images and containers cannot work (their UID:GID would have to be changed)
|
||||
|
||||
For practical reasons, when enabling user namespaces, the Docker Engine places containers and images (and everything else) in a different directory.
|
||||
|
||||
As a result, if you enable user namespaces on an existing installation:
|
||||
|
||||
- all containers and images (and e.g. Swarm data) disappear
|
||||
|
||||
- *if a node is a member of a Swarm, it is then kicked out of the Swarm*
|
||||
|
||||
- everything will re-appear if you disable user namespaces again
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Picking a node
|
||||
|
||||
- We will select a node where we will enable user namespaces
|
||||
|
||||
- This node will have to be re-added to the Swarm
|
||||
|
||||
- All containers and services running on this node will be rescheduled
|
||||
|
||||
- Let's make sure that we do not pick the node running the registry!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check on which node the registry is running:
|
||||
```bash
|
||||
docker service ps registry
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Pick any other node (noted `nodeX` in the next slides).
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Logging into the right Engine
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into the right node:
|
||||
```bash
|
||||
ssh node`X`
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Configuring the Engine
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a configuration file for the Engine:
|
||||
```bash
|
||||
echo '{"userns-remap": "default"}' | sudo tee /etc/docker/daemon.json
|
||||
```
|
||||
|
||||
- Restart the Engine:
|
||||
```bash
|
||||
kill $(pidof dockerd)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Checking that User Namespaces are enabled
|
||||
|
||||
.exercise[
|
||||
- Notice the new Docker path:
|
||||
```bash
|
||||
docker info | grep var/lib
|
||||
```
|
||||
|
||||
- Notice the new UID:GID permissions:
|
||||
```bash
|
||||
sudo ls -l /var/lib/docker
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
You should see a line like the following:
|
||||
```
|
||||
drwx------ 11 296608 296608 4096 Aug 3 05:11 296608.296608
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Add the node back to the Swarm
|
||||
|
||||
.exercise[
|
||||
|
||||
- Get our manager token from another node:
|
||||
```bash
|
||||
ssh node`Y` docker swarm join-token manager
|
||||
```
|
||||
|
||||
- Copy-paste the join command to the node
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Check the new UID:GID
|
||||
|
||||
.exercise[
|
||||
|
||||
- Run a background container on the node:
|
||||
```bash
|
||||
docker run -d --name lockdown alpine sleep 1000000
|
||||
```
|
||||
|
||||
- Look at the processes in this container:
|
||||
```bash
|
||||
docker top lockdown
|
||||
ps faux
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Comparing on-disk ownership with/without User Namespaces
|
||||
|
||||
.exercise[
|
||||
|
||||
- Compare the output of the two following commands:
|
||||
```bash
|
||||
docker run alpine ls -l /
|
||||
docker run --userns=host alpine ls -l /
|
||||
```
|
||||
|
||||
]
|
||||
(Or can we...?)
|
||||
|
||||
--
|
||||
|
||||
class: namespaces
|
||||
- We cannot have two resources *of the same type* with the same name
|
||||
|
||||
In the first case, it looks like things belong to `root:root`.
|
||||
|
||||
In the second case, we will see the "real" (on-disk) ownership.
|
||||
(But it's OK to have a `rng` service, a `rng` deployment, and a `rng` daemon set!)
|
||||
|
||||
--
|
||||
|
||||
class: namespaces
|
||||
- We cannot have two resources of the same type with the same name *in the same namespace*
|
||||
|
||||
Remember to get back to `node1` when finished!
|
||||
(But it's OK to have e.g. two `rng` services in different namespaces!)
|
||||
|
||||
--
|
||||
|
||||
- In other words: **the tuple *(type, name, namespace)* needs to be unique**
|
||||
|
||||
(In the resource YAML, the type is called `Kind`)
|
||||
|
||||
---
|
||||
|
||||
## Pre-existing namespaces
|
||||
|
||||
- If we deploy a cluster with `kubeadm`, we have three namespaces:
|
||||
|
||||
- `default` (for our applications)
|
||||
|
||||
- `kube-system` (for the control plane)
|
||||
|
||||
- `kube-public` (contains one secret used for cluster discovery)
|
||||
|
||||
- If we deploy differently, we may have different namespaces
|
||||
|
||||
---
|
||||
|
||||
## Creating namespaces
|
||||
|
||||
- We can create namespaces with a very minimal YAML, e.g.:
|
||||
```bash
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: blue
|
||||
EOF
|
||||
```
|
||||
|
||||
- If we are using a tool like Helm, it will create namespaces automatically
|
||||
|
||||
---
|
||||
|
||||
## Using namespaces
|
||||
|
||||
- We can pass a `-n` or `--namespace` flag to most `kubectl` commands:
|
||||
```bash
|
||||
kubectl -n blue get svc
|
||||
```
|
||||
|
||||
- We can also use *contexts*
|
||||
|
||||
- A context is a *(user, cluster, namespace)* tuple
|
||||
|
||||
- We can manipulate contexts with the `kubectl config` command
|
||||
|
||||
---
|
||||
|
||||
## Creating a context
|
||||
|
||||
- We are going to create a context for the `blue` namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- View existing contexts to see the cluster name and the current user:
|
||||
```bash
|
||||
kubectl config get-contexts
|
||||
```
|
||||
|
||||
- Create a new context:
|
||||
```bash
|
||||
kubectl config set-context blue --namespace=blue \
|
||||
--cluster=kubernetes --user=kubernetes-admin
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We have created a context; but this is just some configuration values.
|
||||
|
||||
The namespace doesn't exist yet.
|
||||
|
||||
---
|
||||
|
||||
## Using a context
|
||||
|
||||
- Let's switch to our new context and deploy the DockerCoins chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Use the `blue` context:
|
||||
```bash
|
||||
kubectl config use-context blue
|
||||
```
|
||||
|
||||
- Deploy DockerCoins:
|
||||
```bash
|
||||
helm install dockercoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
In the last command line, `dockercoins` is just the local path where
|
||||
we created our Helm chart before.
|
||||
|
||||
---
|
||||
|
||||
## Viewing the deployed app
|
||||
|
||||
- Let's see if our Helm chart worked correctly!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the port number allocated to the `webui` service:
|
||||
```bash
|
||||
kubectl get svc webui
|
||||
```
|
||||
|
||||
- Point our browser to http://X.X.X.X:3xxxx
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the app to be up and running.
|
||||
|
||||
---
|
||||
|
||||
## Namespaces and isolation
|
||||
|
||||
- Namespaces *do not* provide isolation
|
||||
|
||||
- A pod in the `green` namespace can communicate with a pod in the `blue` namespace
|
||||
|
||||
- A pod in the `default` namespace can communicate with a pod in the `kube-system` namespace
|
||||
|
||||
- `kube-dns` uses a different subdomain for each namespace
|
||||
|
||||
- Example: from any pod in the cluster, you can connect to the Kubernetes API with:
|
||||
|
||||
`https://kubernetes.default.svc.cluster.local:443/`
|
||||
|
||||
---
|
||||
|
||||
## Isolating pods
|
||||
|
||||
- Actual isolation is implemented with *network policies*
|
||||
|
||||
- Network policies are resources (like deployments, services, namespaces...)
|
||||
|
||||
- Network policies specify which flows are allowed:
|
||||
|
||||
- between pods
|
||||
|
||||
- from pods to the outside world
|
||||
|
||||
- and vice-versa
|
||||
|
||||
---
|
||||
|
||||
## Network policies overview
|
||||
|
||||
- We can create as many network policies as we want
|
||||
|
||||
- Each network policy has:
|
||||
|
||||
- a *pod selector*: "which pods are targeted by the policy?"
|
||||
|
||||
- lists of ingress and/or egress rules: "which peers and ports are allowed or blocked?"
|
||||
|
||||
- If a pod is not targeted by any policy, traffic is allowed by default
|
||||
|
||||
- If a pod is targeted by at least one policy, traffic must be allowed explicitly
|
||||
|
||||
---
|
||||
|
||||
## More about network policies
|
||||
|
||||
- This remains a high level overview of network policies
|
||||
|
||||
- For more details, check:
|
||||
|
||||
- the [Kubernetes documentation about network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
|
||||
|
||||
- this [talk about network policies at KubeCon 2017 US](https://www.youtube.com/watch?v=3gGpMmYeEO8) by [@ahmetb](https://twitter.com/ahmetb)
|
||||
|
||||
@@ -94,6 +94,26 @@ That rollout should be pretty quick. What shows in the web UI?
|
||||
|
||||
---
|
||||
|
||||
## Give it some time
|
||||
|
||||
- At first, it looks like nothing is happening (the graph remains at the same level)
|
||||
|
||||
- According to `kubectl get deploy -w`, the `deployment` was updated really quickly
|
||||
|
||||
- But `kubectl get pods -w` tells a different story
|
||||
|
||||
- The old `pods` are still here, and they stay in `Terminating` state for a while
|
||||
|
||||
- Eventually, they are terminated; and then the graph decreases significantly
|
||||
|
||||
- This delay is due to the fact that our worker doesn't handle signals
|
||||
|
||||
- Kubernetes sends a "polite" shutdown request to the worker, which ignores it
|
||||
|
||||
- Eventually, Kubernetes gets impatient and kills the container
|
||||
|
||||
---
|
||||
|
||||
## Rolling out a boo-boo
|
||||
|
||||
- What happens if we make a mistake?
|
||||
|
||||
@@ -20,6 +20,8 @@
|
||||
|
||||
6. Copy the configuration file generated by `kubeadm init`
|
||||
|
||||
- Check the [prepare VMs README](https://github.com/jpetazzo/container.training/blob/master/prepare-vms/README.md) for more details
|
||||
|
||||
---
|
||||
|
||||
## `kubeadm` drawbacks
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
## Versions installed
|
||||
|
||||
- Kubernetes 1.9.6 (but 1.10 is about to come out!)
|
||||
- Kubernetes 1.10.0
|
||||
- Docker Engine 18.03.0-ce
|
||||
- Docker Compose 1.18.0
|
||||
- Docker Compose 1.20.1
|
||||
|
||||
|
||||
.exercise[
|
||||
@@ -22,7 +22,7 @@ class: extra-details
|
||||
|
||||
## Kubernetes and Docker compatibility
|
||||
|
||||
- Kubernetes only validates Docker Engine versions 1.11.2, 1.12.6, 1.13.1, and 17.03.2
|
||||
- Kubernetes 1.10 only validates Docker Engine versions [1.11.2 to 1.13.1 and 17.03.x](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies)
|
||||
|
||||
--
|
||||
|
||||
|
||||
@@ -74,6 +74,12 @@ And *then* it is time to look at orchestration!
|
||||
|
||||
---
|
||||
|
||||
## Stateful services (demo!)
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## HTTP traffic handling
|
||||
|
||||
- *Services* are layer 4 constructs
|
||||
@@ -93,13 +99,19 @@ And *then* it is time to look at orchestration!
|
||||
|
||||
---
|
||||
|
||||
## Ingress with Træfik (demo!)
|
||||
|
||||
.center[]
|
||||
|
||||
---
|
||||
|
||||
## Logging and metrics
|
||||
|
||||
- Logging is delegated to the container engine
|
||||
|
||||
- Metrics are typically handled with Prometheus
|
||||
- Metrics are typically handled with [Prometheus](https://prometheus.io/)
|
||||
|
||||
(Heapster is a popular add-on)
|
||||
([Heapster](https://github.com/kubernetes/heapster) is a popular add-on)
|
||||
|
||||
---
|
||||
|
||||
@@ -157,7 +169,7 @@ Sorry Star Trek fans, this is not the federation you're looking for!
|
||||
|
||||
- Kubernetes master operation relies on etcd
|
||||
|
||||
- etcd uses the Raft protocol
|
||||
- etcd uses the [Raft](https://raft.github.io/) protocol
|
||||
|
||||
- Raft recommends low latency between nodes
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
## Intros
|
||||
|
||||
- Hello! We are:
|
||||
- Hello! We are:
|
||||
|
||||
- .emoji[👷🏻♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)
|
||||
- .emoji[⛵] Jérémy ([@jeremygarrouste](https://twitter.com/jeremygarrouste), Inpiwee)
|
||||
|
||||
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Docker Inc.)
|
||||
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
|
||||
|
||||
- The workshop will run from 9am to 4pm
|
||||
- The training will run from 9:15 to 17:00
|
||||
|
||||
- There will be a lunch break at noon
|
||||
- There will be a lunch break at 12:30
|
||||
|
||||
(And coffee breaks!)
|
||||
|
||||
@@ -16,4 +16,4 @@
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Live feedback, questions, help on @@CHAT@@
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- btp-auto
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
- logistics.md
|
||||
- swarm/intro.md
|
||||
- common/about-slides.md
|
||||
- common/toc.md
|
||||
- - common/prereqs.md
|
||||
- swarm/versions.md
|
||||
- common/sampleapp.md
|
||||
- common/composescale.md
|
||||
- common/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- common/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/compose2swarm.md
|
||||
- swarm/updatingservices.md
|
||||
#- swarm/rollingupdates.md
|
||||
- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- - swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- common/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,57 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- btp-manual
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
- logistics.md
|
||||
- swarm/intro.md
|
||||
- common/about-slides.md
|
||||
- common/toc.md
|
||||
- - common/prereqs.md
|
||||
- swarm/versions.md
|
||||
- common/sampleapp.md
|
||||
- common/composescale.md
|
||||
- common/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- common/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
#- swarm/hostingregistry.md
|
||||
#- swarm/testingregistry.md
|
||||
#- swarm/btp-manual.md
|
||||
#- swarm/swarmready.md
|
||||
- swarm/compose2swarm.md
|
||||
- swarm/updatingservices.md
|
||||
#- swarm/rollingupdates.md
|
||||
#- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
#- swarm/netshoot.md
|
||||
#- swarm/ipsec.md
|
||||
#- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
#- swarm/secrets.md
|
||||
#- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
#- swarm/stateful.md
|
||||
#- swarm/extratips.md
|
||||
- common/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,66 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
- btp-auto
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
#- common/logistics.md
|
||||
- swarm/intro.md
|
||||
- common/about-slides.md
|
||||
- common/toc.md
|
||||
- - common/prereqs.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 1
|
||||
- common/sampleapp.md
|
||||
- common/composescale.md
|
||||
- common/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- common/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/compose2swarm.md
|
||||
- |
|
||||
name: part-2
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 2
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/swarmnbt.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
- swarm/healthchecks.md
|
||||
- swarm/nodeinfo.md
|
||||
- swarm/swarmtools.md
|
||||
- - swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- common/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,66 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
- btp-auto
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
#- common/logistics.md
|
||||
- swarm/intro.md
|
||||
- common/about-slides.md
|
||||
- common/toc.md
|
||||
- - common/prereqs.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 1
|
||||
- common/sampleapp.md
|
||||
- common/composescale.md
|
||||
- common/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- common/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/compose2swarm.md
|
||||
- |
|
||||
name: part-2
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 2
|
||||
- - swarm/operatingswarm.md
|
||||
#- swarm/netshoot.md
|
||||
#- swarm/swarmnbt.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
#- swarm/healthchecks.md
|
||||
- swarm/nodeinfo.md
|
||||
- swarm/swarmtools.md
|
||||
- - swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
#- swarm/logging.md
|
||||
#- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- common/thankyou.md
|
||||
- swarm/links.md
|
||||
236
slides/swarm/namespaces.md
Normal file
@@ -0,0 +1,236 @@
|
||||
class: namespaces
|
||||
name: namespaces
|
||||
|
||||
# Improving isolation with User Namespaces
|
||||
|
||||
- *Namespaces* are kernel mechanisms to compartmentalize the system
|
||||
|
||||
- There are different kinds of namespaces: `pid`, `net`, `mnt`, `ipc`, `uts`, and `user`
|
||||
|
||||
- For a primer, see "Anatomy of a Container"
|
||||
([video](https://www.youtube.com/watch?v=sK5i-N34im8))
|
||||
([slides](https://www.slideshare.net/jpetazzo/cgroups-namespaces-and-beyond-what-are-containers-made-from-dockercon-europe-2015))
|
||||
|
||||
- The *user namespace* allows to map UIDs between the containers and the host
|
||||
|
||||
- As a result, `root` in a container can map to a non-privileged user on the host
|
||||
|
||||
Note: even without user namespaces, `root` in a container cannot go wild on the host.
|
||||
<br/>
|
||||
It is mediated by capabilities, cgroups, namespaces, seccomp, LSMs...
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## User Namespaces in Docker
|
||||
|
||||
- Optional feature added in Docker Engine 1.10
|
||||
|
||||
- Not enabled by default
|
||||
|
||||
- Has to be enabled at Engine startup, and affects all containers
|
||||
|
||||
- When enabled, `UID:GID` in containers are mapped to a different range on the host
|
||||
|
||||
- Safer than switching to a non-root user (with `-u` or `USER`) in the container
|
||||
<br/>
|
||||
(Since with user namespaces, root escalation maps to a non-privileged user)
|
||||
|
||||
- Can be selectively disabled per container by starting them with `--userns=host`
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## User Namespaces Caveats
|
||||
|
||||
When user namespaces are enabled, containers cannot:
|
||||
|
||||
- Use the host's network namespace (with `docker run --network=host`)
|
||||
|
||||
- Use the host's PID namespace (with `docker run --pid=host`)
|
||||
|
||||
- Run in privileged mode (with `docker run --privileged`)
|
||||
|
||||
... Unless user namespaces are disabled for the container, with flag `--userns=host`
|
||||
|
||||
External volume and graph drivers that don't support user mapping might not work.
|
||||
|
||||
All containers are currently mapped to the same UID:GID range.
|
||||
|
||||
Some of these limitations might be lifted in the future!
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Filesystem ownership details
|
||||
|
||||
When enabling user namespaces:
|
||||
|
||||
- the UID:GID on disk (in the images and containers) has to match the *mapped* UID:GID
|
||||
|
||||
- existing images and containers cannot work (their UID:GID would have to be changed)
|
||||
|
||||
For practical reasons, when enabling user namespaces, the Docker Engine places containers and images (and everything else) in a different directory.
|
||||
|
||||
As a result, if you enable user namespaces on an existing installation:
|
||||
|
||||
- all containers and images (and e.g. Swarm data) disappear
|
||||
|
||||
- *if a node is a member of a Swarm, it is then kicked out of the Swarm*
|
||||
|
||||
- everything will re-appear if you disable user namespaces again
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Picking a node
|
||||
|
||||
- We will select a node where we will enable user namespaces
|
||||
|
||||
- This node will have to be re-added to the Swarm
|
||||
|
||||
- All containers and services running on this node will be rescheduled
|
||||
|
||||
- Let's make sure that we do not pick the node running the registry!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check on which node the registry is running:
|
||||
```bash
|
||||
docker service ps registry
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Pick any other node (noted `nodeX` in the next slides).
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Logging into the right Engine
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into the right node:
|
||||
```bash
|
||||
ssh node`X`
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Configuring the Engine
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a configuration file for the Engine:
|
||||
```bash
|
||||
echo '{"userns-remap": "default"}' | sudo tee /etc/docker/daemon.json
|
||||
```
|
||||
|
||||
- Restart the Engine:
|
||||
```bash
|
||||
kill $(pidof dockerd)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Checking that User Namespaces are enabled
|
||||
|
||||
.exercise[
|
||||
- Notice the new Docker path:
|
||||
```bash
|
||||
docker info | grep var/lib
|
||||
```
|
||||
|
||||
- Notice the new UID:GID permissions:
|
||||
```bash
|
||||
sudo ls -l /var/lib/docker
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
You should see a line like the following:
|
||||
```
|
||||
drwx------ 11 296608 296608 4096 Aug 3 05:11 296608.296608
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Add the node back to the Swarm
|
||||
|
||||
.exercise[
|
||||
|
||||
- Get our manager token from another node:
|
||||
```bash
|
||||
ssh node`Y` docker swarm join-token manager
|
||||
```
|
||||
|
||||
- Copy-paste the join command to the node
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Check the new UID:GID
|
||||
|
||||
.exercise[
|
||||
|
||||
- Run a background container on the node:
|
||||
```bash
|
||||
docker run -d --name lockdown alpine sleep 1000000
|
||||
```
|
||||
|
||||
- Look at the processes in this container:
|
||||
```bash
|
||||
docker top lockdown
|
||||
ps faux
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: namespaces
|
||||
|
||||
## Comparing on-disk ownership with/without User Namespaces
|
||||
|
||||
.exercise[
|
||||
|
||||
- Compare the output of the two following commands:
|
||||
```bash
|
||||
docker run alpine ls -l /
|
||||
docker run --userns=host alpine ls -l /
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
class: namespaces
|
||||
|
||||
In the first case, it looks like things belong to `root:root`.
|
||||
|
||||
In the second case, we will see the "real" (on-disk) ownership.
|
||||
|
||||
--
|
||||
|
||||
class: namespaces
|
||||
|
||||
Remember to get back to `node1` when finished!
|
||||
85
slides/theme.css
Normal file
@@ -0,0 +1,85 @@
|
||||
@import url('https://fonts.googleapis.com/css?family=PT+Sans');
|
||||
|
||||
body {
|
||||
font-family: 'PT Sans', sans-serif;
|
||||
max-width: 900px;
|
||||
margin: 0 auto 0 auto;
|
||||
font-size: 13pt;
|
||||
background: lightgrey;
|
||||
}
|
||||
|
||||
body > div {
|
||||
background: white;
|
||||
padding: 0 5em 0 5em;
|
||||
}
|
||||
|
||||
ul, p, h1, h2, h3, h4, h5, h6 {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
h1, h2, h3 {
|
||||
padding-top: 1em;
|
||||
padding-bottom: 0.5em;
|
||||
}
|
||||
|
||||
ul, p {
|
||||
padding-bottom: 1em;
|
||||
}
|
||||
|
||||
img {
|
||||
width: 200px;
|
||||
float: left;
|
||||
margin-right: 1em;
|
||||
margin-bottom: 0.5em;
|
||||
margin-top: 3em;
|
||||
}
|
||||
|
||||
h2:nth-of-type(n+5) {
|
||||
color: #0069A8;
|
||||
}
|
||||
|
||||
h2:nth-of-type(-n+4) {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
h2:nth-of-type(1) {
|
||||
font-size: 3em;
|
||||
}
|
||||
|
||||
h2:nth-of-type(2) {
|
||||
font-size: 2em;
|
||||
}
|
||||
|
||||
h2:nth-of-type(3) {
|
||||
font-size: 1.5em;
|
||||
}
|
||||
|
||||
h2:nth-of-type(4) {
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
/* index */
|
||||
.index h4 {
|
||||
font-size: 2.0em;
|
||||
}
|
||||
.index h5 {
|
||||
font-size: 1.3em;
|
||||
}
|
||||
.index h6 {
|
||||
font-size: 1.0em;
|
||||
}
|
||||
.index h4, .index h5, .index h6, .index p {
|
||||
padding: 5pt;
|
||||
}
|
||||
div.index {
|
||||
}
|
||||
div.block {
|
||||
background: #e1f8ff;
|
||||
padding: 1em;
|
||||
margin: 2em;
|
||||
}
|
||||
.index {
|
||||
font-size: 1.5em;
|
||||
padding: 0.5em;
|
||||
}
|
||||
|
||||