diff --git a/autotest/autotest.py b/autotest/autotest.py
index 7d3c5dae..8299836e 100755
--- a/autotest/autotest.py
+++ b/autotest/autotest.py
@@ -143,7 +143,11 @@ for current_action, next_action in zip(actions, actions[1:]+[("bash", "true")]):
popen_options = dict(shell=True, cwd=cwd, stdin=subprocess.PIPE, preexec_fn=os.setpgrp)
# The following hack allows us to capture the environment variables set by `docker-machine env`
# FIXME: this doesn't handle `unset` for now
- if "eval $(docker-machine env" in current_action[1]:
+ if any([
+ "eval $(docker-machine env" in current_action[1],
+ "DOCKER_HOST" in current_action[1],
+ "COMPOSE_FILE" in current_action[1],
+ ]):
popen_options["stdout"] = subprocess.PIPE
current_action[1] += "\nenv"
proc = subprocess.Popen(current_action[1], **popen_options)
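(For context, an illustrative sketch: the hack works because `docker-machine env` prints `export VAR=value` lines, so appending a plain `env` command makes the resulting variables visible on the captured stdout. The machine name and values below are placeholders.)

```bash
docker-machine env node2
# typically prints lines such as:
#   export DOCKER_TLS_VERIFY="1"
#   export DOCKER_HOST="tcp://AA.BB.CC.DD:2376"
#   export DOCKER_CERT_PATH="/home/docker/.docker/machine/machines/node2"
#   export DOCKER_MACHINE_NAME="node2"
eval $(docker-machine env node2)
env | grep -E 'DOCKER|COMPOSE'   # the appended `env` lets autotest.py read these back from stdout
```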
diff --git a/dockercoins/docker-compose.yml-logging b/dockercoins/docker-compose.yml-logging
index 5b6b37c6..c1503de9 100644
--- a/dockercoins/docker-compose.yml-logging
+++ b/dockercoins/docker-compose.yml-logging
@@ -4,7 +4,7 @@ rng:
- "8001:80"
log_driver: gelf
log_opt:
- gelf-address: "udp://AA.BB.CC.DD:EEEEE"
+ gelf-address: "udp://127.0.0.1:EEEEE"
hasher:
build: hasher
@@ -12,7 +12,7 @@ hasher:
- "8002:80"
log_driver: gelf
log_opt:
- gelf-address: "udp://AA.BB.CC.DD:EEEEE"
+ gelf-address: "udp://127.0.0.1:EEEEE"
webui:
build: webui
@@ -24,13 +24,13 @@ webui:
- "./webui/files/:/files/"
log_driver: gelf
log_opt:
- gelf-address: "udp://AA.BB.CC.DD:EEEEE"
+ gelf-address: "udp://127.0.0.1:EEEEE"
redis:
image: redis
log_driver: gelf
log_opt:
- gelf-address: "udp://AA.BB.CC.DD:EEEEE"
+ gelf-address: "udp://127.0.0.1:EEEEE"
worker:
build: worker
@@ -40,5 +40,5 @@ worker:
- redis
log_driver: gelf
log_opt:
- gelf-address: "udp://AA.BB.CC.DD:EEEEE"
+ gelf-address: "udp://127.0.0.1:EEEEE"
diff --git a/dockercoins/webui/files/index.html b/dockercoins/webui/files/index.html
index 6186d998..5ef4f4e6 100644
--- a/dockercoins/webui/files/index.html
+++ b/dockercoins/webui/files/index.html
@@ -50,7 +50,7 @@ function refresh () {
points.push({ x: s2.now, y: speed });
}
$("#speed").text("~" + speed.toFixed(1) + " hashes/second");
- var msg = ("I'm attending the @docker workshop at @praqma, "
+ var msg = ("I'm attending the @docker workshop at @neofonie, "
+ "and my #DockerCoins mining rig is crunching "
+ speed.toFixed(1) + " hashes/second! W00T!");
$("#tweet").attr(
diff --git a/www/htdocs/index.html b/www/htdocs/index.html
index 19af6429..3ac4c1b9 100644
--- a/www/htdocs/index.html
+++ b/www/htdocs/index.html
@@ -118,7 +118,7 @@ at e.g. 9am, and start at 9:30.
- Agenda:
.small[
-- 09:00-09:15 hello!
+- 09:00-09:15 hello and breakfast
- 09:15-10:45 part 1
- 10:45-11:00 coffee break
- 11:00-12:30 part 2
@@ -141,7 +141,7 @@ Remember to change:
-->
- Experimental chat support on
- [Gitter](https://gitter.im/jpetazzo/workshop-20160405-stockholm)
+ [Gitter](http://container.training/chat)
---
@@ -240,15 +240,14 @@ grep '^# ' index.html | grep -v '
(see below)
+- All hands-on sections are clearly identified, like the gray rectangle below
.exercise[
- This is the stuff you're supposed to do!
- Go to [container.training](http://container.training/) to view these slides
- Join the chat room on
- [Gitter](https://gitter.im/jpetazzo/workshop-20160405-stockholm)
+ [Gitter](http://container.training/chat)
]
@@ -319,12 +318,11 @@ Once in a while, the instructions will say:
There are multiple ways to do this:
-- create a new window or tab on your machine,
-
and SSH into the VM;
+- create a new window or tab on your machine, and SSH into the VM;
-- use tmux on the VM and open a new window in tmux.
+- use screen or tmux on the VM and open a new window from there.
-If you want to use screen or whatever, you're welcome!
+You are welcome to use whichever method you feel most comfortable with.
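For instance, with tmux (an illustrative hint; screen works the same way with a different prefix):

```bash
tmux               # start tmux on the VM
# press Ctrl-b c to open a new window, Ctrl-b n to cycle through windows
# (with screen, the prefix is Ctrl-a instead of Ctrl-b)
```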
---
@@ -345,10 +343,10 @@ If you want to use screen or whatever, you're welcome!
## Brand new versions!
-- Engine 1.11-rc3
-- Compose 1.7-rc1
-- Swarm 1.2-rc1
-- Machine 0.7-rc1
+- Engine 1.12
+- Compose 1.7
+- Swarm 1.2
+- Machine 0.6
.exercise[
@@ -364,30 +362,39 @@ If you want to use screen or whatever, you're welcome!
---
+## Why are we not using the latest version of Machine?
+
+- The latest version of Machine is 0.7
+
+- The way it deploys Swarm is different from 0.6
+
+- This causes a regression in the strategy that we will use later
+
+- More details later!
+
+---
+
# Our sample application
-- Let's look at the general layout of the
- [source code](https://github.com/jpetazzo/orchestration-workshop)
+- Visit the GitHub repository containing all the materials for this workshop:
+
https://github.com/jpetazzo/orchestration-workshop
+
+- The application is in the [dockercoins](
+ https://github.com/jpetazzo/orchestration-workshop/tree/master/dockercoins)
+ subdirectory
+
+- Let's look at the general layout of the source code:
+
+ there is a Compose file [docker-compose.yml](
+ https://github.com/jpetazzo/orchestration-workshop/blob/master/dockercoins/docker-compose.yml) ...
+
+ ... and 4 other services, each in its own directory:
-- Each directory = 1 microservice
- `rng` = web service generating random bytes
- `hasher` = web service computing hash of POSTed data
- `worker` = background process using `rng` and `hasher`
- `webui` = web interface to watch progress
-.exercise[
-
-
-
-- Clone the repository on `node1`:
-
`git clone git://github.com/jpetazzo/orchestration-workshop`
-
-]
-
-(Bonus points for forking on GitHub and cloning your fork!)
-
---
## What's this application?
@@ -419,6 +426,31 @@ class: pic
---
+## Getting the application source code
+
+- We will clone the GitHub repository
+
+- The repository also contains scripts and tools that we will use throughout the workshop
+
+.exercise[
+
+
+
+- Clone the repository on `node1`:
+ ```bash
+ git clone git://github.com/jpetazzo/orchestration-workshop
+ ```
+
+]
+
+(You can also fork the repository on GitHub and clone your fork if you prefer that.)
+
+---
+
# Running the application
Without further ado, let's start our application.
@@ -508,7 +540,7 @@ and displays aggregated logs.
- the worker doesn't update the counter after every loop, but up to once per second
- - the speed is computed by the browser, checking the counter up to once per second
+ - the speed is computed by the browser, checking the counter about once per second
- between two consecutive updates, the counter will increase either by 4, or by 0
@@ -598,10 +630,8 @@ and open a new terminal.
- Compose complains that it cannot find `docker-compose.yml`
- You need to go to the directory containing `docker-compose.yml`
-
- - or use the `-f` option to give the path to the Compose file
-
- - or set the `COMPOSE_FILE` environment variable
+
...or use the `-f` option to give the path to the Compose file
+
...or set the `COMPOSE_FILE` environment variable
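For example (an illustrative sketch; adjust the path to wherever you cloned the repository):

```bash
# give the path to the Compose file explicitly ...
docker-compose -f ~/orchestration-workshop/dockercoins/docker-compose.yml ps
# ... or set it once through the environment
export COMPOSE_FILE=~/orchestration-workshop/dockercoins/docker-compose.yml
docker-compose ps
```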
.exercise[
@@ -654,12 +684,13 @@ and open a new terminal.
## Declaring port mapping
- Directly with the Docker Engine:
-
`docker run -d -p 8000:80 nginx`
-
`docker run -d -p 80 nginx`
-
`docker run -d -P nginx`
+ ```bash
+ docker run -d -p 8000:80 nginx
+ docker run -d -p 80 nginx
+ docker run -d -P nginx
+ ```
- With Docker Compose, in the `docker-compose.yml` file:
-
```yaml
rng:
…
@@ -1044,11 +1075,15 @@ WHY?!?
docker-compose scale rng=3
```
-- Compose doesn't deal with load balancing
+- Since Engine 1.11, we get round-robin DNS records
-- We would get 3 instances ...
+ (i.e. resolving `rng` will yield the IP addresses of all 3 containers)
-- ... But only the first one would serve traffic
+- But many (most) resolvers will sort records
+
+- So we would get 3 instances ...
+
+- ... But only one would serve traffic
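A quick way to observe those DNS records (a hypothetical check, assuming the app runs on a user-defined network and uses the default `dockercoins_*_1` container names):

```bash
# resolve `rng` from within the application's network namespace
docker run --rm --net container:dockercoins_worker_1 alpine nslookup rng
# expect one address per rng container (the order may vary between resolvers)
```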
---
@@ -1212,8 +1247,7 @@ faster for a well-behaved application.*
- The good
- We scaled a service, added a load balancer -
-
without changing a single line of code.
+ We scaled a service, added a load balancer - without changing a single line of code.
--
@@ -1624,16 +1658,10 @@ Shortcut: `docker-compose.yml-ambassador`
.exercise[
- Just tell Compose to do its thing:
-
- ```bash
- docker-compose up -d
- ```
+
`docker-compose up -d`
- Check that the stack is up and running:
-
- ```bash
- docker-compose ps
- ```
+
`docker-compose ps`
- Look at the web UI to make sure that it works fine
@@ -1641,30 +1669,56 @@ Shortcut: `docker-compose.yml-ambassador`
---
-## Start the stack on another machine
+## Controlling other Docker Engines
-- We will set the `DOCKER_HOST` variable
+- Many tools in the ecosystem will honor the `DOCKER_HOST` environment variable
-- `docker-compose` will detect and use it
+- Those tools include (obviously!) the Docker CLI and Docker Compose
-- Our Docker hosts are listening on port 55555
+- Our training VMs have been set up to accept API requests on port 55555
+
(without authentication - this is very insecure, by the way!)
.exercise[
-- Set the environment variable:
-
`export DOCKER_HOST=tcp://node2:55555`
+- Set the `DOCKER_HOST` variable to control `node2`, and execute a `docker` command:
+ ```bash
+ export DOCKER_HOST=tcp://node2:55555
+ docker ps
+ ```
+
+]
+
+You shouldn't see any container running at this point.
+
+---
+
+## Start the stack on another machine
+
+- We will tell Compose to bring up our stack on the other node
+
+- It will use the local code (we don't need to check out the code on `node2`)
+
+.exercise[
- Start the stack:
-
`docker-compose up -d`
+ ```bash
+ docker-compose up -d
+ ```
- Check that it's running:
-
`docker-compose ps`
+ ```bash
+ docker-compose ps
+ ```
]
---
-## Scale!
+## Run the application on every node
+
+- We will repeat the previous step with a little shell loop
+
+ ... but introduce parallelism to save some time
.exercise[
@@ -1674,8 +1728,24 @@ Shortcut: `docker-compose.yml-ambassador`
for N in 3 4 5; do
DOCKER_HOST=tcp://node$N:55555 docker-compose up -d &
done
+ wait
```
+]
+
+Note: building the stack everywhere is not optimal. We will see later
+how to build once, and deploy the same build everywhere.
+
+---
+
+## Scale!
+
+- The app is built (and running!) everywhere
+
+- Scaling can be done very quickly
+
+.exercise[
+
- Add a bunch of workers all over the place:
```bash
@@ -1733,14 +1803,13 @@ Let's celebrate our success!
- sharing files between host and containers (source...)
-- The `volumes` directive expands to an host path
-
(e.g. `/home/docker/orchestration-workshop/dockercoins/webui/files`)
+- The `volumes` directive expands to a host path:
+
+ `/home/docker/orchestration-workshop/dockercoins/webui/files`
-- This host path exists on the local machine
-
(not on the others)
+- This host path exists on the local machine (not on the others)
-- This specific volume is used in development
-
(not in production)
+- This specific volume is used in development (not in production)
---
@@ -1748,12 +1817,11 @@ Let's celebrate our success!
- Let's use `docker-compose down`
-- It will stop and remove the DockerCoins app
-
(but leave other containers running)
+- It will stop and remove the DockerCoins app (but leave other containers running)
.exercise[
-- We can do another simple shell loop:
+- We can do another simple parallel shell loop:
```bash
for N in $(seq 1 5); do
export DOCKER_HOST=tcp://node$N:55555
@@ -1873,8 +1941,7 @@ ways to deploy ambassadors.
- Reacts to container start/stop events
-- Adds/removes scaled services in distributed config DB
-
(zookeeper, etcd, consul…)
+- Adds/removes scaled services in distributed config DB (zookeeper, etcd, consul…)
- Another daemon listens to config DB events,
adds/removes backends to load balancers configuration
@@ -1901,8 +1968,7 @@ ways to deploy ambassadors.
- Allow a flat network for your containers
-- Often requires an extra service to deal with BUM packets
-
(broadcast/unknown/multicast)
+- Often requires an extra service to deal with BUM packets (broadcast/unknown/multicast)
- e.g. a key/value store (Consul, Etcd, Zookeeper ...)
@@ -1932,8 +1998,7 @@ class: title
- possibly containing our backup tools
-- This works because the `redis` container image
-
stores its data on a volume
+- This works because the `redis` container image stores its data on a volume
---
@@ -1962,8 +2027,7 @@ class: title
-ti alpine sh
```
-- Look in `/data` in the container
-
(That's where Redis puts its data dumps)
+- Look in `/data` in the container (that's where Redis puts its data dumps)
]
---
@@ -1975,7 +2039,9 @@ class: title
.exercise[
- Connect to Redis:
-
`telnet redis 6379`
+ ```bash
+ telnet redis 6379
+ ```
- Issue commands `SAVE` then `QUIT`
@@ -2104,6 +2170,12 @@ to the default `bridge` network.
docker run -d --name ledata --label backup=please redis
```
+
+
]
---
@@ -2154,9 +2226,13 @@ to the default `bridge` network.
docker kill leweb ledata
```
-]
+
-Only the events for `ledata` will be shown.
+]
---
@@ -2174,9 +2250,8 @@ Only the events for `ledata` will be shown.
- List containers that have a `backup` label;
show their container ID, image, and the label:
```bash
- docker ps \
- --filter label=backup \
- --format '{{ .ID }} {{ .Image }} {{ .Label "backup" }}'
+ docker ps --filter label=backup \
+ --format '{{ .ID }} {{ .Image }} {{ .Label "backup" }}'
```
]
@@ -2205,8 +2280,7 @@ Only the events for `ledata` will be shown.
- To check logs, run e.g.
```bash
- docker run --volumes-from ... ubuntu sh -c \
- "grep WARN /logs/*.log"
+ docker run --volumes-from ... ubuntu sh -c "grep WARN /logs/*.log"
```
- Or just go interactive:
@@ -2228,12 +2302,10 @@ Only the events for `ledata` will be shown.
- Logging driver can be specified globally, and per container
(changing it for a container overrides the global setting)
-- To change the global logging driver,
-
pass extra flags to the daemon
+- To change the global logging driver, pass extra flags to the daemon
(requires a daemon restart)
-- To override the logging driver for a container,
-
pass extra flags to `docker run`
+- To override the logging driver for a container, pass extra flags to `docker run`
---
@@ -2250,12 +2322,21 @@ Only the events for `ledata` will be shown.
- The flags are identical for `docker daemon` and `docker run`
-Tip #1: when provisioning with Docker Machine, use:
-```bash
-docker-machine create ... --engine-opt log-driver=...
-```
+---
-Tip #2: you can set logging options in Compose files.
+## Logging flags in practice
+
+- If you provision your nodes with Docker Machine,
+ you can set global logging flags (which will apply to all
+ containers started by a given Engine) like this:
+
+ ```bash
+ docker-machine create ... --engine-opt log-driver=...
+ ```
+
+- Otherwise, use your favorite method to edit or manage configuration files
+
+- You can set per-container logging options in Compose files
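For instance (an illustrative one-off; the GELF address and port are placeholders), a per-container override looks like this:

```bash
docker run --rm --log-driver=gelf --log-opt gelf-address=udp://127.0.0.1:12201 \
       alpine echo hello world
```

In a Compose file, the same settings go under `log_driver` and `log_opt`, as in the `docker-compose.yml-logging` file at the top of this diff.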
---
@@ -2416,8 +2497,7 @@ in the output.
- click "Auto-refresh" (top-right corner)
- click "5 seconds" (top-left of the list)
-- You should see a series of green bars
-
(with one new green bar every minute)
+- You should see a series of green bars (with one new green bar every minute)
---
@@ -2438,8 +2518,7 @@ in the output.
docker-compose ps logstash
```
-- Start a one-off container, overriding its logging driver:
-
(make sure to update the port number, of course)
+- Note the port number, and start our one-off container:
]
-Shortcut: `docker-compose.yml-logging`
-
(But you still have to update `AA.BB.CC.DD:EEEEE`!)
+Shortcut: `docker-compose.yml-logging` (remember to update the port number!)
---
@@ -2593,8 +2671,9 @@ of this workshop.
- Start all containers
-- This is like upgrading your Linux kernel,
-
but it will get better
+- This is like upgrading your Linux kernel, but it will get better
+
+(Docker Engine 1.11 is using containerd, which will ultimately allow seamless upgrades.)
---
@@ -2619,11 +2698,9 @@ of this workshop.
- When a vulnerability is announced:
- - if it affects your base images,
-
make sure they are fixed first
+ - if it affects your base images: make sure they are fixed first
- - if it affects downloaded packages,
-
make sure they are fixed first
+ - if it affects downloaded packages: make sure they are fixed first
- re-pull base images
@@ -2677,8 +2754,7 @@ volumes to the new containers, so data is preserved.
- We still have `myredis` running
-- We will use *shared network namespaces*
-
to perform network analysis
+- We will use *shared network namespaces* to perform network analysis
- Two containers sharing the same network namespace...
@@ -2703,7 +2779,7 @@ Ngrep uses libpcap (like tcpdump) to sniff network traffic.
-->
- Start a container with the same network namespace:
-
`docker run --net container:myredis -ti alpine sh`
+
`docker run --net container:dockercoins_redis_1 -ti alpine sh`
- Install ngrep:
`apk update && apk add ngrep`
@@ -2753,8 +2829,7 @@ class: title
- First presented in 2009
-- Initial goal: resource scheduler
-
(two-level/pessimistic)
+- Initial goal: resource scheduler (two-level/pessimistic)
- top-level "master" knows the global cluster state
@@ -2762,8 +2837,7 @@ class: title
- master allocates resources to "frameworks"
-- Container support added recently
-
(had to fit existing model)
+- Container support added recently (had to fit existing model)
- Network and service discovery is complex
@@ -2771,7 +2845,8 @@ class: title
## Mesos (in practice)
-- Easy to setup a test cluster (in containers!)
+- Super easy to set up a test cluster
+
(e.g. [minimesos](https://minimesos.org/) puts all Mesos components in containers)
- Great to accommodate mixed workloads
(see Marathon, Chronos, Aurora, and many more)
@@ -2788,8 +2863,7 @@ class: title
- Started in June 2014
-- Designed specifically as a platform for containers
-
("greenfield" design)
+- Designed specifically as a platform for containers ("greenfield" design)
- "pods" = groups of containers sharing network/storage
@@ -2797,8 +2871,7 @@ class: title
- extensive use of "tags" instead of e.g. tree hierarchy
-- Initially designed around Docker,
-
but doesn't hesitate to diverge in a few places
+- Initially designed around Docker, but doesn't hesitate to diverge in a few places
---
@@ -2813,11 +2886,11 @@ class: title
- Tends to be loved by ops more than devs
.small[(but keep in mind that it's evolving quite as fast as Docker)]
-- Adaptation is needed when it differs from Docker
-
.small[(need to learn new API, new tooling, new concepts)]
+- Setting up a local development cluster is non-trivial
+
.small[(the easiest way is to spin one up on e.g. GKE)]
- Bottom line: Kubernetes is not Docker!
-
.small[(different APIs, concepts, configuration files...)]
+
.small[(different concepts, APIs, tooling, configuration files...)]
---
@@ -2843,16 +2916,14 @@ class: title
## ECS (in practice)
-- Task definitions look like Compose files,
-
but are significantly different
+- Task definitions look like Compose files, but are significantly different
- Integration with e.g. ELB is suboptimal
(ELB requires all backends to run on the same port)
- Cluster deployment is made easier thanks to ECS CLI
-- Docker API gets partially exposed through ECS API,
-
with some features lagging behind
+- Docker API gets partially exposed through ECS API, with some features lagging behind
- Service discovery is painful
@@ -2888,16 +2959,20 @@ class: title
- Consolidates multiple Docker hosts into a single one
-- "Looks like" a Docker daemon, but it dispatches (schedules)
- your containers on multiple daemons
+- You talk to Swarm using the Docker API
+
+ → you can use all existing tools: Docker CLI, Docker Compose, etc.
-- Talks the Docker API front and back
-
(leverages the Docker API and ecosystem)
+- Swarm talks to your Docker Engines using the Docker API too
+
+ → you can use existing Engines without modification
+
+- Dispatches (schedules) your containers across the cluster, transparently
- Open source and written in Go (like Docker)
- Started by two of the original Docker authors
-
([@aluzzardi](https://twitter.com/aluzzardi) and [@vieux](https://twitter.com/vieux))
+ ([@aluzzardi](https://twitter.com/aluzzardi) and [@vieux](https://twitter.com/vieux))
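Concretely (a hedged sketch, assuming `node1` was set up by Docker Machine as the Swarm master), you point your client at the Swarm manager and keep using the exact same commands:

```bash
eval $(docker-machine env --swarm node1)
docker info    # reports the nodes of the whole cluster
docker ps      # lists containers running anywhere in the cluster
```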
---
@@ -2905,14 +2980,14 @@ class: title
- Stable since November 2015
+- Initially aimed at small setups, and easy to set up
+
- Tested with 1000 nodes + 50000 containers
.small[(without particular tuning; see DockerCon EU opening keynotes!)]
-- Perfect for some scenarios (Jenkins, grid...)
+- Still easy to set up, but scales to much larger deployments too
-- Requires extra effort for Compose build, links...
-
-- Requires a key/value store to achieve high availability
+- Requires a key/value store for advanced features
- We'll see it in action!
@@ -3022,8 +3097,7 @@ class: pic
- The `generic` driver skips the first step
-- It can install Docker on any machine,
-
as long as you have SSH access
+- It can install Docker on any machine, as long as you have SSH access
- We will use that!
@@ -3096,8 +3170,7 @@ in the discovery service hosted by Docker Inc.
- The node continues to work even if the agent dies
-- Automatically started by Docker Machine
-
(when the `--swarm` option is passed)
+- Automatically started by Docker Machine (when the `--swarm` option is passed)
---
@@ -3141,8 +3214,7 @@ in the discovery service hosted by Docker Inc.
- you can still control individual nodes
- - you can start a new manager
-
(at this point, it is stateless)
+ - you can start a new manager (at this point, it is stateless)
- We'll setup active/passive redundancy later
@@ -3298,6 +3370,7 @@ Name: node1
-->
```bash
+ TOKEN=$(cat token)
docker-machine create --driver generic \
--swarm --swarm-discovery token://$TOKEN \
--generic-ssh-user docker --generic-ip-address `AA.BB.CC.DD` node2
@@ -3319,15 +3392,24 @@ Repeat for all 4 nodes. (Pro tip: look for name/address mapping in `/etc/hosts`!
- Deploy nodes 3, 4, and 5:
```bash
- grep node[345] /etc/hosts | grep -v ^127 |
- while read IPADDR NODENAME
- do docker-machine create --driver generic \
- --swarm --swarm-discovery token://$TOKEN \
- --generic-ssh-user docker \
- --generic-ip-address $IPADDR $NODENAME
- done
+ TOKEN=$(cat token)
+ grep node[345] /etc/hosts | grep -v ^127 |
+ while read IPADDR NODENAME
+ do docker-machine create --driver generic \
+ --swarm --swarm-discovery token://$TOKEN \
+ --generic-ssh-user docker \
+ --generic-ip-address $IPADDR $NODENAME
+ done
```
+]
+
+---
+
+## Checking the state of our cluster
+
+.exercise[
+