Compare commits

..

20 Commits

Author SHA1 Message Date
Jerome Petazzoni
687b61dbf4 fix-redirects.sh: adding forced redirect 2020-04-07 16:57:06 -05:00
Jerome Petazzoni
22f32ee4c0 Merge branch 'master' into qconsf2018 2018-11-09 02:25:18 -06:00
Jerome Petazzoni
ee3c2c3030 Merge branch 'ignore-preflight-errors' into qconsf2018 2018-11-09 02:23:53 -06:00
Jerome Petazzoni
45f9d7bf59 bump versions 2018-11-09 02:23:38 -06:00
Jerome Petazzoni
efb72c2938 Bump all the versions
Bump:
- stern
- Ubuntu

Also, each place where there is a 'bumpable' version, I added
a ##VERSION## marker, easily greppable.
2018-11-08 20:42:02 -06:00
Jerome Petazzoni
357d341d82 Ignore 'wrong Docker version' warning
For some reason, kubeadm doesn't want to deploy with Docker Engine 18.09.
Before, it would just issue a warning; but now apparently the warning blocks
the deployment. So... let's ignore the warning. (I've tested the content
and it works fine with Engine 18.09 as far as I can tell.)
2018-11-08 20:32:52 -06:00
Jerome Petazzoni
d4c338c62c Update prom slides for QCON preload 2018-11-07 23:08:51 -06:00
Jerome Petazzoni
d35d186249 Merge branch 'master' into qconsf2018 2018-11-01 19:48:17 -05:00
Jerome Petazzoni
6c8172d7b1 Merge branch 'work-around-kubectl-logs-bug' into qconsf2018 2018-11-01 19:45:45 -05:00
Jerome Petazzoni
d3fac47823 kubectl logs -l ... --tail ... is buggy.
(It always returns 10 lines of output instead
of the requested number.)

This works around the problem, by adding extra
explanations of the issue and providing a shell
function as a workaround.

See kubernetes/kubernetes#70554 for details.
2018-11-01 19:45:13 -05:00
Jerome Petazzoni
4f71074a06 Work around bug in kubectl logs
kubectl logs -l ... --tail ... is buggy.
(It always returns 10 lines of output instead
of the requested number.)

This works around the problem, by adding extra
explanations of the issue and providing a shell
function as a workaround.

See kubernetes/kubernetes#70554 for details.
2018-11-01 19:41:29 -05:00
Jerome Petazzoni
37470fc5ed Merge branch 'use-dockercoins-from-docker-hub' into qconsf2018 2018-11-01 19:08:57 -05:00
Jerome Petazzoni
98510f9f1c Setup qconsf2018 2018-11-01 16:10:03 -05:00
Jerome Petazzoni
6be0751147 Merge branch 'preinstall-helm-and-prometheus' into qconsf2018 2018-11-01 15:59:43 -05:00
Jerome Petazzoni
a40b291d54 Merge branch 'kubectl-create-deployment' into qconsf2018 2018-11-01 15:59:21 -05:00
Jerome Petazzoni
f24687e79f Merge branch 'jpetazzo-last-slide' into qconsf2018 2018-11-01 15:59:12 -05:00
Jerome Petazzoni
9f5f16dc09 Merge branch 'halfday-fullday-twodays' into qconsf2018 2018-11-01 15:59:03 -05:00
Jerome Petazzoni
9a5989d1f2 Merge branch 'enixlogo' into qconsf2018 2018-11-01 15:58:55 -05:00
Jerome Petazzoni
f01bc2a7a9 Fix overlapping slide number and pics 2018-09-29 18:54:00 -05:00
Jerome Petazzoni
3eaa844c55 Add ENIX logo
Warning: do not merge this branch to your content, otherwise you
will get the ENIX logo in the top right of all your decks
2018-09-08 07:49:38 -05:00
85 changed files with 748 additions and 1529 deletions

1
.gitignore vendored
View File

@@ -7,7 +7,6 @@ slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
node_modules
### macOS ###

View File

@@ -199,7 +199,7 @@ this section is for you!
locked-down computer, host firewall, etc.
- Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
prevent you from losing your place if you get disconnected from servers.
prevent you from loosing your place if you get disconnected from servers.
https://tmux.github.io
- Forget to print "cards" and cut them up for handing out IP's.
- Forget to have fun and focus on your students!

View File

@@ -5,3 +5,6 @@ RUN gem install thin
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80
HEALTHCHECK \
--interval=1s --timeout=2s --retries=3 --start-period=1s \
CMD curl http://localhost/ || exit 1

View File

@@ -2,14 +2,14 @@ version: "2"
services:
elasticsearch:
image: elasticsearch:2
image: elasticsearch
# If you need to access ES directly, just uncomment those lines.
#ports:
# - "9200:9200"
# - "9300:9300"
logstash:
image: logstash:2
image: logstash
command: |
-e '
input {
@@ -47,7 +47,7 @@ services:
- "12201:12201/udp"
kibana:
image: kibana:4
image: kibana
ports:
- "5601:5601"
environment:

View File

@@ -1,37 +1,3 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: consul
labels:
app: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
labels:
app: consul
---
apiVersion: v1
kind: Service
metadata:
@@ -58,7 +24,6 @@ spec:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -72,11 +37,18 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.4.0"
image: "consul:1.2.2"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\""
- "-retry-join=consul-0.consul.$(NAMESPACE).svc.cluster.local"
- "-retry-join=consul-1.consul.$(NAMESPACE).svc.cluster.local"
- "-retry-join=consul-2.consul.$(NAMESPACE).svc.cluster.local"
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"

View File

@@ -1,10 +0,0 @@
apiVersion: v1
Kind: Pod
metadata:
name: hello
namespace: default
spec:
containers:
- name: hello
image: nginx

View File

@@ -32,7 +32,7 @@ Virtualbox, Vagrant and Ansible
$ source path/to/your-ansible-clone/hacking/env-setup
- you need to repeat the last step every time you open a new terminal session
- you need to repeat the last step everytime you open a new terminal session
and want to use any Ansible command (but you'll probably only need to run
it once).

View File

@@ -54,9 +54,6 @@ need_infra() {
if [ -z "$1" ]; then
die "Please specify infrastructure file. (e.g.: infra/aws)"
fi
if [ "$1" = "--infra" ]; then
die "The infrastructure file should be passed directly to this command. Remove '--infra' and try again."
fi
if [ ! -f "$1" ]; then
die "Infrastructure file $1 doesn't exist."
fi

View File

@@ -123,7 +123,9 @@ _cmd_kube() {
pssh --timeout 200 "
if grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/admin.conf ]; then
kubeadm token generate > /tmp/token &&
sudo kubeadm init --token \$(cat /tmp/token)
sudo kubeadm init \
--token \$(cat /tmp/token) \
--ignore-preflight-errors=SystemVerification
fi"
# Put kubeconfig in ubuntu's and docker's accounts
@@ -147,7 +149,10 @@ _cmd_kube() {
pssh --timeout 200 "
if ! grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token) &&
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
sudo kubeadm join \
--discovery-token-unsafe-skip-ca-verification \
--ignore-preflight-errors=SystemVerification \
--token \$TOKEN node1:6443
fi"
# Install kubectx and kubens

View File

@@ -1,5 +1,5 @@
# Number of VMs per cluster
clustersize: 1
clustersize: 5
# Jinja2 template to use to generate ready-to-cut cards
cards_template: enix.html
@@ -18,9 +18,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
compose_version: 1.22.0
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -5,7 +5,7 @@ clustersize: 4
cards_template: jerome.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4
paper_size: Letter
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
@@ -23,4 +23,3 @@ machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -24,5 +24,4 @@ compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
docker_user_password: training

View File

@@ -1,17 +1,17 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://FIXME.container.training" -%}
{%- set url = "http://septembre2018.container.training" -%}
{%- set pagesize = 9 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine virtuelle" -%}
{%- set this_or_each = "cette" -%}
{%- set plural = "" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "chaque" -%}
{%- set plural = "s" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
@@ -19,14 +19,11 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
body, table {
margin: 0;
padding: 0;
line-height: 1em;
font-size: 15px;
font-family: 'Slabo 27px';
font-size: 14px;
}
table {
@@ -59,8 +56,8 @@ img {
}
img.enix {
height: 4.0em;
margin-top: 0.4em;
height: 4.5em;
margin-top: 0.2em;
}
img.kube {
@@ -89,9 +86,8 @@ img.kube {
<p>
Voici les informations permettant de se connecter à votre
{{ cluster_or_machine }} pour cette formation.
Vous pouvez vous connecter à {{ this_or_each }} machine virtuelle
avec n'importe quel client SSH.
cluster pour cette formation. Vous pouvez vous connecter
à ces machines virtuelles avec n'importe quel client SSH.
</p>
<p>
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
@@ -104,8 +100,8 @@ img.kube {
</p>
<p>
Adresse{{ plural }} IP :
<!--<img class="kube" src="{{ image_src }}" />-->
Vos serveurs sont :
<img class="kube" src="{{ image_src }}" />
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>

View File

@@ -1,5 +1,5 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://qconuk2019.container.training/" -%}
{%- set url = "http://qconsf2018.container.training/" -%}
{%- set pagesize = 9 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
@@ -114,15 +114,12 @@ img {
{% for x in range(pagesize) %}
<div class="back">
<br/>
<p>You got this at the workshop
"Getting Started With Kubernetes and Container Orchestration"
during QCON London (March 2019).</p>
<p>If you liked that workshop,
I can train your team or organization
on Docker, container, and Kubernetes,
with curriculums of 1 to 5 days.
</p>
<p>Interested? Contact me at:</p>
<p>You got this card at the workshop "Getting Started With Kubernetes and Container Orchestration"
during QCON San Francisco (November 2018).</p>
<p>That workshop was a 1-day version of a longer curriculum.</p>
<p>If you liked that workshop, the instructor (Jérôme Petazzoni) can deliver it
(or the longer version) to your team or organization.</p>
<p>You can reach him at:</p>
<p>jerome.petazzoni@gmail.com</p>
<p>Thank you!</p>
</div>

View File

@@ -1,4 +0,0 @@
FROM alpine:3.11
RUN apk add --no-cache entr py3-pip git zip
COPY requirements.txt .
RUN pip3 install -r requirements.txt

View File

@@ -34,14 +34,6 @@ compile each `foo.yml` file into `foo.yml.html`.
You can also run `./build.sh forever`: it will monitor the current
directory and rebuild slides automatically when files are modified.
If you have problems running `./build.sh` (because of
Python dependencies or whatever),
you can also run `docker-compose up` in this directory.
It will start the `./build.sh forever` script in a container.
It will also start a web server exposing the slides
(but the slides should also work if you load them from your
local filesystem).
## Publishing pipeline
@@ -61,4 +53,4 @@ You can run `./slidechecker foo.yml.html` to check for
missing images and show the number of slides in that deck.
It requires `phantomjs` to be installed. It takes some
time to run so it is not yet integrated with the publishing
pipeline.
pipeline.

View File

@@ -14,7 +14,6 @@ once)
./appendcheck.py $YAML.html
done
fi
zip -qr slides.zip . && echo "Created slides.zip archive."
;;
forever)

View File

@@ -156,36 +156,6 @@ Different deployments will use different underlying technologies.
---
## Service meshes
* A service mesh is a configurable network layer.
* It can provide service discovery, high availability, load balancing, observability...
* Service meshes are particularly useful for microservices applications.
* Service meshes are often implemented as proxies.
* Applications connect to the service mesh, which relays the connection where needed.
*Does that sound familiar?*
---
## Ambassadors and service meshes
* When using a service mesh, a "sidecar container" is often used as a proxy
* Our services connect (transparently) to that sidecar container
* That sidecar container figures out where to forward the traffic
... Does that sound familiar?
(It should, because service meshes are essentially app-wide or cluster-wide ambassadors!)
---
## Section summary
We've learned how to:
@@ -198,10 +168,3 @@ For more information about the ambassador pattern, including demos on Swarm and
* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
Some services meshes and related projects:
* [Istio](https://istio.io/)
* [Linkerd](https://linkerd.io/)
* [Gloo](https://gloo.solo.io/)

View File

@@ -36,7 +36,7 @@ docker run jpetazzo/hamba 80 www1:80 www2:80
* Appropriate for mandatory parameters (without which the service cannot start).
* Convenient for "toolbelt" services instantiated many times.
* Convenient for "toolbelt" services instanciated many times.
(Because there is no extra step: just run it!)
@@ -63,7 +63,7 @@ docker run -e ELASTICSEARCH_URL=http://es42:9201/ kibana
* Appropriate for optional parameters (since the image can provide default values).
* Also convenient for services instantiated many times.
* Also convenient for services instanciated many times.
(It's as easy as command-line parameters.)

View File

@@ -144,10 +144,6 @@ At a first glance, it looks like this would be particularly useful in scripts.
However, if we want to start a container and get its ID in a reliable way,
it is better to use `docker run -d`, which we will cover in a bit.
(Using `docker ps -lq` is prone to race conditions: what happens if someone
else, or another program or script, starts another container just before
we run `docker ps -lq`?)
---
## View the logs of a container

View File

@@ -131,12 +131,6 @@ Sending build context to Docker daemon 2.048 kB
* Be careful (or patient) if that directory is big and your link is slow.
* You can speed up the process with a [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file
* It tells docker to ignore specific files in the directory
* Only ignore files that you won't need in the build context!
---
## Executing each step

View File

@@ -78,7 +78,7 @@ First step: clone the source code for the app we will be working on.
```bash
$ cd
$ git clone https://github.com/jpetazzo/trainingwheels
$ git clone git://github.com/jpetazzo/trainingwheels
...
$ cd trainingwheels
```

View File

@@ -67,8 +67,7 @@ The following list is not exhaustive.
Furthermore, we limited the scope to Linux containers.
We can also find containers (or things that look like containers) on other platforms
like Windows, macOS, Solaris, FreeBSD ...
Containers also exist (sometimes with other names) on Windows, macOS, Solaris, FreeBSD ...
---
@@ -156,36 +155,6 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
---
## Kata containers
* OCI-compliant runtime.
* Fusion of two projects: Intel Clear Containers and Hyper runV.
* Run each container in a lightweight virtual machine.
* Requires to run on bare metal *or* with nested virtualization.
---
## gVisor
* OCI-compliant runtime.
* Implements a subset of the Linux kernel system calls.
* Written in go, uses a smaller subset of system calls.
* Can be heavily sandboxed.
* Can run in two modes:
* KVM (requires bare metal or nested virtualization),
* ptrace (no requirement, but slower).
---
## Overall ...
* The Docker Engine is very developer-centric:
@@ -205,3 +174,4 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
- Docker is a good default choice
- If you use Kubernetes, the engine doesn't matter

View File

@@ -528,7 +528,7 @@ Very short instructions:
- `docker network create mynet --driver overlay`
- `docker service create --network mynet myimage`
See https://jpetazzo.github.io/container.training for all the deets about clustering!
See http://jpetazzo.github.io/container.training for all the deets about clustering!
---
@@ -721,20 +721,3 @@ eth0 Link encap:Ethernet HWaddr 02:42:AC:15:00:03
...
```
]
---
class: extra-details
## Building with a custom network
* We can build a Dockerfile with a custom network with `docker build --network NAME`.
* This can be used to check that a build doesn't access the network.
(But keep in mind that most Dockerfiles will fail,
<br/>because they need to install remote packages and dependencies!)
* This may be used to access an internal package repository.
(But try to use a multi-stage build instead, if possible!)

View File

@@ -169,5 +169,5 @@ Would we give the same answers to the questions on the previous slide?
class: pic
![Cloud Native Landscape](https://landscape.cncf.io/images/landscape.png)
![Cloud Native Landscape](https://raw.githubusercontent.com/cncf/landscape/master/landscape/CloudNativeLandscape_latest.png)

View File

@@ -66,6 +66,14 @@ class: pic
---
class: pic
## Multiple containers sharing the same image
![layers](images/sharing-layers.jpg)
---
## Differences between containers and images
* An image is a read-only filesystem.
@@ -80,14 +88,6 @@ class: pic
---
class: pic
## Multiple containers sharing the same image
![layers](images/sharing-layers.jpg)
---
## Comparison with object-oriented programming
* Images are conceptually similar to *classes*.
@@ -118,7 +118,7 @@ If an image is read-only, how do we change it?
* The only way to create an image is by "freezing" a container.
* The only way to create a container is by instantiating an image.
* The only way to create a container is by instanciating an image.
* Help!
@@ -216,7 +216,7 @@ clock
---
## Self-hosted namespace
## Self-Hosted namespace
This namespace holds images which are not hosted on Docker Hub, but on third
party registries.
@@ -233,13 +233,6 @@ localhost:5000/wordpress
* `localhost:5000` is the host and port of the registry
* `wordpress` is the name of the image
Other examples:
```bash
quay.io/coreos/etcd
gcr.io/google-containers/hugo
```
---
## How do you store and manage images?
@@ -359,8 +352,6 @@ Do specify tags:
* To ensure that the same version will be used everywhere.
* To ensure repeatability later.
This is similar to what we would do with `pip install`, `npm install`, etc.
---
## Section summary

View File

@@ -82,11 +82,11 @@ class: extra-details
## Installing Docker on macOS and Windows
* On macOS, the recommended method is to use Docker Desktop for Mac:
* On macOS, the recommended method is to use Docker for Mac:
https://docs.docker.com/docker-for-mac/install/
* On Windows 10 Pro, Enterprise, and Education, you can use Docker Desktop for Windows:
* On Windows 10 Pro, Enterprise, and Education, you can use Docker for Windows:
https://docs.docker.com/docker-for-windows/install/
@@ -100,7 +100,7 @@ class: extra-details
---
## Docker Desktop for Mac and Docker Desktop for Windows
## Docker for Mac and Docker for Windows
* Special Docker Editions that integrate well with their respective host OS

View File

@@ -194,13 +194,9 @@ will have equal success with Fluent or other logging stacks!*
- We are going to use a Compose file describing the ELK stack.
- The Compose file is in the container.training repository on GitHub.
```bash
$ git clone https://github.com/jpetazzo/container.training
$ cd container.training
$ cd elk
$ docker-compose up
$ cd ~/container.training/stacks
$ docker-compose -f elk.yml up -d
```
- Let's have a look at the Compose file while it's deploying.
@@ -295,4 +291,4 @@ that you don't drop messages on the floor. Good luck.
If you want to learn more about the GELF driver,
have a look at [this blog post](
https://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).
http://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).

View File

@@ -293,23 +293,3 @@ We can achieve even smaller images if we use smaller base images.
However, if we use common base images (e.g. if we standardize on `ubuntu`),
these common images will be pulled only once per node, so they are
virtually "free."
---
## Build targets
* We can also tag an intermediary stage with `docker build --target STAGE --tag NAME`
* This will create an image (named `NAME`) corresponding to stage `STAGE`
* This can be used to easily access an intermediary stage for inspection
(Instead of parsing the output of `docker build` to find out the image ID)
* This can also be used to describe multiple images from a single Dockerfile
(Instead of using multiple Dockerfiles, which could go out of sync)
* Sometimes, we want to inspect a specific intermediary build stage.
* Or, we want to describe multiple images using a single Dockerfile.

View File

@@ -155,7 +155,7 @@ processes or data flows are given access to system resources.*
The scheduler is concerned mainly with:
- throughput (total amount of work done per time unit);
- throughput (total amount or work done per time unit);
- turnaround time (between submission and completion);
- response time (between submission and start);
- waiting time (between job readiness and execution);
@@ -243,76 +243,58 @@ Scheduling = deciding which hypervisor to use for each VM.
---
class: pic
## Scheduling with one resource
.center[![Not-so-good bin packing](images/binpacking-1d-1.gif)]
## Can we do better?
Can we do better?
---
class: pic
## Scheduling with one resource
.center[![Better bin packing](images/binpacking-1d-2.gif)]
## Yup!
Yup!
---
class: pic
## Scheduling with two resources
.center[![2D bin packing](images/binpacking-2d.gif)]
---
class: pic
## Scheduling with three resources
.center[![3D bin packing](images/binpacking-3d.gif)]
---
class: pic
## You need to be good at this
.center[![Tangram](images/tangram.gif)]
---
class: pic
## But also, you must be quick!
.center[![Tetris](images/tetris-1.png)]
---
class: pic
## And be web scale!
.center[![Big tetris](images/tetris-2.gif)]
---
class: pic
## And think outside (?) of the box!
.center[![3D tetris](images/tetris-3.png)]
---
class: pic
## Good luck!
.center[![FUUUUUU face](images/fu-face.jpg)]
@@ -390,7 +372,7 @@ It depends on:
(Marathon = long running processes; Chronos = run at intervals; ...)
- Commercial offering through DC/OS by Mesosphere.
- Commercial offering through DC/OS my Mesosphere.
---

View File

@@ -91,12 +91,12 @@ class: extra-details
* We need a Dockerized repository!
* Let's go to https://github.com/jpetazzo/trainingwheels and fork it.
* Go to the Docker Hub (https://hub.docker.com/) and sign-in. Select "Repositories" in the blue navigation menu.
* Select "Create" in the top-right bar, and select "Create Repository+".
* Go to the Docker Hub (https://hub.docker.com/).
* Select "Create" in the top-right bar, and select "Create Automated Build."
* Connect your Docker Hub account to your GitHub account.
* Click "Create" button.
* Then go to "Builds" folder.
* Click on Github icon and select your user and the repository that we just forked.
* In "Build rules" block near page bottom, put `/www` in "Build Context" column (or whichever directory the Dockerfile is in).
* Click "Save and Build" to build the repository immediately (without waiting for a git push).
* Select your user and the repository that we just forked.
* Create.
* Then go to "Build Settings."
* Put `/www` in "Dockerfile Location" (or whichever directory the Dockerfile is in).
* Click "Trigger" to build the repository immediately (without waiting for a git push).
* Subsequent builds will happen automatically, thanks to GitHub hooks.

View File

@@ -19,7 +19,7 @@ class: title
- install Docker on e.g. a cloud VM
- use https://www.play-with-docker.com/ to instantly get a training environment
- use http://www.play-with-docker.com/ to instantly get a training environment
---
@@ -91,7 +91,7 @@ $ ssh <login>@<ip-address>
* Git BASH (https://git-for-windows.github.io/)
* MobaXterm (https://mobaxterm.mobatek.net/)
* MobaXterm (http://moabaxterm.mobatek.net)
---

View File

@@ -119,9 +119,9 @@ Nano and LinuxKit VMs in Hyper-V!)
- golang, mongo, python, redis, hello-world ... and more being added
- you should still use `--plaform` with multi-os images to be certain
- you should still use `--plaform` with multi-os images to be certain
- Windows Containers now support `localhost` accessible containers (July 2018)
- Windows Containers now support `localhost` accessable containers (July 2018)
- Microsoft (April 2018) added Hyper-V support to Windows 10 Home ...
@@ -135,8 +135,8 @@ Most "official" Docker images don't run on Windows yet.
Places to Look:
- Hub Official: https://hub.docker.com/u/winamd64/
- Hub Official: https://hub.docker.com/u/winamd64/
- Microsoft: https://hub.docker.com/r/microsoft/
---
@@ -153,12 +153,12 @@ Places to Look:
- PowerShell [Tab Completion: DockerCompletion](https://github.com/matt9ucci/DockerCompletion)
- Best Shell GUI: [Cmder.net](https://cmder.net/)
- Best Shell GUI: [Cmder.net](http://cmder.net/)
- Good Windows Container Blogs and How-To's
- Docker DevRel [Elton Stoneman, Microsoft MVP](https://blog.sixeyed.com/)
- Dockers DevRel [Elton Stoneman, Microsoft MVP](https://blog.sixeyed.com/)
- Docker Captain [Nicholas Dille](https://dille.name/blog/)
- Docker Captian [Nicholas Dille](https://dille.name/blog/)
- Docker Captain [Stefan Scherer](https://stefanscherer.github.io/)

View File

@@ -401,7 +401,7 @@ or providing extra features. For instance:
* [REX-Ray](https://rexray.io/) - create and manage volumes backed by an enterprise storage system (e.g.
SAN or NAS), or by cloud block stores (e.g. EBS, EFS).
* [Portworx](https://portworx.com/) - provides distributed block store for containers.
* [Portworx](http://portworx.com/) - provides distributed block store for containers.
* [Gluster](https://www.gluster.org/) - open source software-defined distributed storage that can scale
to several petabytes. It provides interfaces for object, block and file storage.

View File

@@ -30,7 +30,7 @@ class: self-paced
- These slides include *tons* of exercises and examples
- They assume that you have access to a machine running Docker
- They assume that you have acccess to a machine running Docker
- If you are attending a workshop or tutorial:
<br/>you will be given specific instructions to access a cloud VM

View File

@@ -1,16 +0,0 @@
version: "2"
services:
www:
image: nginx
volumes:
- .:/usr/share/nginx/html
ports:
- 80
builder:
build: .
volumes:
- ..:/repo
working_dir: /repo/slides
command: ./build.sh forever

View File

@@ -1,16 +1,14 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
# coding: utf-8
TEMPLATE="""<html>
<head>
<title>{{ title }}</title>
<link rel="stylesheet" href="index.css">
<meta charset="UTF-8">
</head>
<body>
<div class="main">
<table>
<tr><td class="header" colspan="3">{{ title }}</td></tr>
<tr><td class="details" colspan="3">Note: while some workshops are delivered in French, slides are always in English.</td></tr>
{% if coming_soon %}
<tr><td class="title" colspan="3">Coming soon near you</td></tr>
@@ -19,10 +17,7 @@ TEMPLATE="""<html>
<tr>
<td>{{ item.title }}</td>
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />{% endif %}</td>
<td>{% if item.attend %}<a class="attend" href="{{ item.attend }}" />
{% else %}
<p class="details">{{ item.status }}</p>
{% endif %}</td>
<td><a class="attend" href="{{ item.attend }}" /></td>
</tr>
<tr>
<td class="details">Scheduled {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
@@ -36,10 +31,7 @@ TEMPLATE="""<html>
{% for item in past_workshops[:5] %}
<tr>
<td>{{ item.title }}</td>
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />
{% else %}
<p class="details">{{ item.status }}</p>
{% endif %}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
<td>{% if item.video %}<a class="video" href="{{ item.video }}" />{% endif %}</td>
</tr>
<tr>
@@ -106,49 +98,34 @@ TEMPLATE="""<html>
</table>
</div>
</body>
</html>"""
</html>""".decode("utf-8")
import datetime
import jinja2
import yaml
items = yaml.safe_load(open("index.yaml"))
# Items with a date correspond to scheduled sessions.
# Items without a date correspond to self-paced content.
# The date should be specified as a string (e.g. 2018-11-26).
# It can also be a list of two elements (e.g. [2018-11-26, 2018-11-28]).
# The latter indicates an event spanning multiple dates.
# The first date will be used in the generated page, but the event
# will be considered "current" (and therefore, shown in the list of
# upcoming events) until the second date.
items = yaml.load(open("index.yaml"))
for item in items:
if "date" in item:
date = item["date"]
if type(date) == list:
date_begin, date_end = date
else:
date_begin, date_end = date, date
suffix = {
1: "st", 2: "nd", 3: "rd",
21: "st", 22: "nd", 23: "rd",
31: "st"}.get(date_begin.day, "th")
31: "st"}.get(date.day, "th")
# %e is a non-standard extension (it displays the day, but without a
# leading zero). If strftime fails with ValueError, try to fall back
# on %d (which displays the day but with a leading zero when needed).
try:
item["prettydate"] = date_begin.strftime("%B %e{}, %Y").format(suffix)
item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
except ValueError:
item["prettydate"] = date_begin.strftime("%B %d{}, %Y").format(suffix)
item["begin"] = date_begin
item["end"] = date_end
item["prettydate"] = date.strftime("%B %d{}, %Y").format(suffix)
today = datetime.date.today()
coming_soon = [i for i in items if i.get("date") and i["end"] >= today]
coming_soon.sort(key=lambda i: i["begin"])
past_workshops = [i for i in items if i.get("date") and i["end"] < today]
past_workshops.sort(key=lambda i: i["begin"], reverse=True)
coming_soon = [i for i in items if i.get("date") and i["date"] >= today]
coming_soon.sort(key=lambda i: i["date"])
past_workshops = [i for i in items if i.get("date") and i["date"] < today]
past_workshops.sort(key=lambda i: i["date"], reverse=True)
self_paced = [i for i in items if not i.get("date")]
recorded_workshops = [i for i in items if i.get("video")]
@@ -160,10 +137,10 @@ with open("index.html", "w") as f:
past_workshops=past_workshops,
self_paced=self_paced,
recorded_workshops=recorded_workshops
))
).encode("utf-8"))
with open("past.html", "w") as f:
f.write(template.render(
title="Container Training",
all_past_workshops=past_workshops
))
).encode("utf-8"))

View File

@@ -1,118 +1,3 @@
- date: 2019-06-18
country: ca
city: Montréal
event: Elapse Technologies
title: Getting Started With Kubernetes And Orchestration
speaker: jpetazzo
status: coming soon
hidden: http://elapsetech.com/formation/kubernetes-101
- date: 2019-06-17
country: ca
city: Montréal
event: Elapse Technologies
title: Getting Started With Docker And Containers
speaker: jpetazzo
status: coming soon
hidden: http://elapsetech.com/formation/docker-101
- date: 2019-05-01
country: us
city: Cleveland, OH
event: PyCon
speaker: jpetazzo, s0ulshake
title: Getting started with Kubernetes and container orchestration
attend: https://us.pycon.org/2019/schedule/presentation/74/
- date: 2019-04-28
country: us
city: Chicago, IL
event: GOTO
speaker: jpetazzo
title: Getting Started With Kubernetes and Container Orchestration
attend: https://gotochgo.com/2019/workshops/148
- date: [2019-04-23, 2019-04-24]
country: fr
city: Paris
event: ENIX SAS
speaker: "jpetazzo, rdegez"
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
- date: [2019-04-15, 2019-04-16]
country: fr
city: Paris
event: ENIX SAS
speaker: "jpetazzo, alexbuisine"
title: Bien démarrer avec les conteneurs (in French)
lang: fr
attend: https://enix.io/fr/services/formation/bien-demarrer-avec-les-conteneurs/
- date: 2019-03-08
country: uk
city: London
event: QCON
speaker: jpetazzo
title: Getting Started With Kubernetes and Container Orchestration
attend: https://qconlondon.com/london2019/workshop/getting-started-kubernetes-and-container-orchestration
slides: https://qconuk2019.container.training/
- date: 2019-02-25
country: ca
city: Montréal
event: Elapse Technologies
speaker: jpetazzo
title: <strike>Getting Started With Docker And Containers</strike> (rescheduled for June 2019)
status: rescheduled
- date: 2019-02-26
country: ca
city: Montréal
event: Elapse Technologies
speaker: jpetazzo
title: <strike>Getting Started With Kubernetes And Orchestration</strike> (rescheduled for June 2019)
status: rescheduled
- date: 2019-02-28
country: ca
city: Québec
lang: fr
event: Elapse Technologies
speaker: jpetazzo
title: <strike>Bien démarrer avec Docker et les conteneurs (in French)</strike>
status: cancelled
- date: 2019-03-01
country: ca
city: Québec
lang: fr
event: Elapse Technologies
speaker: jpetazzo
title: <strike>Bien démarrer avec Docker et l'orchestration (in French)</strike>
status: cancelled
- date: [2019-01-07, 2019-01-08]
country: fr
city: Paris
event: ENIX SAS
speaker: "jpetazzo, alexbuisine"
title: Bien démarrer avec les conteneurs (in French)
lang: fr
attend: https://enix.io/fr/services/formation/bien-demarrer-avec-les-conteneurs/
slides: https://intro-2019-01.container.training
- date: [2018-12-17, 2018-12-18]
country: fr
city: Paris
event: ENIX SAS
speaker: "jpetazzo, rdegez"
title: Déployer ses applications avec Kubernetes
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
slides: http://decembre2018.container.training
- date: 2018-11-08
city: San Francisco, CA
country: us
@@ -196,7 +81,7 @@
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Déployer ses applications avec Kubernetes
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
slides: https://septembre2018.container.training

View File

@@ -133,8 +133,6 @@ class: extra-details
→ We are user `kubernetes-admin`, in group `system:masters`.
(We will see later how and why this gives us the permissions that we have.)
---
## User certificates in practice
@@ -540,7 +538,7 @@ It's important to note a couple of details in these flags ...
- But that we can't create things:
```
./kubectl create deployment testrbac --image=nginx
./kubectl create deployment --image=nginx
```
- Exit the container with `exit` or `^D`
@@ -569,45 +567,3 @@ It's important to note a couple of details in these flags ...
kubectl auth can-i list nodes \
--as system:serviceaccount:<namespace>:<name-of-service-account>
```
---
class: extra-details
## Where do our permissions come from?
- When interacting with the Kubernetes API, we are using a client certificate
- We saw previously that this client certificate contained:
`CN=kubernetes-admin` and `O=system:masters`
- Let's look for these in existing ClusterRoleBindings:
```bash
kubectl get clusterrolebindings -o yaml |
grep -e kubernetes-admin -e system:masters
```
(`system:masters` should show up, but not `kubernetes-admin`.)
- Where does this match come from?
---
class: extra-details
## The `system:masters` group
- If we eyeball the output of `kubectl get clusterrolebindings -o yaml`, we'll find out!
- It is in the `cluster-admin` binding:
```bash
kubectl describe clusterrolebinding cluster-admin
```
- This binding associates `system:masters` to the cluster role `cluster-admin`
- And the `cluster-admin` is, basically, `root`:
```bash
kubectl describe clusterrole cluster-admin
```

View File

@@ -36,9 +36,7 @@
## Creating a daemon set
<!-- ##VERSION## -->
- Unfortunately, as of Kubernetes 1.13, the CLI cannot create daemon sets
- Unfortunately, as of Kubernetes 1.12, the CLI cannot create daemon sets
--
@@ -254,29 +252,78 @@ The master node has [taints](https://kubernetes.io/docs/concepts/configuration/t
---
## Is this working?
## What are all these pods doing?
- Look at the web UI
- Let's check the logs of all these `rng` pods
- All these pods have the label `app=rng`:
- the first pod, because that's what `kubectl create deployment` does
- the other ones (in the daemon set), because we
*copied the spec from the first one*
- Therefore, we can query everybody's logs using that `app=rng` selector
.exercise[
- Check the logs of all the pods having a label `app=rng`:
```bash
kubectl logs -l app=rng --tail 1
```
]
--
- The graph should now go above 10 hashes per second!
--
- It looks like the newly created pods are serving traffic correctly
- How and why did this happen?
(We didn't do anything special to add them to the `rng` service load balancer!)
It appears that *all the pods* are serving requests at the moment.
---
# Labels and selectors
## Working around `kubectl logs` bugs
- That last command didn't show what we needed
- We mentioned earlier that regression affecting `kubectl logs` ...
(see [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for more details)
- Let's work around the issue by executing `kubectl logs` one pod at a time
- For convenience, we'll define a little shell function
---
## Our helper function
- The function `ktail` below will:
- list the names of all pods matching a selector
- display the last line of log for each pod
.exercise[
- Define `ktail`:
```bash
ktail () {
kubectl get pods -o name -l $1 |
xargs -rn1 kubectl logs --tail 1
}
```
- Try it:
```bash
ktail app=rng
```
]
---
## The magic of selectors
- The `rng` *service* is load balancing requests to a set of pods
- That set of pods is defined by the *selector* of the `rng` service
- This set of pods is defined as "pods having the label `app=rng`"
.exercise[
@@ -287,60 +334,19 @@ The master node has [taints](https://kubernetes.io/docs/concepts/configuration/t
]
- The selector is `app=rng`
- It means "all the pods having the label `app=rng`"
(They can have additional labels as well, that's OK!)
When we created additional pods with this label, they were
automatically detected by `svc/rng` and added as *endpoints*
to the associated load balancer.
---
## Selector evaluation
- We can use selectors with many `kubectl` commands
- For instance, with `kubectl get`, `kubectl logs`, `kubectl delete` ... and more
.exercise[
- Get the list of pods matching selector `app=rng`:
```bash
kubectl get pods -l app=rng
kubectl get pods --selector app=rng
```
]
But ... why do these pods (in particular, the *new* ones) have this `app=rng` label?
---
## Where do labels come from?
- When we create a deployment with `kubectl create deployment rng`,
<br/>this deployment gets the label `app=rng`
- The replica sets created by this deployment also get the label `app=rng`
- The pods created by these replica sets also get the label `app=rng`
- When we created the daemon set from the deployment, we re-used the same spec
- Therefore, the pods created by the daemon set get the same labels
.footnote[Note: when we use `kubectl run stuff`, the label is `run=stuff` instead.]
---
## Updating load balancer configuration
- We would like to remove a pod from the load balancer
## Removing the first pod from the load balancer
- What would happen if we removed that pod, with `kubectl delete pod ...`?
--
It would be re-created immediately (by the replica set or the daemon set)
The `replicaset` would re-create it immediately.
--
@@ -348,272 +354,90 @@ But ... why do these pods (in particular, the *new* ones) have this `app=rng` la
--
It would *also* be re-created immediately
The `replicaset` would re-create it immediately.
--
Why?!?
---
## Selectors for replica sets and daemon sets
- The "mission" of a replica set is:
"Make sure that there is the right number of pods matching this spec!"
- The "mission" of a daemon set is:
"Make sure that there is a pod matching this spec on each node!"
... Because what matters to the `replicaset` is the number of pods *matching that selector.*
--
- *In fact,* replica sets and daemon sets do not check pod specifications
- They merely have a *selector*, and they look for pods matching that selector
- Yes, we can fool them by manually creating pods with the "right" labels
- Bottom line: if we remove our `app=rng` label ...
... The pod "disappears" for its parent, which re-creates another pod to replace it
---
class: extra-details
## Isolation of replica sets and daemon sets
- Since both the `rng` daemon set and the `rng` replica set use `app=rng` ...
... Why don't they "find" each other's pods?
- But but but ... Don't we have more than one pod with `app=rng` now?
--
- *Replica sets* have a more specific selector, visible with `kubectl describe`
(It looks like `app=rng,pod-template-hash=abcd1234`)
- *Daemon sets* also have a more specific selector, but it's invisible
(It looks like `app=rng,controller-revision-hash=abcd1234`)
- As a result, each controller only "sees" the pods it manages
The answer lies in the exact selector used by the `replicaset` ...
---
## Removing a pod from the load balancer
## Deep dive into selectors
- Currently, the `rng` service is defined by the `app=rng` selector
- The only way to remove a pod is to remove or change the `app` label
- ... But that will cause another pod to be created instead!
- What's the solution?
--
- We need to change the selector of the `rng` service!
- Let's add another label to that selector (e.g. `enabled=yes`)
---
## Complex selectors
- If a selector specifies multiple labels, they are understood as a logical *AND*
(In other words: the pods must match all the labels)
- Kubernetes has support for advanced, set-based selectors
(But these cannot be used with services, at least not yet!)
---
## The plan
1. Add the label `enabled=yes` to all our `rng` pods
2. Update the selector for the `rng` service to also include `enabled=yes`
3. Toggle traffic to a pod by manually adding/removing the `enabled` label
4. Profit!
*Note: if we swap steps 1 and 2, it will cause a short
service disruption, because there will be a period of time
during which the service selector won't match any pod.
During that time, requests to the service will time out.
By doing things in the order above, we guarantee that there won't
be any interruption.*
---
## Adding labels to pods
- We want to add the label `enabled=yes` to all pods that have `app=rng`
- We could edit each pod one by one with `kubectl edit` ...
- ... Or we could use `kubectl label` to label them all
- `kubectl label` can use selectors itself
- Let's look at the selectors for the `rng` *deployment* and the associated *replica set*
.exercise[
- Add `enabled=yes` to all pods that have `app=rng`:
- Show detailed information about the `rng` deployment:
```bash
kubectl label pods -l app=rng enabled=yes
kubectl describe deploy rng
```
]
---
## Updating the service selector
- We need to edit the service specification
- Reminder: in the service definition, we will see `app: rng` in two places
- the label of the service itself (we don't need to touch that one)
- the selector of the service (that's the one we want to change)
.exercise[
- Update the service to add `enabled: yes` to its selector:
- Show detailed information about the `rng` replica set:
<br/>(The second command doesn't require you to get the exact name of the replica set)
```bash
kubectl edit service rng
kubectl describe rs rng-yyyyyyyy
kubectl describe rs -l app=rng
```
<!--
```wait Please edit the object below```
```keys /app: rng```
```keys ^J```
```keys noenabled: yes```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
]
--
... And then we get *the weirdest error ever.* Why?
The replica set selector also has a `pod-template-hash`, unlike the pods in our daemon set.
---
## When the YAML parser is being too smart
# Updating a service through labels and selectors
- YAML parsers try to help us:
- What if we want to drop the `rng` deployment from the load balancer?
- `xyz` is the string `"xyz"`
- Option 1:
- `42` is the integer `42`
- destroy it
- `yes` is the boolean value `true`
- Option 2:
- If we want the string `"42"` or the string `"yes"`, we have to quote them
- add an extra *label* to the daemon set
- So we have to use `enabled: "yes"`
- update the service *selector* to refer to that *label*
.footnote[For a good laugh: if we had used "ja", "oui", "si" ... as the value, it would have worked!]
--
Of course, option 2 offers more learning opportunities. Right?
---
## Updating the service selector, take 2
## Add an extra label to the daemon set
.exercise[
- We will update the daemon set "spec"
- Update the service to add `enabled: "yes"` to its selector:
```bash
kubectl edit service rng
```
- Option 1:
<!--
```wait Please edit the object below```
```keys /app: rng```
```keys ^J```
```keys noenabled: "yes"```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
- edit the `rng.yml` file that we used earlier
]
- load the new definition with `kubectl apply`
This time it should work!
- Option 2:
If we did everything correctly, the web UI shouldn't show any change.
- use `kubectl edit`
--
*If you feel like you got this💕🌈, feel free to try directly.*
*We've included a few hints on the next slides for your convenience!*
---
## Updating labels
- We want to disable the pod that was created by the deployment
- All we have to do, is remove the `enabled` label from that pod
- To identify that pod, we can use its name
- ... Or rely on the fact that it's the only one with a `pod-template-hash` label
- Good to know:
- `kubectl label ... foo=` doesn't remove a label (it sets it to an empty string)
- to remove label `foo`, use `kubectl label ... foo-`
- to change an existing label, we would need to add `--overwrite`
---
## Removing a pod from the load balancer
.exercise[
- In one window, check the logs of that pod:
```bash
POD=$(kubectl get pod -l app=rng,pod-template-hash -o name)
kubectl logs --tail 1 --follow $POD
```
(We should see a steady stream of HTTP logs)
- In another window, remove the label from the pod:
```bash
kubectl label pod -l app=rng,pod-template-hash enabled-
```
(The stream of HTTP logs should stop immediately)
]
There might be a slight change in the web UI (since we removed a bit
of capacity from the `rng` service). If we remove more pods,
the effect should be more visible.
---
class: extra-details
## Updating the daemon set
- If we scale up our cluster by adding new nodes, the daemon set will create more pods
- These pods won't have the `enabled=yes` label
- If we want these pods to have that label, we need to edit the daemon set spec
- We can do that with e.g. `kubectl edit daemonset rng`
---
class: extra-details
## We've put resources in your resources
- Reminder: a daemon set is a resource that creates more resources!
@@ -626,9 +450,7 @@ class: extra-details
- the label(s) of the resource(s) created by the first resource (in the `template` block)
- We would need to update the selector and the template
(metadata labels are not mandatory)
- You need to update the selector and the template (metadata labels are not mandatory)
- The template must match the selector
@@ -636,6 +458,175 @@ class: extra-details
---
## Adding our label
- Let's add a label `isactive: yes`
- In YAML, `yes` should be quoted; i.e. `isactive: "yes"`
.exercise[
- Update the daemon set to add `isactive: "yes"` to the selector and template label:
```bash
kubectl edit daemonset rng
```
<!--
```wait Please edit the object below```
```keys /app: rng```
```keys ^J```
```keys noisactive: "yes"```
```keys ^[``` ]
```keys /app: rng```
```keys ^J```
```keys oisactive: "yes"```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
- Update the service to add `isactive: "yes"` to its selector:
```bash
kubectl edit service rng
```
<!--
```wait Please edit the object below```
```keys /app: rng```
```keys ^J```
```keys noisactive: "yes"```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
]
---
## Checking what we've done
.exercise[
- Check the most recent log line of all `app=rng` pods to confirm that exactly one per node is now active:
```bash
kubectl logs -l app=rng --tail 1
```
]
The timestamps should give us a hint about how many pods are currently receiving traffic.
.exercise[
- Look at the pods that we have right now:
```bash
kubectl get pods
```
]
---
## Cleaning up
- The pods of the deployment and the "old" daemon set are still running
- We are going to identify them programmatically
.exercise[
- List the pods with `app=rng` but without `isactive=yes`:
```bash
kubectl get pods -l app=rng,isactive!=yes
```
- Remove these pods:
```bash
kubectl delete pods -l app=rng,isactive!=yes
```
]
---
## Cleaning up stale pods
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
rng-54f57d4d49-7pt82 1/1 Terminating 0 51m
rng-54f57d4d49-vgz9h 1/1 Running 0 22s
rng-b85tm 1/1 Terminating 0 39m
rng-hfbrr 1/1 Terminating 0 39m
rng-vplmj 1/1 Running 0 7m
rng-xbpvg 1/1 Running 0 7m
[...]
```
- The extra pods (noted `Terminating` above) are going away
- ... But a new one (`rng-54f57d4d49-vgz9h` above) was restarted immediately!
--
- Remember, the *deployment* still exists, and makes sure that one pod is up and running
- If we delete the pod associated to the deployment, it is recreated automatically
---
## Deleting a deployment
.exercise[
- Remove the `rng` deployment:
```bash
kubectl delete deployment rng
```
]
--
- The pod that was created by the deployment is now being terminated:
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
rng-54f57d4d49-vgz9h 1/1 Terminating 0 4m
rng-vplmj 1/1 Running 0 11m
rng-xbpvg 1/1 Running 0 11m
[...]
```
Ding, dong, the deployment is dead! And the daemon set lives on.
---
## Avoiding extra pods
- When we changed the definition of the daemon set, it immediately created new pods. We had to remove the old ones manually.
- How could we have avoided this?
--
- By adding the `isactive: "yes"` label to the pods before changing the daemon set!
- This can be done programmatically with `kubectl patch`:
```bash
PATCH='
metadata:
labels:
isactive: "yes"
'
kubectl get pods -l app=rng -l controller-revision-hash -o name |
xargs kubectl patch -p "$PATCH"
```
---
## Labels and debugging
- When a pod is misbehaving, we can delete it: another one will be recreated

View File

@@ -164,21 +164,6 @@ The chart's metadata includes an URL to the project's home page.
---
## Viewing installed charts
- Helm keeps track of what we've installed
.exercise[
- List installed Helm charts:
```bash
helm list
```
]
---
## Creating a chart
- We are going to show a way to create a *very simplified* chart

View File

@@ -292,16 +292,12 @@ We could! But the *deployment* would notice it right away, and scale back to the
]
Unfortunately, `--follow` cannot (yet) be used to stream the logs from multiple containers.
<br/>
(But this will change in the future; see [PR #67573](https://github.com/kubernetes/kubernetes/pull/67573).)
---
class: extra-details
## `kubectl logs -l ... --tail N`
- If we run this with Kubernetes 1.12, the last command shows multiple lines
- With Kubernetes 1.12 (and up to at least 1.12.2), the last command shows multiple lines
- This is a regression when `--tail` is used together with `-l`/`--selector`
@@ -309,9 +305,7 @@ class: extra-details
(instead of the number of lines specified on the command line)
- The problem was fixed in Kubernetes 1.13
*See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details.*
- See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details
---

View File

@@ -12,15 +12,13 @@
.exercise[
<!-- ##VERSION## -->
- Download the `kubectl` binary from one of these links:
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.13.4/bin/linux/amd64/kubectl)
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.12.2/bin/linux/amd64/kubectl)
|
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.13.4/bin/darwin/amd64/kubectl)
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.12.2/bin/darwin/amd64/kubectl)
|
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.13.4/bin/windows/amd64/kubectl.exe)
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.12.2/bin/windows/amd64/kubectl.exe)
- On Linux and macOS, make the binary executable with `chmod +x kubectl`

View File

@@ -68,7 +68,7 @@
kubectl -n blue get svc
```
- We can also change our current *context*
- We can also use *contexts*
- A context is a *(user, cluster, namespace)* tuple
@@ -76,9 +76,9 @@
---
## Viewing existing contexts
## Creating a context
- On our training environments, at this point, there should be only one context
- We are going to create a context for the `blue` namespace
.exercise[
@@ -87,79 +87,29 @@
kubectl config get-contexts
```
]
- The current context (the only one!) is tagged with a `*`
- What are NAME, CLUSTER, AUTHINFO, and NAMESPACE?
---
## What's in a context
- NAME is an arbitrary string to identify the context
- CLUSTER is a reference to a cluster
(i.e. API endpoint URL, and optional certificate)
- AUTHINFO is a reference to the authentication information to use
(i.e. a TLS client certificate, token, or otherwise)
- NAMESPACE is the namespace
(empty string = `default`)
---
## Switching contexts
- We want to use a different namespace
- Solution 1: update the current context
*This is appropriate if we need to change just one thing (e.g. namespace or authentication).*
- Solution 2: create a new context and switch to it
*This is appropriate if we need to change multiple things and switch back and forth.*
- Let's go with solution 1!
---
## Updating a context
- This is done through `kubectl config set-context`
- We can update a context by passing its name, or the current context with `--current`
.exercise[
- Update the current context to use the `blue` namespace:
- Create a new context:
```bash
kubectl config set-context --current --namespace=blue
```
- Check the result:
```bash
kubectl config get-contexts
kubectl config set-context blue --namespace=blue \
--cluster=kubernetes --user=kubernetes-admin
```
]
We have created a context; but this is just some configuration values.
The namespace doesn't exist yet.
---
## Using our new namespace
## Using a context
- Let's check that we are in our new namespace, then deploy the DockerCoins chart
- Let's switch to our new context and deploy the DockerCoins chart
.exercise[
- Verify that the new context is empty:
- Use the `blue` context:
```bash
kubectl get all
kubectl config use-context blue
```
- Deploy DockerCoins:
@@ -189,46 +139,7 @@ we created our Helm chart before.
]
If the graph shows up but stays at zero, check the next slide!
---
## Troubleshooting
If you did the exercises from the chapter about labels and selectors,
the app that you just created may not work, because the `rng` service
selector has `enabled=yes` but the pods created by the `rng` daemon set
do not have that label.
How can we troubleshoot that?
- Query individual services manually
→ the `rng` service will time out
- Inspect the services with `kubectl describe service`
→ the `rng` service will have an empty list of backends
---
## Fixing the broken service
The easiest option is to add the `enabled=yes` label to the relevant pods.
.exercise[
- Add the `enabled` label to the pods of the `rng` daemon set:
```bash
kubectl label pods -l app=rng enabled=yes
```
]
The *best* option is to change either the service definition, or the
daemon set definition, so that their respective selectors match correctly.
*This is left as an exercise for the reader!*
Note: it might take a minute or two for the app to be up and running.
---
@@ -270,19 +181,30 @@ daemon set definition, so that their respective selectors match correctly.
.exercise[
- View the names of the contexts:
```bash
kubectl config get-contexts
```
- Switch back to the original context:
```bash
kubectl config set-context --current --namespace=
kubectl config use-context kubernetes-admin@kubernetes
```
]
Note: we could have used `--namespace=default` for the same result.
---
## Switching namespaces more easily
- Defining a new context for each namespace can be cumbersome
- We can also alter the current context with this one-liner:
```bash
kubectl config set-context --current --namespace=foo
```
- We can also use a little helper tool called `kubens`:
```bash

View File

@@ -247,9 +247,9 @@ The second command will fail and time out after 3 seconds.
- Some network plugins only have partial support for network policies
- For instance, Weave added support for egress rules [in version 2.4](https://github.com/weaveworks/weave/pull/3313) (released in July 2018)
- For instance, Weave [doesn't support ipBlock (yet)](https://github.com/weaveworks/weave/issues/3168)
- But only recently added support for ipBlock [in version 2.5](https://github.com/weaveworks/weave/pull/3367) (released in Nov 2018)
- Weave added support for egress rules [in version 2.4](https://github.com/weaveworks/weave/pull/3313) (released in July 2018)
- Unsupported features might be silently ignored

View File

@@ -1,42 +1,12 @@
# Shipping images with a registry
class: title
- Initially, our app was running on a single node
- We could *build* and *run* in the same place
- Therefore, we did not need to *ship* anything
- Now that we want to run on a cluster, things are different
- The easiest way to ship container images is to use a registry
Our app on Kube
---
## How Docker registries work (a reminder)
## What's on the menu?
- What happens when we execute `docker run alpine` ?
- If the Engine needs to pull the `alpine` image, it expands it into `library/alpine`
- `library/alpine` is expanded into `index.docker.io/library/alpine`
- The Engine communicates with `index.docker.io` to retrieve `library/alpine:latest`
- To use something else than `index.docker.io`, we specify it in the image name
- Examples:
```bash
docker pull gcr.io/google-containers/alpine-with-bash:1.0
docker build -t registry.mycompany.io:5000/myimage:awesome .
docker push registry.mycompany.io:5000/myimage:awesome
```
---
## The plan
We are going to:
In this part, we will:
- **build** images for our app,
@@ -44,42 +14,25 @@ We are going to:
- **run** deployments using these images,
- expose (with a ClusterIP) the deployments that need to communicate together,
- expose these deployments so they can communicate with each other,
- expose (with a NodePort) the web UI so we can access it from outside.
- expose the web UI so we can access it from outside.
---
## Building and shipping our app
## The plan
- We will pick a registry
- Build on our control node (`node1`)
(let's pretend the address will be `REGISTRY:PORT`)
- Tag images so that they are named `$REGISTRY/servicename`
- We will build on our control node (`node1`)
- Upload them to a registry
(the images will be named `REGISTRY:PORT/servicename`)
- Create deployments using the images
- We will push the images to the registry
- Expose (with a ClusterIP) the services that need to communicate
- These images will be usable by the other nodes of the cluster
(i.e., we could do `docker run REGISTRY:PORT/servicename` from these nodes)
---
## A shortcut opportunity
- As it happens, the images that we need do already exist on the Docker Hub:
https://hub.docker.com/r/dockercoins/
- We could use them instead of using our own registry and images
*In the following slides, we are going to show how to run a registry
and use it to host container images. We will also show you how to
use the existing images from the Docker Hub, so that you can catch
up (or skip altogether the build/push part) if needed.*
- Expose (with a NodePort) the WebUI
---
@@ -87,20 +40,11 @@ up (or skip altogether the build/push part) if needed.*
- We could use the Docker Hub
- There are alternatives like Quay
- Or a service offered by our cloud provider (ACR, GCR, ECR...)
- Each major cloud provider has an option as well
- Or we could just self-host that registry
(ACR on Azure, ECR on AWS, GCR on Google Cloud...)
- There are also commercial products to run our own registry
(Docker EE, Quay...)
- And open source options, too!
*We are going to self-host an open source registry because it's the most generic solution for this workshop. We will use Docker's reference
implementation for simplicity.*
*We'll self-host the registry because it's the most generic solution for this workshop.*
---
@@ -122,7 +66,7 @@ implementation for simplicity.*
---
## Deploying a self-hosted registry
# Deploying a self-hosted registry
- We will deploy a registry container, and expose it with a NodePort
@@ -308,7 +252,7 @@ class: extra-details
- Or building or pushing the images ...
- Don't worry: you can easily use pre-built images from the Docker Hub!
- Don't worry: we provide pre-built images hosted on the Docker Hub!
- The images are named `dockercoins/worker:v0.1`, `dockercoins/rng:v0.1`, etc.
@@ -323,7 +267,7 @@ class: extra-details
---
# Running our application on Kubernetes
## Deploying all the things
- We can now deploy our code (as well as a redis instance)
@@ -376,7 +320,7 @@ kubectl wait deploy/worker --for condition=available
---
## Connecting containers together
# Exposing services internally
- Three deployments need to be reachable by others: `hasher`, `redis`, `rng`
@@ -423,7 +367,7 @@ We should now see the `worker`, well, working happily.
---
## Exposing services for external access
# Exposing services for external access
- Now we would like to access the Web UI

View File

@@ -122,13 +122,13 @@
- Create a 10 GB file on each node:
```bash
for N in $(seq 1 4); do ssh node$N sudo truncate --size 10G /portworx.blk; done
for N in $(seq 1 5); do ssh node$N sudo truncate --size 10G /portworx.blk; done
```
(If SSH asks to confirm host keys, enter `yes` each time.)
- Associate the file to a loop device on each node:
```bash
for N in $(seq 1 4); do ssh node$N sudo losetup /dev/loop4 /portworx.blk; done
for N in $(seq 1 5); do ssh node$N sudo losetup /dev/loop4 /portworx.blk; done
```
]

View File

@@ -151,7 +151,7 @@ scrape_configs:
## Running Prometheus on our cluster
We need to:
We would need to:
- Run the Prometheus server in a pod
@@ -171,19 +171,21 @@ We need to:
## Helm Charts to the rescue
- To make our lives easier, we are going to use a Helm Chart
- To make our lives easier, we could use a Helm Chart
- The Helm Chart will take care of all the steps explained above
- The Helm Chart would take care of all the steps explained above
(including some extra features that we don't need, but won't hurt)
- In fact, Prometheus has been pre-installed on our clusters with Helm
(it was pre-installed so that it would be populated with metrics by now)
---
## Step 1: install Helm
## Step 1: if we had to install Helm
- If we already installed Helm earlier, these commands won't break anything
.exercise[
- Note that if Helm is already installed, these commands won't break anything
- Install Tiller (Helm's server-side component) on our cluster:
```bash
@@ -196,27 +198,17 @@ We need to:
--clusterrole=cluster-admin --serviceaccount=kube-system:default
```
]
---
## Step 2: install Prometheus
## Step 2: if we had to install Prometheus
- Skip this if we already installed Prometheus earlier
(in doubt, check with `helm list`)
.exercise[
- Install Prometheus on our cluster:
- This is how we would use Helm to deploy Prometheus on the cluster:
```bash
helm install stable/prometheus \
--set server.service.type=NodePort \
--set server.persistentVolume.enabled=false
```
]
The provided flags:
- expose the server web UI (and API) on a NodePort
@@ -235,11 +227,13 @@ The provided flags:
- Figure out the NodePort that was allocated to the Prometheus server:
```bash
kubectl get svc | grep prometheus-server
kubectl get svc -n kube-system | grep prometheus-server
```
- With your browser, connect to that port
(spoiler alert: it should be 30090)
]
---
@@ -475,14 +469,12 @@ class: extra-details
- The Kubernetes service endpoints exporter uses tag `pod` instead
- See [this blog post](https://www.robustperception.io/exposing-the-software-version-to-prometheus) or [this other one](https://www.weave.works/blog/aggregating-pod-resource-cpu-memory-usage-arbitrary-labels-prometheus/) to see how to perform "joins"
- And this is why we can't have nice things
- Alas, Prometheus cannot "join" time series with different labels
- See [Prometheus issue #2204](https://github.com/prometheus/prometheus/issues/2204) for the rationale
(see [Prometheus issue #2204](https://github.com/prometheus/prometheus/issues/2204) for the rationale)
([this comment](https://github.com/prometheus/prometheus/issues/2204#issuecomment-261515520) in particular if you want a workaround involving relabeling)
- There is a workaround involving relabeling, but it's "not cheap"
- Then see [this blog post](https://www.robustperception.io/exposing-the-software-version-to-prometheus) or [this other one](https://www.weave.works/blog/aggregating-pod-resource-cpu-memory-usage-arbitrary-labels-prometheus/) to see how to perform "joins"
- see [this comment](https://github.com/prometheus/prometheus/issues/2204#issuecomment-261515520) for an overview
- or [this blog post](https://5pi.de/2017/11/09/use-prometheus-vector-matching-to-get-kubernetes-utilization-across-any-pod-label/) for a complete description of the process
- There is a good chance that the situation will improve in the future

View File

@@ -6,7 +6,7 @@
<!-- ##VERSION## -->
- We used `kubeadm` on freshly installed VM instances running Ubuntu LTS
- We used `kubeadm` on freshly installed VM instances running Ubuntu 18.04 LTS
1. Install Docker
@@ -53,8 +53,8 @@
[GKE](https://cloud.google.com/kubernetes-engine/)
- If you are on AWS:
[EKS](https://aws.amazon.com/eks/),
[eksctl](https://eksctl.io/),
[EKS](https://aws.amazon.com/eks/)
or
[kops](https://github.com/kubernetes/kops)
- On a local machine:

View File

@@ -266,9 +266,7 @@ spec:
---
# Running a Consul cluster
- Here is a good use-case for Stateful sets!
## Stateful sets in action
- We are going to deploy a Consul cluster with 3 nodes
@@ -296,54 +294,42 @@ consul agent -data=dir=/consul/data -client=0.0.0.0 -server -ui \
-retry-join=`Y.Y.Y.Y`
```
- Replace X.X.X.X and Y.Y.Y.Y with the addresses of other nodes
- We need to replace X.X.X.X and Y.Y.Y.Y with the addresses of other nodes
- The same command-line can be used on all nodes (convenient!)
- We can specify DNS names, but then they have to be FQDN
- It's OK for a pod to include itself in the list as well
- We can therefore use the same command-line on all nodes (easier!)
---
## Cloud Auto-join
## Discovering the addresses of other pods
- Since version 1.4.0, Consul can use the Kubernetes API to find its peers
- When a service is created for a stateful set, individual DNS entries are created
- This is called [Cloud Auto-join]
- These entries are constructed like this:
- Instead of passing an IP address, we need to pass a parameter like this:
`<name-of-stateful-set>-<n>.<name-of-service>.<namespace>.svc.cluster.local`
```
consul agent -retry-join "provider=k8s label_selector=\"app=consul\""
```
- `<n>` is the number of the pod in the set (starting at zero)
- Consul needs to be able to talk to the Kubernetes API
- If we deploy Consul in the default namespace, the names could be:
- We can provide a `kubeconfig` file
- If Consul runs in a pod, it will use the *service account* of the pod
[Cloud Auto-join]: https://www.consul.io/docs/agent/cloud-auto-join.html#kubernetes-k8s-
---
## Setting up Cloud auto-join
- We need to create a service account for Consul
- We need to create a role that can `list` and `get` pods
- We need to bind that role to the service account
- And of course, we need to make sure that Consul pods use that service account
- `consul-0.consul.default.svc.cluster.local`
- `consul-1.consul.default.svc.cluster.local`
- `consul-2.consul.default.svc.cluster.local`
---
## Putting it all together
- The file `k8s/consul.yaml` defines the required resources
(service account, cluster role, cluster role binding, service, stateful set)
- The file `k8s/consul.yaml` defines a service and a stateful set
- It has a few extra touches:
- the name of the namespace is injected through an environment variable
- a `podAntiAffinity` prevents two pods from running on the same node
- a `preStop` hook makes the pod leave the cluster when shutdown gracefully

View File

@@ -1,237 +0,0 @@
# Static pods
- Hosting the Kubernetes control plane on Kubernetes has advantages:
- we can use Kubernetes' replication and scaling features for the control plane
- we can leverage rolling updates to upgrade the control plane
- However, there is a catch:
- deploying on Kubernetes requires the API to be available
- the API won't be available until the control plane is deployed
- How can we get out of that chicken-and-egg problem?
---
## A possible approach
- Since each component of the control plane can be replicated ...
- We could set up the control plane outside of the cluster
- Then, once the cluster is fully operational, create replicas running on the cluster
- Finally, remove the replicas that are running outside of the cluster
*What could possibly go wrong?*
---
## Sawing off the branch you're sitting on
- What if anything goes wrong?
(During the setup or at a later point)
- Worst case scenario, we might need to:
- set up a new control plane (outside of the cluster)
- restore a backup from the old control plane
- move the new control plane to the cluster (again)
- This doesn't sound like a great experience
---
## Static pods to the rescue
- Pods are started by kubelet (an agent running on every node)
- To know which pods it should run, the kubelet queries the API server
- The kubelet can also get a list of *static pods* from:
- a directory containing one (or multiple) *manifests*, and/or
- a URL (serving a *manifest*)
- These "manifests" are basically YAML definitions
(As produced by `kubectl get pod my-little-pod -o yaml --export`)
---
## Static pods are dynamic
- Kubelet will periodically reload the manifests
- It will start/stop pods accordingly
(i.e. it is not necessary to restart the kubelet after updating the manifests)
- When connected to the Kubernetes API, the kubelet will create *mirror pods*
- Mirror pods are copies of the static pods
(so they can be seen with e.g. `kubectl get pods`)
---
## Bootstrapping a cluster with static pods
- We can run control plane components with these static pods
- They can start without requiring access to the API server
- Once they are up and running, the API becomes available
- These pods are then visible through the API
(We cannot upgrade them from the API, though)
*This is how kubeadm has initialized our clusters.*
---
## Static pods vs normal pods
- The API only gives us a read-only access to static pods
- We can `kubectl delete` a static pod ...
... But the kubelet will restart it immediately
- Static pods can be selected just like other pods
(So they can receive service traffic)
- A service can select a mixture of static and other pods
---
## From static pods to normal pods
- Once the control plane is up and running, it can be used to create normal pods
- We can then set up a copy of the control plane in normal pods
- Then the static pods can be removed
- The scheduler and the controller manager use leader election
(Only one is active at a time; removing an instance is seamless)
- Each instance of the API server adds itself to the `kubernetes` service
- Etcd will typically require more work!
---
## From normal pods back to static pods
- Alright, but what if the control plane is down and we need to fix it?
- We restart it using static pods!
- This can be done automatically with the [Pod Checkpointer]
- The Pod Checkpointer automatically generates manifests of running pods
- The manifests are used to restart these pods if API contact is lost
(More details in the [Pod Checkpointer] documentation page)
- This technique is used by [bootkube]
[Pod Checkpointer]: https://github.com/kubernetes-incubator/bootkube/blob/master/cmd/checkpoint/README.md
[bootkube]: https://github.com/kubernetes-incubator/bootkube
---
## Where should the control plane run?
*Is it better to run the control plane in static pods, or normal pods?*
- If I'm a *user* of the cluster: I don't care, it makes no difference to me
- What if I'm an *admin*, i.e. the person who installs, upgrades, repairs... the cluster?
- If I'm using a managed Kubernetes cluster (AKS, EKS, GKE...) it's not my problem
(I'm not the one setting up and managing the control plane)
- If I already picked a tool (kubeadm, kops...) to set up my cluster, the tool decides for me
- What if I haven't picked a tool yet, or if I'm installing from scratch?
- static pods = easier to set up, easier to troubleshoot, less risk of outage
- normal pods = easier to upgrade, easier to move (if nodes need to be shut down)
---
## Static pods in action
- On our clusters, the `staticPodPath` is `/etc/kubernetes/manifests`
.exercise[
- Have a look at this directory:
```bash
ls -l /etc/kubernetes/manifests
```
]
We should see YAML files corresponding to the pods of the control plane.
---
## Running a static pod
- We are going to add a pod manifest to the directory, and kubelet will run it
.exercise[
- Copy a manifest to the directory:
```bash
sudo cp ~/container.training/k8s/just-a-pod.yaml /etc/kubernetes/manifests
```
- Check that it's running:
```bash
kubectl get pods
```
]
The output should include a pod named `hello-node1`.
---
## Remarks
In the manifest, the pod was named `hello`.
```yaml
apiVersion: v1
kind: Pod
metadata:
name: hello
namespace: default
spec:
containers:
- name: hello
image: nginx
```
The `-node1` suffix was added automatically by kubelet.
If we delete the pod (with `kubectl delete`), it will be recreated immediately.
To delete the pod, we need to delete (or move) the manifest file.

View File

@@ -1,7 +1,7 @@
## Versions installed
- Kubernetes 1.13.4
- Docker Engine 18.09.3
- Kubernetes 1.12.2
- Docker Engine 18.09.0
- Docker Compose 1.21.1
<!-- ##VERSION## -->
@@ -23,7 +23,7 @@ class: extra-details
## Kubernetes and Docker compatibility
- Kubernetes 1.13.x only validates Docker Engine versions [up to 18.06](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#external-dependencies)
- Kubernetes 1.12.x only validates Docker Engine versions [1.11.2 to 1.13.1 and 17.03.x](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md#external-dependencies)
--
@@ -35,9 +35,7 @@ class: extra-details
class: extra-details
- No!
- "Validates" = continuous integration builds with very extensive (and expensive) testing
- "Validates" = continuous integration builds
- The Docker API is versioned, and offers strong backward-compatibility

View File

@@ -1,15 +1,15 @@
title: |
Getting Started
With Kubernetes and
Container Orchestration
Getting Started With
Kubernetes and
Container Orchestration
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/workshop-20190307-london)"
#chat: "In person!"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "Gitter ([Thursday](https://gitter.im/jpetazzo/workshop-20181108-sanfrancisco)|[Friday](https://gitter.im/jpetazzo/workshop-20181109-sanfrancisco))"
gitrepo: github.com/jpetazzo/container.training
slides: http://qconuk2019.container.training/
slides: http://qconsf2018.container.training/
exclude:
- self-paced

View File

@@ -57,7 +57,6 @@ chapters:
- - k8s/owners-and-dependents.md
- k8s/statefulsets.md
- k8s/portworx.md
- k8s/staticpods.md
- - k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -57,7 +57,6 @@ chapters:
- - k8s/owners-and-dependents.md
- k8s/statefulsets.md
- k8s/portworx.md
- k8s/staticpods.md
- - k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -1,16 +1,13 @@
## Intros
- Hello! We are:
- .emoji[👷🏻‍♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
- Hello! I'm
Jérôme Petazzoni ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
- The workshop will run from 9am to 4pm
- There will be a lunch break at noon
- There will be a lunch break from noon to 1pm
(And coffee breaks at 10:30am and 2:30pm)
(And coffee breaks!)
- Feel free to interrupt for questions at any time

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
# transforms a YAML manifest into a HTML workshop file
import glob
@@ -26,19 +26,12 @@ def anchor(title):
return "toc-" + title
class Interstitials(object):
def __init__(self):
self.index = 0
self.images = [url.strip() for url in open("interstitials.txt") if url.strip()]
def next(self):
index = self.index % len(self.images)
index += 1
return self.images[index]
interstitials = Interstitials()
def interstitials_generator():
images = [url.strip() for url in open("interstitials.txt") if url.strip()]
while True:
for image in images:
yield image
interstitials = interstitials_generator()
def insertslide(markdown, title):
@@ -166,6 +159,8 @@ def gentoc(tree, path=()):
# Returns: (expandedmarkdown,[list of titles])
# The list of titles can be nested.
def processchapter(chapter, filename):
if isinstance(chapter, unicode):
return processchapter(chapter.encode("utf-8"), filename)
if isinstance(chapter, str):
if "\n" in chapter:
titles = re.findall("^# (.*)", chapter, re.MULTILINE)
@@ -188,14 +183,14 @@ try:
if "REPOSITORY_URL" in os.environ:
repo = os.environ["REPOSITORY_URL"]
else:
repo = subprocess.check_output(["git", "config", "remote.origin.url"]).decode("ascii")
repo = subprocess.check_output(["git", "config", "remote.origin.url"])
repo = repo.strip().replace("git@github.com:", "https://github.com/")
if "BRANCH" in os.environ:
branch = os.environ["BRANCH"]
else:
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).decode("ascii")
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
branch = branch.strip()
base = subprocess.check_output(["git", "rev-parse", "--show-prefix"]).decode("ascii")
base = subprocess.check_output(["git", "rev-parse", "--show-prefix"])
base = base.strip().strip("/")
urltemplate = ("{repo}/tree/{branch}/{base}/{filename}"
.format(repo=repo, branch=branch, base=base, filename="{}"))
@@ -203,12 +198,12 @@ except:
logging.exception("Could not generate repository URL; generating local URLs instead.")
urltemplate = "file://{pwd}/{filename}".format(pwd=os.environ["PWD"], filename="{}")
try:
commit = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).decode("ascii")
commit = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
except:
logging.exception("Could not figure out HEAD commit.")
commit = "??????"
try:
dirtyfiles = subprocess.check_output(["git", "status", "--porcelain"]).decode("ascii")
dirtyfiles = subprocess.check_output(["git", "status", "--porcelain"])
except:
logging.exception("Could not figure out repository cleanliness.")
dirtyfiles = "?? git status --porcelain failed"

17
slides/override.css Normal file
View File

@@ -0,0 +1,17 @@
.remark-slide-content:not(.pic) {
background-repeat: no-repeat;
background-position: 99% 1%;
background-size: 8%;
background-image: url(https://enix.io/static/img/logos/logo-domain-cropped.png);
}
div.extra-details:not(.pic) {
background-image: url("images/extra-details.png"), url(https://enix.io/static/img/logos/logo-domain-cropped.png);
background-position: 0.5% 1%, 99% 1%;
background-size: 4%, 8%;
}
.remark-slide-content:not(.pic) div.remark-slide-number {
top: 16px;
right: 112px
}

View File

@@ -1 +0,0 @@
3.7

View File

@@ -54,84 +54,49 @@ and displays aggregated logs.
---
## What's this application?
## More detail on our sample application
--
- Visit the GitHub repository with all the materials of this workshop:
<br/>https://@@GITREPO@@
- It is a DockerCoin miner! .emoji[💰🐳📦🚢]
- The application is in the [dockercoins](
https://@@GITREPO@@/tree/master/dockercoins)
subdirectory
--
- Let's look at the general layout of the source code:
- No, you can't buy coffee with DockerCoins
there is a Compose file [docker-compose.yml](
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml) ...
--
- How DockerCoins works:
- generate a few random bytes
- hash these bytes
- increment a counter (to keep track of speed)
- repeat forever!
--
- DockerCoins is *not* a cryptocurrency
(the only common points are "randomness", "hashing", and "coins" in the name)
---
## DockerCoins in the microservices era
- DockerCoins is made of 5 services:
... and 4 other services, each in its own directory:
- `rng` = web service generating random bytes
- `hasher` = web service computing hash of POSTed data
- `worker` = background process calling `rng` and `hasher`
- `worker` = background process using `rng` and `hasher`
- `webui` = web interface to watch progress
- `redis` = data store (holds a counter updated by `worker`)
- These 5 services are visible in the application's Compose file,
[docker-compose.yml](
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)
---
## How DockerCoins works
class: extra-details
- `worker` invokes web service `rng` to generate random bytes
## Compose file format version
- `worker` invokes web service `hasher` to hash these bytes
*Particularly relevant if you have used Compose before...*
- `worker` does this in an infinite loop
- Compose 1.6 introduced support for a new Compose file format (aka "v2")
- every second, `worker` updates `redis` to indicate how many loops were done
- Services are no longer at the top level, but under a `services` section
- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser
- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer)
*(See diagram on next slide!)*
- Containers are placed on a dedicated network, making links unnecessary
---
class: pic
![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg)
- There are other minor differences, but upgrade is easy and straightforward
---
## Service discovery in container-land
How does each service find out the address of the other ones?
--
- We do not hard-code IP addresses in the code
- We do not hard-code FQDN in the code, either
@@ -185,46 +150,35 @@ class: extra-details
---
## Show me the code!
## What's this application?
- You can check the GitHub repository with all the materials of this workshop:
<br/>https://@@GITREPO@@
--
- The application is in the [dockercoins](
https://@@GITREPO@@/tree/master/dockercoins)
subdirectory
- It is a DockerCoin miner! .emoji[💰🐳📦🚢]
- The Compose file ([docker-compose.yml](
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml))
lists all 5 services
--
- `redis` is using an official image from the Docker Hub
- No, you can't buy coffee with DockerCoins
- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile
--
- Each service's Dockerfile and source code is in its own directory
- How DockerCoins works:
(`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory,
`rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/)
directory, etc.)
- `worker` asks to `rng` to generate a few random bytes
- `worker` feeds these bytes into `hasher`
- and repeat forever!
- every second, `worker` updates `redis` to indicate how many loops were done
- `webui` queries `redis`, and computes and exposes "hashing speed" in your browser
---
class: extra-details
class: pic
## Compose file format version
*This is relevant only if you have used Compose before 2016...*
- Compose 1.6 introduced support for a new Compose file format (aka "v2")
- Services are no longer at the top level, but under a `services` section
- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer)
- Containers are placed on a dedicated network, making links unnecessary
- There are other minor differences, but upgrade is easy and straightforward
![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg)
---
@@ -278,7 +232,7 @@ Docker Engine is running on a different machine. This can be the case if:
When you run DockerCoins in development mode, the web UI static files
are mapped to the container using a volume. Alas, volumes can only
work on a local environment, or when using Docker Desktop for Mac or Windows.
work on a local environment, or when using Docker4Mac or Docker4Windows.
How to fix this?

View File

@@ -40,10 +40,10 @@ chapters:
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/compose2swarm.md
- swarm/cicd.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
#- swarm/rollingupdates.md
- swarm/healthchecks.md
- - swarm/operatingswarm.md
- swarm/netshoot.md

View File

@@ -40,7 +40,7 @@ chapters:
#- swarm/testingregistry.md
#- swarm/btp-manual.md
#- swarm/swarmready.md
- swarm/stacks.md
- swarm/compose2swarm.md
- swarm/cicd.md
- swarm/updatingservices.md
#- swarm/rollingupdates.md

View File

@@ -41,7 +41,7 @@ chapters:
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/compose2swarm.md
- swarm/cicd.md
- |
name: part-2

View File

@@ -41,7 +41,7 @@ chapters:
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/compose2swarm.md
- |
name: part-2

View File

@@ -10,10 +10,9 @@
- And run this little for loop:
```bash
cd ~/container.training/dockercoins
export REGISTRY=127.0.0.1:5000
export TAG=v0.1
REGISTRY=127.0.0.1:5000 TAG=v1
for SERVICE in hasher rng webui worker; do
docker build -t $REGISTRY/$SERVICE:$TAG ./$SERVICE
docker tag dockercoins_$SERVICE $REGISTRY/$SERVICE:$TAG
docker push $REGISTRY/$SERVICE
done
```
@@ -120,12 +119,12 @@ It alters the code path for `docker run`, so it is allowed only under strict cir
- Start the other services:
```bash
export REGISTRY=127.0.0.1:5000
export TAG=v0.1
for SERVICE in hasher rng webui worker; do
docker service create --network dockercoins --detach=true \
--name $SERVICE $REGISTRY/$SERVICE:$TAG
done
REGISTRY=127.0.0.1:5000
TAG=v1
for SERVICE in hasher rng webui worker; do
docker service create --network dockercoins --detach=true \
--name $SERVICE $REGISTRY/$SERVICE:$TAG
done
```
]

View File

@@ -22,7 +22,7 @@ class: btp-manual
---
# Swarm Stacks
# Integration with Compose
- Compose is great for local development
@@ -48,7 +48,7 @@ class: btp-manual
- Resource limits are moved to this `deploy` section
- See [here](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md#upgrading) for the complete list of changes
- See [here](https://github.com/aanand/docker.github.io/blob/8524552f99e5b58452fcb1403e1c273385988b71/compose/compose-file.md#upgrading) for the complete list of changes
- Supersedes *Distributed Application Bundles*
@@ -149,7 +149,7 @@ Our registry is not *exactly* identical to the one deployed with `docker service
- Each stack gets its own overlay network
- Services of the stack are connected to this network
- Services of the task are connected to this network
<br/>(unless specified differently in the Compose file)
- Services get network aliases matching their name in the Compose file

View File

@@ -375,7 +375,7 @@ Some presentations from the Docker Distributed Systems Summit in Berlin:
([slides](https://speakerdeck.com/aluzzardi/heart-of-the-swarmkit-topology-management))
- Heart of the SwarmKit: Store, Topology & Object Model
([slides](https://www.slideshare.net/Docker/heart-of-the-swarmkit-store-topology-object-model))
([slides](http://www.slideshare.net/Docker/heart-of-the-swarmkit-store-topology-object-model))
([video](https://www.youtube.com/watch?v=EmePhjGnCXY))
And DockerCon Black Belt talks:

View File

@@ -49,7 +49,7 @@ This will display the unlock key. Copy-paste it somewhere safe.
]
Note: if you are doing the workshop on your own, using nodes
that you [provisioned yourself](https://@@GITREPO@@/tree/master/prepare-machine) or with [Play-With-Docker](https://play-with-docker.com/), you might have to use a different method to restart the Engine.
that you [provisioned yourself](https://@@GITREPO@@/tree/master/prepare-machine) or with [Play-With-Docker](http://play-with-docker.com/), you might have to use a different method to restart the Engine.
---

View File

@@ -14,9 +14,9 @@
More resources on this topic:
- [Do not use Docker-in-Docker for CI](
https://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/)
http://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/)
- [One container to rule them all](
https://jpetazzo.github.io/2016/04/03/one-container-to-rule-them-all/)
http://jpetazzo.github.io/2016/04/03/one-container-to-rule-them-all/)
---

View File

@@ -8,7 +8,7 @@ What about web interfaces to control and manage Swarm?
- [Portus](http://port.us.org) is a SUSE-backed open source web GUI for registry.
- Find lots of other Swarm tools in the [Awesome Docker list](https://awesome-docker.netlify.com).
- Find lots of other Swarm tools in the [Awesome Docker list](http://awesome-docker.netlify.com).
---

View File

@@ -1,6 +1,6 @@
name: healthchecks
# Health checks and auto-rollbacks
# Health checks
(New in Docker Engine 1.12)
@@ -61,11 +61,11 @@ name: healthchecks
---
## Enabling health checks and auto-rollbacks
## Automated rollbacks
Here is a comprehensive example using the CLI:
.small[
.sall[
```bash
docker service update \
--update-delay 5s \
@@ -96,8 +96,6 @@ We will use the following Compose file (`stacks/dockercoins+healthcheck.yml`):
hasher:
build: dockercoins/hasher
image: ${REGISTRY-127.0.0.1:5000}/hasher:${TAG-latest}
healthcheck:
test: curl -f http://localhost/ || exit 1
deploy:
replicas: 7
update_config:
@@ -111,9 +109,7 @@ We will use the following Compose file (`stacks/dockercoins+healthcheck.yml`):
---
## Enabling auto-rollback in dockercoins
We need to update our services with a healthcheck.
## Enabling auto-rollback
.exercise[
@@ -122,22 +118,44 @@ We need to update our services with a healthcheck.
cd ~/container.training/stacks
```
- Deploy the updated stack with healthchecks built-in:
- Deploy the updated stack:
```bash
docker stack deploy --compose-file dockercoins+healthcheck.yml dockercoins
```
]
This will also scale the `hasher` service to 7 instances.
---
## Visualizing a rolling update
First, let's make an "innocent" change and deploy it.
.exercise[
- Update the `sleep` delay in the code:
```bash
sed -i "s/sleep 0.1/sleep 0.2/" dockercoins/hasher/hasher.rb
```
- Build, ship, and run the new image:
```bash
export TAG=v0.5
docker-compose -f dockercoins+healthcheck.yml build
docker-compose -f dockercoins+healthcheck.yml push
docker service update dockercoins_hasher \
--image=127.0.0.1:5000/hasher:$TAG
```
]
---
## Visualizing an automated rollback
- Here's a good example of why healthchecks are necessary
- This breaking change will prevent the app from listening on the correct port
- The container still runs fine, it just won't accept connections on port 80
And now, a breaking change that will cause the health check to fail:
.exercise[
@@ -148,10 +166,11 @@ We need to update our services with a healthcheck.
- Build, ship, and run the new image:
```bash
export TAG=v0.3
export TAG=v0.6
docker-compose -f dockercoins+healthcheck.yml build
docker-compose -f dockercoins+healthcheck.yml push
docker service update --image=127.0.0.1:5000/hasher:$TAG dockercoins_hasher
docker service update dockercoins_hasher \
--image=127.0.0.1:5000/hasher:$TAG
```
]

View File

@@ -2,7 +2,7 @@
- This was initially written by [Jérôme Petazzoni](https://twitter.com/jpetazzo) to support in-person,
instructor-led workshops and tutorials
- Over time, [multiple contributors](https://@@GITREPO@@/graphs/contributors) also helped to improve these materials — thank you!
- You can also follow along on your own, at your own pace
@@ -15,7 +15,7 @@
[documentation](https://docs.docker.com/) ...
- ... And looking for answers in the [Docker forums](forums.docker.com),
[StackOverflow](https://stackoverflow.com/questions/tagged/docker),
[StackOverflow](http://stackoverflow.com/questions/tagged/docker),
and other outlets
---

View File

@@ -3,10 +3,10 @@
- [Docker Community Slack](https://community.docker.com/registrations/groups/4316)
- [Docker Community Forums](https://forums.docker.com/)
- [Docker Hub](https://hub.docker.com)
- [Docker Blog](https://blog.docker.com/)
- [Docker documentation](https://docs.docker.com/)
- [Docker Blog](http://blog.docker.com/)
- [Docker documentation](http://docs.docker.com/)
- [Docker on StackOverflow](https://stackoverflow.com/questions/tagged/docker)
- [Docker on Twitter](https://twitter.com/docker)
- [Play With Docker Hands-On Labs](https://training.play-with-docker.com/)
- [Docker on Twitter](http://twitter.com/docker)
- [Play With Docker Hands-On Labs](http://training.play-with-docker.com/)
.footnote[These slides (and future updates) are on → https://container.training/]
.footnote[These slides (and future updates) are on → http://container.training/]

View File

@@ -417,4 +417,4 @@ that you don't drop messages on the floor. Good luck.
If you want to learn more about the GELF driver,
have a look at [this blog post](
https://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).
http://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).

View File

@@ -454,7 +454,7 @@ class: snap
.exercise[
- Run the following command *on every node*:
- Run the following command *on every node:*
```bash
snapd -t 0 -l 1 --tribe --tribe-seed node1:6000
```
@@ -636,7 +636,7 @@ class: snap
]
Note: this will allow any node to publish metrics data to `localhost:8086`,
and it will allow us to access the admin interface by connecting to any node
and it will allows us to access the admin interface by connecting to any node
on port 8083.
.warning[Make sure to use InfluxDB 0.13; a few things changed in 1.0

View File

@@ -160,7 +160,7 @@ class: self-paced
- Keep managers in one region (multi-zone/datacenter/rack)
- Groups of 3 or 5 nodes: all are managers. Beyond 5, separate out managers and workers
- Groups of 3 or 5 nodes: all are managers. Beyond 5, seperate out managers and workers
- Groups of 10-100 nodes: pick 5 "stable" nodes to be managers
@@ -173,7 +173,7 @@ class: self-paced
Cloud pro-tip: use separate auto-scaling groups for managers and workers
See docker's "[Running Docker at scale](https://success.docker.com/article/running-docker-ee-at-scale)" document
See docker's "[Running Docker at scale](http://success.docker.com/article/running-docker-ee-at-scale)" document
]
---

View File

@@ -25,7 +25,7 @@ class: self-paced
## Catching up
Assuming you have 5 nodes provided by
[Play-With-Docker](https://play-with-docker.com/), do this from `node1`:
[Play-With-Docker](http://www.play-with-docker/), do this from `node1`:
```bash
docker swarm init --advertise-addr eth0

View File

@@ -1,44 +1,72 @@
# Rolling updates
- Let's force an update on hasher to watch it update
- Let's change a scaled service: `worker`
.exercise[
- First lets scale up hasher to 7 replicas:
```bash
docker service scale dockercoins_hasher=7
```
- Edit `worker/worker.py`
- Force a rolling update (replace containers) to different image:
- Locate the `sleep` instruction and change the delay
- Build, ship, and run our changes:
```bash
docker service update --image 127.0.0.1:5000/hasher:v0.1 dockercoins_hasher
export TAG=v0.4
docker-compose -f dockercoins.yml build
docker-compose -f dockercoins.yml push
docker stack deploy -c dockercoins.yml dockercoins
```
]
- You can run `docker events` in a separate `node1` shell to see Swarm actions
---
- You can use `--force` to replace containers without a config change
## Viewing our update as it rolls out
.exercise[
- Check the status of the `dockercoins_worker` service:
```bash
watch docker service ps dockercoins_worker
```
<!-- ```wait dockercoins_worker.1``` -->
<!-- ```keys ^C``` -->
- Hide the tasks that are shutdown:
```bash
watch -n1 "docker service ps dockercoins_worker | grep -v Shutdown.*Shutdown"
```
<!-- ```wait dockercoins_worker.1``` -->
<!-- ```keys ^C``` -->
]
If you had stopped the workers earlier, this will automatically restart them.
By default, SwarmKit does a rolling upgrade, one instance at a time.
We should therefore see the workers being updated one by one.
---
## Changing the upgrade policy
- We can change many options on how updates happen
- We can set upgrade parallelism (how many instances to update at the same time)
- And upgrade delay (how long to wait between two batches of instances)
.exercise[
- Change the parallelism to 2, and the max failed container updates to 25%:
- Change the parallelism to 2 and the delay to 5 seconds:
```bash
docker service update --update-parallelism 2 \
--update-max-failure-ratio .25 dockercoins_hasher
docker service update dockercoins_worker \
--update-parallelism 2 --update-delay 5s
```
]
- No containers were replaced; this is called a "no-op" change
- Service metadata-only changes don't require orchestrator operations
The current upgrade will continue at a faster pace.
---
@@ -62,17 +90,15 @@
- At any time (e.g. before the upgrade is complete), we can rollback:
- by editing the Compose file and redeploying
- by editing the Compose file and redeploying;
- by using the special `--rollback` flag with `service update`
- by using `docker service rollback`
- or with the special `--rollback` flag
.exercise[
- Try to rollback the webui service:
- Try to rollback the service:
```bash
docker service rollback dockercoins_webui
docker service update dockercoins_worker --rollback
```
]
@@ -85,8 +111,6 @@ What happens with the web UI graph?
- Rollback reverts to the previous service definition
- see `PreviousSpec` in `docker service inspect <servicename>`
- If we visualize successive updates as a stack:
- it doesn't "pop" the latest update

View File

@@ -28,7 +28,7 @@
Remove `-v` if you don't like verbose things.
Shameless promo: for more Go and Docker love, check
[this blog post](https://jpetazzo.github.io/2016/09/09/go-docker/)!
[this blog post](http://jpetazzo.github.io/2016/09/09/go-docker/)!
Note: in the unfortunate event of SwarmKit *master* branch being broken,
the build might fail. In that case, just skip the Swarm tools section.

View File

@@ -14,12 +14,11 @@
---
## Updating a single service with `service update`
## Updating a single service the hard way
- To update a single service, we could do the following:
```bash
export REGISTRY=127.0.0.1:5000
export TAG=v0.2
REGISTRY=localhost:5000 TAG=v0.3
IMAGE=$REGISTRY/dockercoins_webui:$TAG
docker build -t $IMAGE webui/
docker push $IMAGE
@@ -32,11 +31,11 @@
---
## Updating services with `stack deploy`
## Updating services the easy way
- With the Compose integration, all we have to do is:
```bash
export TAG=v0.2
export TAG=v0.3
docker-compose -f composefile.yml build
docker-compose -f composefile.yml push
docker stack deploy -c composefile.yml nameofstack
@@ -48,8 +47,6 @@
- We don't need to learn new commands!
- It will diff each service and only update ones that changed
---
## Changing the code
@@ -58,11 +55,26 @@
.exercise[
- Update the size of text on our webui:
- Edit the file `webui/files/index.html`:
```bash
sed -i "s/15px/50px/" dockercoins/webui/files/index.html
vi dockercoins/webui/files/index.html
```
<!-- ```wait <title>``` -->
- Locate the `font-size` CSS attribute and increase it (at least double it)
<!--
```keys /font-size```
```keys ^J```
```keys lllllllllllllcw45px```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
- Save and exit
]
---
@@ -80,7 +92,7 @@
- Build, ship, and run:
```bash
export TAG=v0.2
export TAG=v0.3
docker-compose -f dockercoins.yml build
docker-compose -f dockercoins.yml push
docker stack deploy -c dockercoins.yml dockercoins
@@ -88,8 +100,6 @@
]
- Because we're tagging all images in this demo v0.2, deploy will update all apps, FYI
---
## Viewing our changes

View File

@@ -1,8 +1,8 @@
## Brand new versions!
- Engine 18.09
- Compose 1.23
- Machine 0.16
- Engine 18.06
- Compose 1.22
- Machine 0.15
.exercise[
@@ -17,7 +17,7 @@
---
## Wait, what, 18.09 ?!?
## Wait, what, 17.12 ?!?
--

View File

@@ -4,6 +4,7 @@
<title>@@TITLE@@</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="stylesheet" href="workshop.css">
<link rel="stylesheet" href="override.css">
</head>
<body>
<!--

View File

@@ -10,8 +10,6 @@ services:
hasher:
build: dockercoins/hasher
image: ${REGISTRY-127.0.0.1:5000}/hasher:${TAG-latest}
healthcheck:
test: curl -f http://localhost/ || exit 1
deploy:
replicas: 7
update_config:

View File

@@ -0,0 +1,35 @@
version: "3"
services:
rng:
build: dockercoins/rng
image: ${REGISTRY-127.0.0.1:5000}/rng:${TAG-latest}
deploy:
mode: global
hasher:
build: dockercoins/hasher
image: ${REGISTRY-127.0.0.1:5000}/hasher:${TAG-latest}
deploy:
replicas: 7
update_config:
delay: 5s
failure_action: rollback
max_failure_ratio: .5
monitor: 5s
parallelism: 1
webui:
build: dockercoins/webui
image: ${REGISTRY-127.0.0.1:5000}/webui:${TAG-latest}
ports:
- "8000:80"
redis:
image: redis
worker:
build: dockercoins/worker
image: ${REGISTRY-127.0.0.1:5000}/worker:${TAG-latest}
deploy:
replicas: 10

View File

@@ -5,7 +5,7 @@ services:
image: elasticsearch:2
logstash:
image: logstash:2
image: logstash
command: |
-e '
input {