Compare commits

...

12 Commits

Author | SHA1 | Message | Date
Jerome Petazzoni | ae9780feea | fix-redirects.sh: adding forced redirect | 2020-04-07 16:50:33 -05:00
Jerome Petazzoni | bb7d03751f | Ready to deploy | 2019-10-30 11:54:28 -05:00
Jerome Petazzoni | efe491c05d | cbr0 woes | 2019-10-29 20:34:50 -05:00
Jerome Petazzoni | a150f53fa7 | Merge branch 'master' into lisa-2019-10 | 2019-10-29 20:31:31 -05:00
Jerome Petazzoni | 1d8353b7e2 | settings | 2019-10-29 20:29:50 -05:00
Jerome Petazzoni | 93b1cc5e6e | Fixes | 2019-10-29 20:29:26 -05:00
Jerome Petazzoni | 4ad56bd8e7 | 3 nodes are enough | 2019-10-29 19:49:39 -05:00
Jerome Petazzoni | aefa0576a7 | Merge branch 'master' into lisa-2019-10 | 2019-10-29 19:48:35 -05:00
Jerome Petazzoni | 52a7434e70 | fixes | 2019-10-29 19:43:39 -05:00
Jerome Petazzoni | 0e4ed4fa5a | Tutorial | 2019-10-29 19:37:28 -05:00
Jerome Petazzoni | d01635b5fb | Last minute fixes | 2019-10-28 13:14:08 -05:00
Jerome Petazzoni | e9e650ee48 | Push | 2019-10-28 11:40:06 -05:00
28 changed files with 1050 additions and 570 deletions


@@ -1,5 +1,5 @@
# Number of VMs per cluster
clustersize: 4
clustersize: 3
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
@@ -26,3 +26,8 @@ machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
url: https://lisa-2019-10.container.training
event: tutorial
backside: true
clusternumber: 10

prepare-vms/setup-lisa.sh (new executable file, +25 lines)

@@ -0,0 +1,25 @@
#!/bin/sh
set -e
export AWS_INSTANCE_TYPE=t3a.small
INFRA=infra/aws-us-west-2
STUDENTS=120
PREFIX=$(date +%Y-%m-%d-%H-%M)
SETTINGS=jerome
TAG=$PREFIX-$SETTINGS
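# Provision 3 VMs per student (presumably to match the clustersize of 3 in the settings)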
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
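# Post-provisioning steps (as suggested by the subcommand names): push the config,
# disable the stock Docker setup, install Kubernetes binaries, disable EC2
# source/destination address checks, and generate the printed login cards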
./workshopctl deploy $TAG
./workshopctl disabledocker $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG


@@ -212,16 +212,18 @@ img.kube {
{% for x in range(pagesize) %}
<div class="back">
<br/>
<p>You got this at the workshop
"Getting Started With Kubernetes and Container Orchestration"
during QCON London (March 2019).</p>
<p>If you liked that workshop,
<p>You got this at the tutorial:<br/>
"Deep Dive into Kubernetes Internals
for Builders and Operators" during LISA
in Portland (October 2019).
</p>
<p>If you liked that tutorial,
I can train your team or organization
on Docker, container, and Kubernetes,
with curriculums of 1 to 5 days.
with courses of 1 to 5 days.
</p>
<p>Interested? Contact me at:</p>
<p>jerome.petazzoni@gmail.com</p>
<p><strong>jerome.petazzoni@gmail.com</strong></p>
<p>Thank you!</p>
</div>
{% endfor %}


@@ -5,3 +5,5 @@
# And this allows us to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
/ /lisa.html 200!

Binary file not shown (added, 1.3 MiB).

Binary file not shown (added, 3.3 KiB).


@@ -1,63 +0,0 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- - containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- containers/Resource_Limits.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Windows_Containers.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- - containers/Docker_Machine.md
- containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
#- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md


@@ -1,63 +0,0 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
exclude:
- in-person
chapters:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Windows_Containers.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md


@@ -110,9 +110,9 @@ class: extra-details
## In practice: kube-router
- We are going to set up a new cluster
- We are going to reconfigure our cluster
- For this new cluster, we will use kube-router
(control plane and kubelets)
- kube-router will provide the "pod network"
@@ -184,73 +184,79 @@ class: extra-details
## The plan
- We'll work in a new cluster (named `kuberouter`)
- We'll update the control plane's configuration
- We will run a simple control plane (like before)
- the controller manager will allocate `podCIDR` subnets
- ... But this time, the controller manager will allocate `podCIDR` subnets
(so that we don't have to manually assign subnets to individual nodes)
- we will allow privileged containers
- We will create a DaemonSet for kube-router
- We will join nodes to the cluster
- We will restart kubelets in CNI mode
- The DaemonSet will automatically start a kube-router pod on each node
---
## Logging into the new cluster
## Getting the files
.exercise[
- Log into node `kuberouter1`
- Clone the workshop repository:
- If you haven't cloned the training repo yet, do it:
```bash
git clone https://@@GITREPO@@
cd ~
git clone https://container.training
```
- Move to this directory:
- Then move to this directory:
```bash
cd container.training/compose/kube-router-k8s-control-plane
cd ~/container.training/compose/kube-router-k8s-control-plane
```
]
---
## Our control plane
## Changes to the control plane
- We will use a Compose file to start the control plane
- It is similar to the one we used with the `kubenet` cluster
- The API server is started with `--allow-privileged`
- The API server must be started with `--allow-privileged`
(because we will start kube-router in privileged pods)
- The controller manager is started with extra flags too:
- The controller manager must be started with extra flags too:
`--allocate-node-cidrs` and `--cluster-cidr`
- We need to edit the Compose file to set the Cluster CIDR
.exercise[
- Make these changes!
(You might have to restart scheduler and controller manager, too.)
]
.footnote[If your control plane is broken, don't worry!
<br/>We provide a Compose file to catch up.]
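A sketch of the flags in question (the flag names are the real ones; the standalone command lines below are only an illustration, since the tutorial uses the Compose file instead):
```bash
# API server: accept privileged pods (kube-router needs them)
kube-apiserver --etcd-servers=http://localhost:2379 --allow-privileged=true
# Controller manager: allocate one podCIDR per node out of the cluster CIDR
kube-controller-manager --master=http://localhost:8080 \
    --allocate-node-cidrs=true --cluster-cidr=10.C.0.0/16
```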
---
## Starting the control plane
## Catching up
- Our cluster CIDR will be `10.C.0.0/16`
(where `C` is our cluster number)
- If your control plane is broken, here is how to start a new one
.exercise[
- Edit the Compose file to set the Cluster CIDR:
- Make sure the Docker Engine is running, or start it with:
```bash
vim docker-compose.yaml
dockerd
```
- Edit the Compose file to change the `--cluster-cidr`
- Our cluster CIDR will be `10.C.0.0/16`
<br/>
(where `C` is our cluster number)
- Start the control plane:
```bash
docker-compose up
@@ -278,7 +284,7 @@ class: extra-details
- The address of the API server will be `http://A.B.C.D:8080`
(where `A.B.C.D` is the public address of `kuberouter1`, running the control plane)
(where `A.B.C.D` is the public address of `node1`, running the control plane)
.exercise[
@@ -294,46 +300,9 @@ class: extra-details
]
Note: the DaemonSet won't create any pods (yet) since there are no nodes (yet).
---
## Generating the kubeconfig for kubelet
- This is similar to what we did for the `kubenet` cluster
.exercise[
- Generate the kubeconfig file (replacing `X.X.X.X` with the address of `kuberouter1`):
```bash
kubectl config set-cluster cni --server http://`X.X.X.X`:8080
kubectl config set-context cni --cluster cni
kubectl config use-context cni
cp ~/.kube/config ~/kubeconfig
```
]
---
## Distributing kubeconfig
- We need to copy that kubeconfig file to the other nodes
.exercise[
- Copy `kubeconfig` to the other nodes:
```bash
for N in 2 3; do
scp ~/kubeconfig kuberouter$N:
done
```
]
---
## Starting kubelet
## Restarting kubelets
- We don't need the `--pod-cidr` option anymore
@@ -350,37 +319,59 @@ Note: the DaemonSet won't create any pods (yet) since there are no nodes (yet).
- Open more terminals and join the other nodes:
```bash
ssh kuberouter2 sudo kubelet --kubeconfig ~/kubeconfig --network-plugin=cni
ssh kuberouter3 sudo kubelet --kubeconfig ~/kubeconfig --network-plugin=cni
ssh node2 sudo kubelet --kubeconfig ~/kubeconfig --network-plugin=cni
ssh node3 sudo kubelet --kubeconfig ~/kubeconfig --network-plugin=cni
```
]
---
## Setting up a test
## Check kuberouter pods
- Let's create a Deployment and expose it with a Service
- Make sure that kuberouter pods are running
.exercise[
- Create a Deployment running a web server:
- List pods in the `kube-system` namespace:
```bash
kubectl create deployment web --image=jpetazzo/httpenv
```
- Scale it so that it spans multiple nodes:
```bash
kubectl scale deployment web --replicas=5
```
- Expose it with a Service:
```bash
kubectl expose deployment web --port=8888
kubectl get pods --namespace=kube-system
```
]
If the pods aren't running, it could be:
- privileged containers aren't enabled
<br/>(add `--allow-privileged` flag to the API server)
- missing service account token
<br/>(add `--disable-admission-plugins=ServiceAccount` flag)
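If you need to dig deeper, these generic checks (nothing kube-router specific; just a suggestion) usually reveal the problem:
```bash
kubectl describe pods --namespace=kube-system
kubectl get events --namespace=kube-system --sort-by=.metadata.creationTimestamp
```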
---
## Testing
- Let's delete all pods
- They should be re-created with new, correct addresses
.exercise[
- Delete all pods:
```bash
kubectl delete pods --all
```
- Check the new pods:
```bash
kubectl get pods -o wide
```
]
Note: if you provisioned a new control plane, re-create and re-expose the deployment.
---
## Checking that everything works
@@ -403,6 +394,20 @@ Note that if you send multiple requests, they are load-balanced in a round robin
This shows that we are using IPVS (vs. iptables, which picked random endpoints).
Problems? Check next slide!
---
## If it doesn't quite work ...
- If we used kubenet before, we now have a `cbr0` bridge
- This bridge (and its subnet) might conflict with what we're using now
- To see if it's the case, check if you have duplicate routes with `ip ro`
- To fix it, delete the old bridge with `ip link del cbr0`
---
## Troubleshooting
@@ -462,7 +467,7 @@ We should see the local pod CIDR connected to `kube-bridge`, and the other nodes
These commands will give an error message that includes:
```
dial tcp: lookup kuberouterX on 127.0.0.11:53: no such host
dial tcp: lookup nodeX on 127.0.0.11:53: no such host
```
What does that mean?
@@ -475,7 +480,7 @@ What does that mean?
- By default, it creates a connection using the kubelet's name
(e.g. `http://kuberouter1:...`)
(e.g. `http://node1:...`)
- This requires our node names to be in DNS


@@ -31,31 +31,12 @@
---
## Our environment
- We will use the machine indicated as `dmuc1`
(this stands for "Dessine Moi Un Cluster" or "Draw Me A Cluster",
<br/>in homage to Saint-Exupéry's "The Little Prince")
- This machine:
- runs Ubuntu LTS
- has Kubernetes, Docker, and etcd binaries installed
- but nothing is running
---
## Checking our environment
- Let's make sure we have everything we need first
.exercise[
- Log into the `dmuc1` machine
- Get root:
```bash
sudo -i
@@ -547,7 +528,7 @@ Success!
Our node should show up.
Its name will be its hostname (it should be `dmuc1`).
Its name will be its hostname (it should be `node1`).
---


@@ -4,41 +4,11 @@
- Let's see what it takes to add more nodes
- We are going to use another set of machines: `kubenet`
---
## The environment
## Next steps
- We have 3 identical machines: `kubenet1`, `kubenet2`, `kubenet3`
- The Docker Engine is installed (and running) on these machines
- The Kubernetes packages are installed, but nothing is running
- We will use `kubenet1` to run the control plane
---
## The plan
- Start the control plane on `kubenet1`
- Join the 3 nodes to the cluster
- Deploy and scale a simple web server
.exercise[
- Log into `kubenet1`
]
---
## Running the control plane
- We will use a Compose file to start the control plane components
- We will need some files that are on the tutorial GitHub repo
.exercise[
@@ -47,6 +17,56 @@
git clone https://@@GITREPO@@
```
]
---
## Control plane
- We can use the control plane that we deployed on node1
- If that didn't quite work, don't panic!
- We provide a way to catch up and get a control plane in a pinch
---
## Cleaning up
- Only do this if your control plane doesn't work and you want to start over
.exercise[
- Reboot the node to make sure nothing else is running:
```bash
sudo reboot
```
- Log in again:
```bash
ssh docker@`A.B.C.D`
```
- Get root:
```
sudo -i
```
]
---
## Catching up
- We will use a Compose file to start the control plane components
.exercise[
- Start the Docker Engine:
```bash
dockerd
```
- Go to the `compose/simple-k8s-control-plane` directory:
```bash
cd container.training/compose/simple-k8s-control-plane
@@ -84,7 +104,7 @@
class: extra-details
## Differences from `dmuc`
## Differences with the other control plane
- Our new control plane listens on `0.0.0.0` instead of the default `127.0.0.1`
@@ -120,12 +140,9 @@ class: extra-details
.exercise[
- Copy `kubeconfig` to the other nodes:
```bash
for N in 2 3; do
scp ~/kubeconfig kubenet$N:
done
```
- Copy `~/.kube/config` to the other nodes
(Given the size of the file, you can copy-paste it!)
]
@@ -133,28 +150,49 @@ class: extra-details
## Starting kubelet
- Reminder: kubelet needs to run as root; don't forget `sudo`!
*The following assumes that you copied the kubeconfig file to /tmp/kubeconfig.*
.exercise[
- Join the first node:
```bash
sudo kubelet --kubeconfig ~/kubeconfig
```
- Log into node2
- Open more terminals and join the other nodes to the cluster:
- Start the Docker Engine:
```bash
ssh kubenet2 sudo kubelet --kubeconfig ~/kubeconfig
ssh kubenet3 sudo kubelet --kubeconfig ~/kubeconfig
sudo dockerd &
```
- Start kubelet:
```bash
sudo kubelet --kubeconfig /tmp/kubeconfig
```
]
Repeat on more nodes if desired.
---
## If we're running the "old" control plane
- By default, the API server only listens on localhost
- The other nodes will not be able to connect
(symptom: a flood of `node "nodeX" not found` messages)
- We need to add `--address 0.0.0.0` to the API server
(yes, [this will expose our API server to all kinds of shenanigans](https://twitter.com/TabbySable/status/1188901099446554624))
- Restarting API server might cause scheduler and controller manager to quit
(you might have to restart them)
---
## Checking cluster status
- We should now see all 3 nodes
- We should now see all the nodes
- At first, their `STATUS` will be `NotReady`
@@ -179,14 +217,14 @@ class: extra-details
.exercise[
- Create a Deployment running NGINX:
- Create a Deployment running httpenv:
```bash
kubectl create deployment web --image=nginx
kubectl create deployment httpenv --image=jpetazzo/httpenv
```
- Scale it:
```bash
kubectl scale deployment web --replicas=5
kubectl scale deployment httpenv --replicas=5
```
]
@@ -197,7 +235,7 @@ class: extra-details
- The pods will be scheduled on the nodes
- The nodes will pull the `nginx` image, and start the pods
- The nodes will pull the `jpetazzo/httpenv` image, and start the pods
- What are the IP addresses of our pods?
@@ -403,7 +441,7 @@ class: extra-details
- Expose our Deployment:
```bash
kubectl expose deployment web --port=80
kubectl expose deployment httpenv --port=8888
```
]
@@ -416,7 +454,7 @@ class: extra-details
- Retrieve the ClusterIP address:
```bash
kubectl get svc web
kubectl get svc httpenv
```
- Send a few requests to the ClusterIP address (with `curl`)

slides/lisa.html (new file, +3 lines)

@@ -0,0 +1,3 @@
<a href="talk.yml.html">Slides for the talk (Monday)</a>
|
<a href="tutorial.yml.html">Slides for the tutorial (Wednesday)</a>

slides/lisa/begin.md (new file, +272 lines)

@@ -0,0 +1,272 @@
class: title
@@TITLE@@
.footnote[![QR Code to the slides](images/qrcode-lisa.png)☝🏻 Slides!]
---
## Outline
- Introductions
- Kubernetes anatomy
- Building a 1-node cluster
- Connecting to services
- Adding more nodes
- What's missing
---
class: title
Introductions
---
class: tutorial-only
## Viewer advisory
- Have you attended my talk on Monday?
--
- Then you may experience *déjà-vu* during the next few minutes
(Sorry!)
--
- But I promise we'll soon build (and break) some clusters!
---
## Hi!
- Jérôme Petazzoni ([@jpetazzo](https://twitter.com/jpetazzo))
- 🇫🇷🇺🇸🇩🇪
- 📦🧔🏻
- 🐋(📅📅📅📅📅📅📅)
- 🔥🧠😢💊 ([1], [2], [3])
- 👨🏻‍🏫✨☸️💰
- 😄👍🏻
[1]: http://jpetazzo.github.io/2018/09/06/the-depression-gnomes/
[2]: http://jpetazzo.github.io/2018/02/17/seven-years-at-docker/
[3]: http://jpetazzo.github.io/2017/12/24/productivity-depression-kanban-emoji/
???
I'm French, living in the US, with also a foot in Berlin (Germany).
I'm a container hipster: I was running containers in production,
before it was cool.
I worked 7 years at Docker, which according to Corey Quinn,
is "long enough to be legally declared dead".
I also struggled for a few years with depression and burn-out.
It's not what I'll discuss today, but it's a topic that matters
a lot to me, and I wrote a bit about it, check my blog if you'd like.
After a break, I decided to do something I love:
teaching witchcraft. I deliver Kubernetes training.
As you can see, I love emojis, but if you don't, it's OK.
(There will be far fewer emojis on the following slides.)
---
## Why this talk?
- One of my goals in 2018: pass the CKA exam
--
- Things I knew:
- kubeadm
- kubectl run, expose, YAML, Helm
- ancient container lore
--
- Things I didn't:
- how Kubernetes *really* works
- deploy Kubernetes The Hard Way
---
## Scope
- Goals:
- learn enough about Kubernetes to ace that exam
- learn enough to teach that stuff
- Non-goals:
- set up a *production* cluster from scratch
- build everything from source
---
## Why are *you* here?
--
- Need/want/must build Kubernetes clusters
--
- Just curious about Kubernetes internals
--
- The Zelda theme
--
- (Other, please specify)
--
class: tutorial-only
.footnote[*Damn. Jérôme is even using the same jokes for his talk and his tutorial!<br/>This guy really has no shame. Tsk.*]
---
class: title
TL,DR
---
class: title
*The easiest way to install Kubernetes
is to get someone else to do it for you.*
(Me, after extensive research.)
???
Which means that if, at any point, you decide to leave,
I will not take it personally, but assume that you
eventually saw the light, and that you would like to
hire me or some of my colleagues to build your
Kubernetes clusters. It's all good.
---
class: talk-only
## This talk is also available as a tutorial
- Wednesday, October 30, 2019 - 11:00 am to 12:30 pm
- Salon ABCD
- Same content
- Everyone will get a cluster of VMs
- Everyone will be able to do the stuff that I'll demo today!
---
class: title
The Truth¹ About Kubernetes
.footnote[¹Some of it]
---
## What we want to do
```bash
kubectl run web --image=nginx --replicas=3
```
*or*
```bash
kubectl create deployment web --image=nginx
kubectl scale deployment web --replicas=3
```
*then*
```bash
kubectl expose deployment web --port=80
curl http://...
```
???
Kubernetes might feel like an imperative system,
because we can say "run this; do that."
---
## What really happens
- `kubectl` generates a manifest describing a Deployment
- That manifest is sent to the Kubernetes API server
- The Kubernetes API server validates the manifest
- ... then persists it to etcd
- Some *controllers* wake up and do a bunch of stuff
.footnote[*The amazing diagram on the next slide is courtesy of [Lucas Käldström](https://twitter.com/kubernetesonarm).*]
???
In reality, it is a declarative system.
We write manifests, descriptions of what we want, and Kubernetes tries to make it happen.
---
class: pic
![Diagram showing Kubernetes architecture](images/k8s-arch4-thanks-luxas.png)
???
What we're really doing, is storing a bunch of objects in etcd.
But etcd, unlike a SQL database, doesn't have schemas or types.
So to prevent us from dumping any kind of trash data in etcd,
we have to read/write to it through the API server.
The API server will enforce typing and consistency.
Etcd doesn't have schemas or types, but it has the ability to
watch a key or set of keys, meaning that it's possible to subscribe
to updates of objects.
The controller manager is a process that has a bunch of loops,
each one responsible for a specific type of object.
So there is one that will watch the deployments, and as soon
as we create, update, or delete a deployment, it will wake up
and do something about it.

slides/lisa/cni.md (new file, +167 lines)

@@ -0,0 +1,167 @@
class: title
Beyond kubenet
---
## When kubenet is not enough (1/2)
- IP address allocation is rigid
(one subnet per node)
- What about DHCP?
- What about e.g. ENI on AWS?
(allocating Elastic Network Interfaces to containers)
---
## When kubenet is not enough (2/2)
- Containers are connected to a Linux bridge
- What about:
- Open vSwitch
- VXLAN
- skipping layer 2
- using a network interface directly (macvlan, SR-IOV...)
---
## The Container Network Interface
- Allows us to decouple network configuration from Kubernetes
- Implemented by plugins
- Plugins are executables that will be invoked by kubelet
- Plugins are responsible for:
- allocating IP addresses for containers
- configuring the network for containers
- Plugins can be combined and chained when it makes sense
---
## Combining plugins
- Interface could be created by e.g. `vlan` or `bridge` plugin
- IP address could be allocated by e.g. `dhcp` or `host-local` plugin
- Interface parameters (MTU, sysctls) could be tweaked by the `tuning` plugin
The reference plugins are available [here].
Look in each plugin's directory for its documentation.
[here]: https://github.com/containernetworking/plugins/tree/master/plugins
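As an illustration, a chained configuration combining `bridge`, `host-local`, and `tuning` could look like this (the file name, network name, subnet, and sysctl are made up for the example):
```bash
cat > /etc/cni/net.d/10-mynet.conflist <<'EOF'
{
  "cniVersion": "0.3.1",
  "name": "mynet",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "isGateway": true,
      "ipMasq": true,
      "ipam": { "type": "host-local", "subnet": "10.1.1.0/24" }
    },
    { "type": "tuning", "sysctl": { "net.core.somaxconn": "512" } }
  ]
}
EOF
```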
---
## How plugins are invoked
- Parameters are given through environment variables, including:
- CNI_COMMAND: desired operation (ADD, DEL, CHECK, or VERSION)
- CNI_CONTAINERID: container ID
- CNI_NETNS: path to network namespace file
- CNI_IFNAME: what the network interface should be named
- The network configuration must be provided to the plugin on stdin
(this avoids race conditions that could happen by passing a file path)
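To make that concrete, here is a sketch of invoking the `bridge` plugin by hand (assumes the reference plugins are installed in `/opt/cni/bin` and we are root; the namespace and config are throwaway examples):
```bash
ip netns add demo
echo '{ "cniVersion": "0.3.1", "name": "demo", "type": "bridge", "bridge": "cni0",
        "ipam": { "type": "host-local", "subnet": "10.1.1.0/24" } }' |
  CNI_COMMAND=ADD CNI_CONTAINERID=demo1 CNI_NETNS=/var/run/netns/demo \
  CNI_IFNAME=eth0 CNI_PATH=/opt/cni/bin /opt/cni/bin/bridge
```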
---
## Setting up CNI
- We are going to use kube-router
- kube-router will provide the "pod network"
(connectivity with pods)
- kube-router will also provide internal service connectivity
(replacing kube-proxy)
- kube-router can also function as a Network Policy Controller
(implementing firewalling between pods)
---
## How kube-router works
- Very simple architecture
- Does not introduce new CNI plugins
(uses the `bridge` plugin, with `host-local` for IPAM)
- Pod traffic is routed between nodes
(no tunnel, no new protocol)
- Internal service connectivity is implemented with IPVS
- kube-router daemon runs on every node
---
## What kube-router does
- Connect to the API server
- Obtain the local node's `podCIDR`
- Inject it into the CNI configuration file
(we'll use `/etc/cni/net.d/10-kuberouter.conflist`)
- Obtain the addresses of all nodes
- Establish a *full mesh* BGP peering with the other nodes
- Exchange routes over BGP
- Add routes to the Linux kernel
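Once kube-router is running, a quick way to check the result (just a sanity check, not part of the original demo):
```bash
# Each node should have its own podCIDR ...
kubectl get nodes -o custom-columns=NAME:.metadata.name,PODCIDR:.spec.podCIDR
# ... and routes to the other nodes' podCIDRs should appear in the kernel
ip route
```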
---
## What's BGP?
- BGP (Border Gateway Protocol) is the protocol used between internet routers
- It [scales](https://www.cidr-report.org/as2.0/)
pretty [well](https://www.cidr-report.org/cgi-bin/plota?file=%2fvar%2fdata%2fbgp%2fas2.0%2fbgp-active%2etxt&descr=Active%20BGP%20entries%20%28FIB%29&ylabel=Active%20BGP%20entries%20%28FIB%29&with=step)
(it is used to announce the 700k CIDR prefixes of the internet)
- It is spoken by many hardware routers from many vendors
- It also has many software implementations (Quagga, Bird, FRR...)
- Experienced network folks generally know it (and appreciate it)
- It is also used by Calico (another popular network system for Kubernetes)
- Using BGP allows us to interconnect our "pod network" with other systems
---
class: pic
![Demo time!](images/demo-with-kht.png)

slides/lisa/dmuc.md (new file, +56 lines)

@@ -0,0 +1,56 @@
class: title
Building a 1-node cluster
---
## Requirements
- Linux machine (x86_64)
2 GB RAM, 1 CPU is OK
- Root (for Docker and Kubelet)
- Binaries:
- etcd
- Kubernetes
- Docker
---
## What we will do
- Create a deployment
(with `kubectl create deployment`)
- Look for our pods
- If pods are created: victory
- Else: troubleshoot, try again
.footnote[*Note: the exact commands that I run will be available
in the slides of the tutorial.*]
---
class: pic
![Demo time!](images/demo-with-kht.png)
---
## What have we done?
- Started a basic Kubernetes control plane
(no authentication; many features are missing)
- Deployed a few pods

slides/lisa/end.md (new file, +128 lines)

@@ -0,0 +1,128 @@
class: title, talk-only
What's missing?
---
## What's missing?
- Mostly: security
- Notably: RBAC
- Also: availability
---
## TLS! TLS everywhere!
- Create certs for the control plane:
- etcd
- API server
- controller manager
- scheduler
- Create individual certs for nodes
- Create the service account key pair
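A very rough sketch of generating one of these certs with plain `openssl` (real setups typically use cfssl, kubeadm, or similar; names and durations are arbitrary):
```bash
# Throwaway CA
openssl req -x509 -newkey rsa:2048 -nodes -keyout ca.key -out ca.crt \
        -subj "/CN=kubernetes-ca" -days 365
# Key, CSR, and signed cert for the API server
openssl req -newkey rsa:2048 -nodes -keyout apiserver.key -out apiserver.csr \
        -subj "/CN=kube-apiserver"
openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial \
        -out apiserver.crt -days 365
```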
---
## Service accounts
- The controller manager will generate tokens for service accounts
(these tokens are JWT, JSON Web Tokens, signed with a specific key)
- The API server will validate these tokens (with the matching key)
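In terms of flags, the key pair is wired in like this (sketch; the paths are made up and other required flags are omitted):
```bash
kube-controller-manager --service-account-private-key-file=/pki/sa.key   # signs the tokens
kube-apiserver --service-account-key-file=/pki/sa.pub                    # verifies them
```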
---
## Nodes
- Enable NodeRestriction admission controller
- authorizes kubelets to update their own node and pod data
- Enable Node Authorizer
- prevents kubelets from accessing data that they shouldn't
- only authorizes access to e.g. a configmap if a pod is using it
- Bootstrap tokens
- add nodes to the cluster safely+dynamically
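A sketch of the API server flags that enable these protections (other required flags omitted):
```bash
kube-apiserver \
  --authorization-mode=Node,RBAC \
  --enable-admission-plugins=NodeRestriction \
  --enable-bootstrap-token-auth=true
```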
---
## Consequences of API server outage
- What happens if the API server goes down?
- kubelet will try to reconnect (as long as necessary)
- our apps will be just fine (but autoscaling will be broken)
- How can we improve the API server availability?
- redundancy (the API server is stateless)
- achieve a low MTTR
---
## Improving API server availability
- Redundancy implies adding one layer
(between API clients and servers)
- Multiple options available:
- external load balancer
- local load balancer (NGINX, HAProxy... on each node)
- DNS Round-Robin
---
## Achieving a low MTTR
- Run the control plane in highly available VMs
(e.g. many hypervisors can do that, with shared or mirrored storage)
- Run the control plane in highly available containers
(e.g. on another Kubernetes cluster)
---
class: title
Thank you!
---
## A word from my sponsor
- If you liked this presentation and would like me to train your team ...
Contact me: jerome.petazzoni@gmail.com
- Thank you! ♥️
- Also, slides👇🏻
![QR code to the slides](images/qrcode-lisa.png)

slides/lisa/env.md (new file, +77 lines)

@@ -0,0 +1,77 @@
class: title
Let's get this party started!
---
class: pic
![Oprah's "you get a car" picture](images/you-get-a-cluster.jpg)
---
## Everyone gets their own cluster
- Everyone should have a little printed card
- That card has IP address / login / password for a personal cluster
- That cluster will be up for the duration of the tutorial
(but not much longer, alas, because these cost $$$)
---
## How these clusters are deployed
- Create a bunch of cloud VMs
(today: Ubuntu 18.04 on AWS EC2)
- Install binaries, create user account
(with parallel-ssh because it's *fast*)
- Generate the little cards with a Jinja2 template
- If you want to do it for your own tutorial:
check the [prepare-vms](https://github.com/jpetazzo/container.training/tree/master/prepare-vms) directory in the training repo!
---
## Exercises
- Labs and exercises are clearly identified
.exercise[
- This indicates something that you are invited to do
- First, let's log into the first node of the cluster:
```bash
ssh docker@`A.B.C.D`
```
(Replace A.B.C.D with the IP address of the first node)
]
---
## Slides
- These slides are available online
.exercise[
- Open this slide deck in a local browser:
```open
@@SLIDES@@
```
- Select the tutorial link
- Type the number of that slide + ENTER
]

slides/lisa/kubenet.md (new file, +39 lines)

@@ -0,0 +1,39 @@
class: title
Adding more nodes
---
## What do we need to do?
- More machines!
- Can we "just" start kubelet on these machines?
--
- We need to update the kubeconfig file used by kubelet
- It currently uses `localhost:8080` for the API server
- We need to change that!
---
## What we will do
- Get more nodes
- Generate a new kubeconfig file
(pointing to the node running the API server)
- Start more kubelets
- Scale up our deployment
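The kubeconfig part mirrors the commands used elsewhere in these materials (sketch; replace `X.X.X.X` with the address of the node running the API server, and pick any cluster/context name):
```bash
kubectl config set-cluster cni --server http://X.X.X.X:8080
kubectl config set-context cni --cluster cni
kubectl config use-context cni
cp ~/.kube/config ~/kubeconfig
```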
---
class: pic
![Demo time!](images/demo-with-kht.png)

slides/lisa/kubeproxy.md (new file, +34 lines)

@@ -0,0 +1,34 @@
class: title
Pod-to-service networking
---
## What we will do
- Create a service to connect to our pods
(with `kubectl expose deployment`)
- Try to connect to the service's ClusterIP
- If it works: victory
- Else: troubleshoot, try again
.footnote[*Note: the exact commands that I run will be available
in the slides of the tutorial.*]
---
class: pic
![Demo time!](images/demo-with-kht.png)
---
## What have we done?
- Started kube-proxy
- ... which created a bunch of iptables rules
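If you want to peek at those rules (assuming kube-proxy is in its default iptables mode; `KUBE-SERVICES` is the entry-point chain it creates):
```bash
sudo iptables -t nat -L KUBE-SERVICES -n --line-numbers | head -20
```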


@@ -157,7 +157,7 @@ def processchapter(chapter, filename):
return processchapter(chapter.encode("utf-8"), filename)
if isinstance(chapter, str):
if "\n" in chapter:
titles = re.findall("^# (.*)", chapter, re.MULTILINE)
titles = [] # re.findall("^# (.*)", chapter, re.MULTILINE)
slidefooter = ".debug[{}]".format(makelink(filename))
chapter = chapter.replace("\n---\n", "\n{}\n---\n".format(slidefooter))
chapter += "\n" + slidefooter


@@ -1,65 +0,0 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
exclude:
- self-paced
- snap
- btp-auto
- benchmarking
- elk-manual
- prom-manual
chapters:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/ipsec.md
- swarm/swarmtools.md
- swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- - swarm/logging.md
- swarm/metrics.md
- swarm/gui.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md


@@ -1,64 +0,0 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
exclude:
- self-paced
- snap
- btp-manual
- benchmarking
- elk-manual
- prom-manual
chapters:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
#- swarm/hostingregistry.md
#- swarm/testingregistry.md
#- swarm/btp-manual.md
#- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
#- swarm/rollingupdates.md
#- swarm/healthchecks.md
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/ipsec.md
#- swarm/swarmtools.md
- swarm/security.md
#- swarm/secrets.md
#- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- swarm/logging.md
- swarm/metrics.md
#- swarm/stateful.md
#- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md


@@ -1,73 +0,0 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
exclude:
- in-person
- btp-auto
chapters:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- |
name: part-1
class: title, self-paced
Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- |
name: part-2
class: title, self-paced
Part 2
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/swarmnbt.md
- swarm/ipsec.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- swarm/nodeinfo.md
- swarm/swarmtools.md
- - swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- swarm/logging.md
- swarm/metrics.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md


@@ -1,72 +0,0 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
exclude:
- in-person
- btp-auto
chapters:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- |
name: part-1
class: title, self-paced
Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- |
name: part-2
class: title, self-paced
Part 2
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/swarmnbt.md
- swarm/ipsec.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
#- swarm/healthchecks.md
- swarm/nodeinfo.md
- swarm/swarmtools.md
- - swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
#- swarm/logging.md
#- swarm/metrics.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md

slides/talk.yml (new file, +22 lines)

@@ -0,0 +1,22 @@
title: |
Deep Dive into
Kubernetes Internals
for Builders and Operators
(LISA2019 talk)
chat: ""
gitrepo: ""
slides: ""
exclude:
- tutorial-only
chapters:
- lisa/begin.md
- k8s/deploymentslideshow.md
- lisa/dmuc.md
- lisa/kubeproxy.md
- lisa/kubenet.md
- lisa/cni.md
- lisa/end.md

slides/tutorial.yml (new file, +22 lines)

@@ -0,0 +1,22 @@
title: |
Deep Dive into
Kubernetes Internals
for Builders and Operators
(LISA2019 tutorial)
chat: ""
gitrepo: container.training
slides: https://lisa-2019-10.container.training
exclude:
- talk-only
chapters:
- lisa/begin.md
- k8s/deploymentslideshow.md
- lisa/env.md
- k8s/dmuc.md
- k8s/multinode.md
- k8s/cni.md
- lisa/end.md


@@ -109,8 +109,8 @@ div.pic p {
div.pic img {
display: block;
margin: auto;
max-width: 1210px;
max-height: 550px;
max-width: 100%;
max-height: 100%;
}
div.pic h1, div.pic h2, div.title h1, div.title h2 {
text-align: center;


@@ -28,6 +28,8 @@
var slideshow = remark.create({
ratio: '16:9',
highlightSpans: true,
slideNumberFormat: '#LISA19 — @jpetazzo — %current%/%total%',
countIncrementalSlides: false,
excludedClasses: [@@EXCLUDE@@]
});
</script>