Compare commits


19 Commits

Author SHA1 Message Date
Jérôme Petazzoni
77606044f6 😈 Demonware advanced Kubernetes custom content 2023-12-07 15:31:04 -06:00
Jérôme Petazzoni
dbfda8b458 🐞 Typo fix 2023-12-06 15:31:09 -06:00
Jérôme Petazzoni
c8fc67c995 📃 Update V's name and social media link 2023-12-04 16:41:03 -06:00
Jérôme Petazzoni
28222db2e4 Add 1-second pre-pssh delay
Seems to help with AT&T fiber router.
(Actually it takes a longer delay to make a difference,
like 10 seconds, but this patch makes the delay configurable.)
2023-12-04 16:38:33 -06:00
Jérôme Petazzoni
a38f930858 📦 Use new k8s package repositories 2023-12-03 21:33:25 -06:00
Jérôme Petazzoni
2cef200726 Add DMUC+RBAC exercises 2023-12-03 15:38:43 -06:00
Jérôme Petazzoni
1f77a52137 📃 Flesh out upgrade information
Add the official policy (which is to drain nodes before upgrading),
and give some explanations about when it may/may not be fine to
upgrade without draining nodes.
2023-11-30 16:45:11 -06:00
Jérôme Petazzoni
b188e0f8a9 🔧 Mention priorityClasses around resource pressure 2023-11-30 16:10:12 -06:00
Jérôme Petazzoni
ac203a128d Add content about disruptions and PDB 2023-11-30 15:36:32 -06:00
Jérôme Petazzoni
a9920e5cf0 🌐 Add IPv6 support in netlify DNS scriptlet 2023-11-30 15:32:03 -06:00
Jérôme Petazzoni
d1047f950d 📃 Update resource limits to add ephemeral-storage 2023-11-29 14:23:24 -06:00
Jérôme Petazzoni
e380509ffe 💈 Tweak CSS for consistent spacing after titles 2023-11-29 14:22:54 -06:00
Jérôme Petazzoni
b5c754211e Mention Validating Admission Policies and CEL 2023-11-24 12:29:44 -06:00
Jérôme Petazzoni
cc57d983b2 🔧 Add Linode portal size for reference 2023-10-30 13:12:20 +01:00
Jérôme Petazzoni
fd86e6079d ✂️ Remove Service Catalog
This doesn't seem to be supported anymore, and looking at
https://github.com/kubernetes-retired/service-catalog/tree/master
it even looks like the whole thing might be deprecated?
2023-10-26 18:20:09 +02:00
Jérôme Petazzoni
08f2e76082 🐞 Fix a couple of typos 2023-10-26 17:53:53 +02:00
Jérôme Petazzoni
db848767c1 Update kubebuilder instructions for new controller semantics 2023-10-26 17:49:26 +02:00
Jérôme Petazzoni
c07f52c493 🔧 Add function to delete CloudFlare DNS records 2023-10-22 09:20:39 +02:00
Jérôme Petazzoni
016c8fc863 🔧 Add GP2 instance size to portal env (for reference) 2023-10-17 10:17:29 +02:00
49 changed files with 1403 additions and 2358 deletions

View File

@@ -0,0 +1,13 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: my-pdb
spec:
#minAvailable: 2
#minAvailable: 90%
maxUnavailable: 1
#maxUnavailable: 10%
selector:
matchLabels:
app: my-app
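The commented-out lines show the alternative knobs (`minAvailable`, absolute or percentage). A quick way to sanity-check a budget like this one, assuming the manifest is saved as `my-pdb.yaml` and some `app: my-app` pods already exist:

```bash
kubectl apply -f my-pdb.yaml
# ALLOWED DISRUPTIONS shows how many pods can currently be evicted
kubectl get pdb my-pdb
# describe shows warning events, e.g. if the selector matches no controller
kubectl describe pdb my-pdb
```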

View File

@@ -10,9 +10,18 @@ fi
. ~/creds/creds.cloudflare.dns
cloudflare() {
case "$1" in
GET|POST|DELETE)
METHOD="$1"
shift
;;
*)
METHOD=""
;;
esac
URI=$1
shift
http https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
http --ignore-stdin $METHOD https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
}
_list_zones() {
@@ -32,6 +41,15 @@ _populate_zone() {
done
}
_clear_zone() {
ZONE_ID=$(_get_zone_id $1)
for RECORD_ID in $(
cloudflare zones/$ZONE_ID/dns_records | jq -r .result[].id
); do
cloudflare DELETE zones/$ZONE_ID/dns_records/$RECORD_ID
done
}
_add_zone() {
cloudflare zones "name=$1"
}
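With the `METHOD` handling added above, the wrapper can issue explicit verbs as well as plain reads; a hypothetical session (the zone name is a placeholder):

```bash
# list zone names (no verb given, so httpie infers GET)
cloudflare zones | jq -r '.result[].name'

# wipe every DNS record in a zone, using the new DELETE support
_clear_zone example.com
```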

View File

@@ -12,7 +12,7 @@
echo "$0 del <recordid>"
echo ""
echo "Example to create a A record for eu.container.training:"
echo "$0 add eu 185.145.250.0"
echo "$0 add eu A 185.145.250.0"
echo ""
exit 1
}
@@ -49,27 +49,29 @@ ZONE_ID=$(netlify dns_zones |
_list() {
netlify dns_zones/$ZONE_ID/dns_records |
jq -r '.[] | select(.type=="A") | [.hostname, .type, .value, .id] | @tsv'
jq -r '.[] | select(.type=="A" or .type=="AAAA") | [.hostname, .type, .value, .id] | @tsv' |
sort |
column --table
}
_add() {
NAME=$1.$DOMAIN
ADDR=$2
TYPE=$2
VALUE=$3
# It looks like if we create two identical records, then delete one of them,
# Netlify DNS ends up in a weird state (the name doesn't resolve anymore even
# though it's still visible through the API and the website?)
if netlify dns_zones/$ZONE_ID/dns_records |
jq '.[] | select(.hostname=="'$NAME'" and .type=="A" and .value=="'$ADDR'")' |
jq '.[] | select(.hostname=="'$NAME'" and .type=="'$TYPE'" and .value=="'$VALUE'")' |
grep .
then
echo "It looks like that record already exists. Refusing to create it."
exit 1
fi
netlify dns_zones/$ZONE_ID/dns_records type=A hostname=$NAME value=$ADDR ttl=300
netlify dns_zones/$ZONE_ID/dns_records type=$TYPE hostname=$NAME value=$VALUE ttl=300
netlify dns_zones/$ZONE_ID/dns_records |
jq '.[] | select(.hostname=="'$NAME'")'
@@ -88,7 +90,7 @@ case "$1" in
_list
;;
add)
_add $2 $3
_add $2 $3 $4
;;
del)
_del $2

View File

@@ -421,18 +421,18 @@ _cmd_kubebins() {
TAG=$1
need_tag
##VERSION##
if [ "$KUBEVERSION" = "" ]; then
KUBEVERSION="$(curl -fsSL https://cdn.dl.k8s.io/release/stable.txt | sed s/^v//)"
fi
##VERSION##
case "$KUBEVERSION" in
1.19.*)
ETCD_VERSION=v3.4.13
CNI_VERSION=v0.8.7
;;
*)
ETCD_VERSION=v3.5.9
ETCD_VERSION=v3.5.10
CNI_VERSION=v1.3.0
;;
esac
@@ -466,24 +466,36 @@ _cmd_kubepkgs() {
TAG=$1
need_tag
if [ "$KUBEVERSION" ]; then
pssh "
sudo tee /etc/apt/preferences.d/kubernetes <<EOF
# Prior September 2023, there was a single Kubernetes package repo that
# contained packages for all versions, so we could just add that repo
# and install whatever was the latest version available there.
# Things have changed (versions after September 2023, e.g. 1.28.3 are
# not in the old repo) and now there is a different repo for each
# minor version, so we need to figure out what minor version we are
# installing to add the corresponding repo.
if [ "$KUBEVERSION" = "" ]; then
KUBEVERSION="$(curl -fsSL https://cdn.dl.k8s.io/release/stable.txt | sed s/^v//)"
fi
KUBEREPOVERSION="$(echo $KUBEVERSION | cut -d. -f1-2)"
# Since the new repo doesn't have older versions, add a safety check here.
MINORVERSION="$(echo $KUBEVERSION | cut -d. -f2)"
if [ "$MINORVERSION" -lt 24 ]; then
die "Cannot install kubepkgs for versions before 1.24."
fi
pssh "
sudo tee /etc/apt/preferences.d/kubernetes <<EOF
Package: kubectl kubeadm kubelet
Pin: version $KUBEVERSION-*
Pin-Priority: 1000
EOF"
fi
# As of February 27th, 2023, packages.cloud.google.com seems broken
# (serves HTTP 500 errors for the GPG key), so let's pre-load that key.
pssh -I "sudo apt-key add -" < lib/kubernetes-apt-key.gpg
# Install packages
pssh --timeout 200 "
#curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg |
#sudo apt-key add - &&
echo deb http://apt.kubernetes.io/ kubernetes-xenial main |
curl -fsSL https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/Release.key |
gpg --dearmor | sudo tee /etc/apt/keyrings/kubernetes-apt-keyring.gpg &&
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/ /' |
sudo tee /etc/apt/sources.list.d/kubernetes.list"
pssh --timeout 200 "
sudo apt-get update -q &&
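After running `kubepkgs`, a quick check on one of the nodes confirms that the pin and the per-minor-version repo line up (a sketch; run it on a deployed node):

```bash
# every candidate version should come from pkgs.k8s.io and match the pinned minor
apt-cache madison kubeadm
# the pin from /etc/apt/preferences.d/kubernetes should show priority 1000
apt-cache policy kubelet
```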

View File

@@ -17,6 +17,12 @@ pssh() {
echo "[parallel-ssh] $@"
# There are some routers that really struggle with the number of TCP
# connections that we open when deploying large fleets of clusters.
# We're adding a 1 second delay here, but this can be cranked up if
# necessary - or down to zero, too.
sleep ${PSSH_DELAY_PRE-1}
$(which pssh || which parallel-ssh) -h $HOSTFILE -l ubuntu \
--par ${PSSH_PARALLEL_CONNECTIONS-100} \
--timeout 300 \
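The new delay can be tuned per-invocation through the environment; for instance (per the commit message, ~10 seconds is what actually helped with the AT&T router; the `workshopctl` invocation is illustrative of this repo's prepare-vms driver):

```bash
# crank the delay up for a struggling router...
PSSH_DELAY_PRE=10 ./workshopctl deploy ...
# ...or turn it off entirely on a healthy network
PSSH_DELAY_PRE=0 ./workshopctl deploy ...
```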

View File

@@ -7,7 +7,7 @@ USER_PASSWORD=training
# For a list of old versions, check:
# https://kubernetes.io/releases/patch-releases/#non-active-branch-history
KUBEVERSION=1.22.5
KUBEVERSION=1.24.14
STEPS="
wait

View File

@@ -1,3 +1,6 @@
#export TF_VAR_node_size=GP2.4
#export TF_VAR_node_size=g6-standard-6
CLUSTERSIZE=1
CLUSTERPREFIX=CHANGEME

View File

@@ -1,68 +0,0 @@
title: |
Docker Intensif
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2023-09-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- # DAY 2
- containers/Container_Networking_Basics.md
- containers/Local_Development_Workflow.md
- containers/Container_Network_Model.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- # DAY 3
- containers/Start_And_Attach.md
- containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- containers/Dockerfile_Tips.md
- containers/Advanced_Dockerfiles.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Exercise_Dockerfile_Advanced.md
- # DAY 4
- containers/Buildkit.md
- containers/Network_Drivers.md
- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
- containers/Orchestration_Overview.md
#- containers/Docker_Machine.md
#- containers/Init_Systems.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Containers_From_Scratch.md
#- containers/Container_Engines.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md
- shared/thankyou.md
#- containers/links.md

View File

@@ -1,92 +0,0 @@
title: |
Fondamentaux Kubernetes
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2023-09-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
- exercises/k8sfundamentals-brief.md
- exercises/yaml-brief.md
- exercises/localcluster-brief.md
- exercises/healthchecks-brief.md
- shared/toc.md
- # 1
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
- k8s/kubectl-run.md
- k8s/kubectlexpose.md
- k8s/service-types.md
- k8s/kubenet.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- exercises/k8sfundamentals-details.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
- # 2
- shared/yaml.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/yamldeploy.md
- k8s/namespaces.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/setup-overview.md
- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
- k8s/kubectlproxy.md
- exercises/yaml-details.md
- exercises/localcluster-details.md
- # 3
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/rollout.md
- k8s/healthchecks.md
#- k8s/healthchecks-more.md
- k8s/dashboard.md
- k8s/k9s.md
- k8s/tilt.md
- exercises/healthchecks-details.md
- # 4
- k8s/ingress.md
#- k8s/ingress-tls.md
#- k8s/ingress-advanced.md
- k8s/volumes.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/batch-jobs.md
- shared/thankyou.md

View File

@@ -1,43 +0,0 @@
title: |
Packaging d'applications
pour Kubernetes
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2023-09-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics-julien.md
- k8s/intro.md
- shared/about-slides.md
- k8s/prereqs-advanced.md
- shared/handson.md
- shared/webssh.md
- shared/connecting.md
#- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/toc.md
-
- k8s/demo-apps.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- exercises/helm-generic-chart-details.md
-
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
- exercises/helm-umbrella-chart-details.md
-
- k8s/ytt.md
- shared/thankyou.md

View File

@@ -1,70 +0,0 @@
title: |
Kubernetes Avancé
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2023-09-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- k8s/prereqs-advanced.md
- shared/handson.md
- shared/webssh.md
- shared/connecting.md
- shared/toc.md
- exercises/netpol-brief.md
- exercises/sealed-secrets-brief.md
- exercises/kyverno-ingress-domain-name-brief.md
- #1
- k8s/demo-apps.md
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/sealed-secrets.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/ingress-tls.md
- exercises/netpol-details.md
- exercises/sealed-secrets-details.md
- #2
- k8s/extending-api.md
- k8s/crd.md
- k8s/operators.md
- k8s/admission.md
- k8s/cainjector.md
- k8s/kyverno.md
- exercises/kyverno-ingress-domain-name-details.md
- #3
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
- k8s/apiserver-deepdive.md
- k8s/aggregation-layer.md
- k8s/hpa-v2.md
- #4
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/eck.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/owners-and-dependents.md
- k8s/events.md
- k8s/finalizers.md
- shared/thankyou.md

View File

@@ -1,59 +0,0 @@
title: |
Opérer Kubernetes
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2023-09-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics-ludovic.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
# DAY 1
-
- k8s/prereqs-advanced.md
- shared/handson.md
- k8s/architecture.md
- k8s/deploymentslideshow.md
- k8s/dmuc-easy.md
-
- k8s/dmuc-medium.md
- k8s/dmuc-hard.md
- k8s/cni-internals.md
#- k8s/interco.md
- k8s/apilb.md
-
- k8s/internal-apis.md
- k8s/staticpods.md
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
#- k8s/cloud-controller-manager.md
-
- k8s/control-plane-auth.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- shared/thankyou.md
#-
# |
# # (Extra content)
# - k8s/apiserver-deepdive.md
# - k8s/setup-overview.md
# - k8s/setup-devel.md
# - k8s/setup-managed.md
# - k8s/setup-selfhosted.md

View File

@@ -2,6 +2,7 @@
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /kube.yml.html 200!
# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
@@ -23,5 +24,3 @@
# Survey form
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform
/ /highfive.html 200!

View File

@@ -0,0 +1,11 @@
## Exercise — Enable RBAC on our custom cluster
- Enable RBAC on a manually-deployed control plane
- This involves:
- generating different certificates
- distributing the certificates to the controllers
- enabling the proper authorizers in the API server

View File

@@ -0,0 +1,117 @@
# Exercise — Enable RBAC on our custom cluster
- We want to enable RBAC on the "polykube" cluster
(it doesn't matter whether we have 1 or multiple nodes)
- Ideally, we want to have, for instance:
- one key, certificate, and kubeconfig for a cluster admin
- one key, certificate, and kubeconfig for a user
<br/>
(with permissions in a single namespace)
- Bonus points: enable the NodeAuthorizer too!
- Check the following slides for hints
---
## Step 1
- Enable RBAC itself!
--
- This is done with an API server command-line flag
--
- Check [the documentation][kube-apiserver-doc] to see the flag
--
- For now, only enable `--authorization-mode=RBAC`
[kube-apiserver-doc]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
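On a hand-rolled control plane, this means adding the flag to the API server invocation; a minimal sketch (the surrounding unit file or start script depends on how "polykube" was deployed, and the other flags shown are placeholders):

```bash
kube-apiserver \
  --authorization-mode=RBAC \
  --etcd-servers=http://localhost:2379   # ...plus all the other existing flags, unchanged
```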
---
## Step 2
- Our certificate doesn't work anymore, we need to generate a new one
--
- We need a certificate that will have *some* (ideally *all*) permissions
--
- Two options:
- use the equivalent of "root" (identity that completely skips permission checks)
- a "non-root" identity but which is granted permissions with RBAC
--
- The "non-root" option looks nice, but to grant permissions, we need permissions
- So let's start with the equivalent of "root"!
--
- The Kubernetes equivalent of `root` is the group `system:masters`
---
## Step 2, continued
- We need to generate a certificate for a user belonging to group `system:masters`
--
- In Kubernetes certificates, groups are encoded with the "organization" field
--
- That corresponds to `O=system:masters`
--
- In other words we need to generate a new certificate, but with a subject of:
`/CN=admin/O=system:masters/` (the `CN` doesn't matter)
- That certificate should be able to interact with the API server, like before
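A hedged sketch with `openssl`, assuming the cluster CA material is at hand as `ca.crt`/`ca.key` (file names are placeholders):

```bash
# new key + CSR with the magic group in the O= field
openssl genrsa -out admin.key 2048
openssl req -new -key admin.key -subj "/CN=admin/O=system:masters" -out admin.csr
# sign it with the cluster CA
openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key \
        -CAcreateserial -days 365 -out admin.crt
```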
---
## Step 3
- Now, all our controllers have permission issues
- We need to either:
- use that `system:masters` cert everywhere
- generate different certs for every controller, with the proper identities
- Suggestion: use `system:masters` everywhere to begin with
(and make sure the cluster is back on its feet)
---
## Step 4
At this point, there are two possible forks in the road:
1. Generate certs for the control plane controllers
(`kube-controller-manager`, `kube-scheduler`)
2. Generate cert(s) for the node(s) and enable `NodeAuthorizer`
Good luck!
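For the kubeconfig files mentioned at the start, `kubectl config` can assemble everything; a sketch under the same file-name assumptions (the API server address is a placeholder):

```bash
export KUBECONFIG=admin.kubeconfig
kubectl config set-cluster polykube --server=https://localhost:6443 \
        --certificate-authority=ca.crt --embed-certs
kubectl config set-credentials admin \
        --client-certificate=admin.crt --client-key=admin.key --embed-certs
kubectl config set-context admin@polykube --cluster=polykube --user=admin
kubectl config use-context admin@polykube
```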

View File

@@ -1,4 +1,4 @@
# Exercise — Sealed Secrets
# Exercise — Sealed Secrets (and more RBAC!)
This is a "combo exercise" to practice the following concepts:

View File

@@ -1,117 +0,0 @@
<?xml version="1.0"?>
<html>
<head>
<style>
td {
background: #ccc;
padding: 1em;
}
</style>
</head>
<body>
<table>
<tr>
<td>Mardi 26 septembre 2023</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Mercredi 27 septembre 2023</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Jeudi 28 septembre 2023</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Vendredi 29 septembre 2023</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Mardi 3 octobre 2023</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Mercredi 4 octobre 2023</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Jeudi 5 octobre 2023</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Vendredi 6 octobre 2023</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Mardi 10 octobre 2023</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Mercredi 11 octobre 2023</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Jeudi 12 octobre 2023</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Vendredi 13 octobre 2023</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Lundi 16 octobre 2023</td>
<td>
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
</td>
</tr>
<tr>
<td>Mardi 17 octobre 2023</td>
<td>
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
</td>
</tr>
<tr>
<td>Mercredi 18 octobre 2023</td>
<td>
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
</td>
</tr>
<tr>
<td>Jeudi 19 octobre 2023</td>
<td>
<a href="5.yml.html">Opérer Kubernetes</a>
</td>
</tr>
<tr>
<td>Vedredi 20 octobre 2023</td>
<td>
<a href="5.yml.html">Opérer Kubernetes</a>
</td>
</tr>
</table>
</body>
</html>

View File

@@ -1,72 +0,0 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
#- containers/Start_And_Attach.md
- containers/Naming_And_Inspecting.md
#- containers/Labels.md
- containers/Getting_Inside.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Container_Networking_Basics.md
#- containers/Network_Drivers.md
- containers/Local_Development_Workflow.md
- containers/Container_Network_Model.md
- shared/yaml.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Multi_Stage_Builds.md
#- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
#- containers/Docker_Machine.md
#- containers/Advanced_Dockerfiles.md
#- containers/Buildkit.md
#- containers/Init_Systems.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Container_Engines.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md
#- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md

View File

@@ -1,73 +0,0 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
content:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Windows_Containers.md
- containers/Working_With_Volumes.md
- shared/yaml.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Buildkit.md
- containers/Init_Systems.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Pods_Anatomy.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md

View File

@@ -1,81 +0,0 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Dockerfile_Tips.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Exercise_Dockerfile_Advanced.md
-
- containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Start_And_Attach.md
- containers/Getting_Inside.md
- containers/Resource_Limits.md
- # DAY 2
- containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
-
- containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- shared/yaml.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Installing_Docker.md
- containers/Container_Engines.md
- containers/Init_Systems.md
- containers/Advanced_Dockerfiles.md
- containers/Buildkit.md
-
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Orchestration_Overview.md
-
- shared/thankyou.md
- containers/links.md
#-
#- containers/Docker_Machine.md
#- containers/Ambassadors.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md

View File

@@ -543,6 +543,23 @@ Shell to the rescue!
(it should only allow values of `red`, `green`, `blue`)
---
## Coming soon...
- Kubernetes Validating Admission Policies
- Integrated with the Kubernetes API server
- Lets us define policies using [CEL (Common Expression Language)][cel-spec]
- Available in beta in Kubernetes 1.28 <!-- ##VERSION## -->
- Check this [CNCF Blog Post][cncf-blog-vap] for more details
[cncf-blog-vap]: https://www.cncf.io/blog/2023/09/14/policy-management-in-kubernetes-is-changing/
[cel-spec]: https://github.com/google/cel-spec
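As a taste of what that looks like, here is a hedged sketch of such a policy (API shapes as of the 1.28 beta; a `ValidatingAdmissionPolicyBinding` is also needed before it takes effect):

```bash
kubectl apply -f- <<EOF
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingAdmissionPolicy
metadata:
  name: max-replicas
spec:
  failurePolicy: Fail
  matchConstraints:
    resourceRules:
    - apiGroups: ["apps"]
      apiVersions: ["v1"]
      operations: ["CREATE", "UPDATE"]
      resources: ["deployments"]
  validations:
  # CEL expression evaluated against the incoming object
  - expression: "object.spec.replicas <= 5"
    message: "deployments cannot have more than 5 replicas"
EOF
```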
???
:EN:- Dynamic admission control with webhooks

View File

@@ -856,7 +856,7 @@ class: extra-details
- To learn more about Kubernetes attacks and threat models around RBAC:
📽️ [Hacking into Kubernetes Security for Beginners](https://www.youtube.com/watch?v=mLsCm9GVIQg)
by [Ellen Körbes](https://twitter.com/ellenkorbes)
by [V Körbes](https://twitter.com/veekorbes)
and [Tabitha Sable](https://twitter.com/TabbySable)
---

View File

@@ -507,6 +507,86 @@ kubeadm should now agree to upgrade to 1.23.X.
---
## And now, was that a good idea?
--
**Almost!**
--
- The official recommendation is to *drain* a node before performing node maintenance
(migrate all workloads off the node before upgrading it)
- How do we do that?
- Is it really necessary?
- Let's see!
---
## Draining a node
- This can be achieved with the `kubectl drain` command, which will:
- *cordon* the node (prevent new pods from being scheduled there)
- *evict* all the pods running on the node (delete them gracefully)
- the evicted pods will automatically be recreated somewhere else
- evictions might be blocked in some cases (Pod Disruption Budgets, `emptyDir` volumes)
- Once the node is drained, it can safely be upgraded, restarted...
- Once it's ready, it can be put back in commission with `kubectl uncordon`
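In practice, that sequence looks like this (the node name is a placeholder):

```bash
# cordon the node + evict everything except DaemonSet pods
kubectl drain node1 --ignore-daemonsets
# ...upgrade / reboot the node...
# then make it schedulable again
kubectl uncordon node1
```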
---
## Is it necessary?
- When upgrading kubelet from one patch-level version to another:
- it's *probably fine*
- When upgrading system packages:
- it's *probably fine*
- except [when it's not][datadog-systemd-outage]
- When upgrading the kernel:
- it's *probably fine*
- ...as long as we can tolerate a restart of the containers on the node
- ...and them being unavailable for a few minutes (during the reboot)
[datadog-systemd-outage]: https://www.datadoghq.com/blog/engineering/2023-03-08-deep-dive-into-platform-level-impact/
---
## Is it necessary?
- When upgrading kubelet from one minor version to another:
- it *may or may not be fine*
- in some cases (e.g. migrating from Docker to containerd) it *will not*
- Here's what [the documentation][node-upgrade-docs] says:
*Draining nodes before upgrading kubelet ensures that pods are re-admitted and containers are re-created, which may be necessary to resolve some security issues or other important bugs.*
- Do it at your own risk, and if you do, test extensively in staging environments!
[node-upgrade-docs]: https://kubernetes.io/docs/tasks/administer-cluster/cluster-upgrade/#manual-deployments
---
class: extra-details
## Skipping versions

slides/k8s/disruptions.md (new file, 513 lines)
View File

@@ -0,0 +1,513 @@
# Disruptions
In a perfect world...
- hardware never fails
- software never has bugs
- ...and never needs to be updated
- ...and uses a predictable amount of resources
- ...and these resources are infinite anyways
- network latency and packet loss are zero
- humans never make mistakes
--
😬
---
## Disruptions
In the real world...
- hardware will fail randomly (without advance notice)
- software has bugs
- ...and we constantly add new features
- ...and will sometimes use more resources than expected
- ...and these resources are limited
- network latency and packet loss are NOT zero
- humans make mistakes (shutting down the wrong machine, the wrong app...)
---
## Disruptions
- In Kubernetes, a "disruption" is something that stops the execution of a Pod
- There are **voluntary** and **involuntary** disruptions
- voluntary = directly initiated by humans (including by mistake!)
- involuntary = everything else
- In this section, we're going to see what they are and how to prevent them
(or at least, mitigate their effects)
---
## Node outage
- Example: hardware failure (server or network), low-level error
(includes kernel bugs, issues affecting underlying hypervisors or infrastructure...)
- **Involuntary** disruption (even if it results from human error!)
- Consequence: all workloads on that node become unresponsive
- Mitigations:
- scale workloads to at least 2 replicas (or more if quorum is needed)
- add anti-affinity scheduling constraints (to avoid having all pods on the same node)
---
## Node outage play-by-play
- Node goes down (or disconnected from network)
- Its lease (in Namespace `kube-node-lease`) doesn't get renewed
- The controller manager detects that and marks the node as "unreachable"
(this adds both `NoSchedule` and `NoExecute` taints to the node)
- Eventually, the `NoExecute` taint will evict these pods
- This will trigger creation of replacement pods by owner controllers
(except for pods with a stable network identity, e.g. in a Stateful Set!)
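This play-by-play can be observed live (the node name is a placeholder):

```bash
# leases should be renewed every few seconds for healthy nodes
kubectl get lease --namespace kube-node-lease
# after the outage, look for node.kubernetes.io/unreachable taints
kubectl describe node node1 | grep -A2 Taints
```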
---
## Node outage notes
- By default, pods will tolerate the `unreachable:NoExecute` taint for 5 minutes
(a toleration automatically added by the `DefaultTolerationSeconds` admission controller)
- Pods of a Stateful Set don't recover automatically:
- as long as the Pod exists, a replacement Pod can't be created
- the Pod will exist as long as its Node exists
- deleting the Node (manually or automatically) will recover the Pod
---
## Memory/disk pressure
- Example: available memory on a node goes below a specific threshold
(because a pod is using too much memory and no limit was set)
- **Involuntary** disruption
- Consequence: kubelet starts to *evict* some pods
- Mitigations:
- set *resource limits* on containers to prevent them from using too many resources
- set *resource requests* on containers to make sure they don't get evicted
<br/>
(as long as they use less than what they requested)
- make sure that apps don't use more resources than what they've requested
---
## Memory/disk pressure play-by-play
- Memory leak in an application container, slowly causing very high memory usage
- Overall free memory on the node goes below the *soft* or the *hard* threshold
(default hard threshold = 100Mi; default soft threshold = none)
- When reaching the *soft* threshold:
- kubelet waits until the "eviction soft grace period" expires
- then (if resource usage is still above the threshold) it gracefully evicts pods
- When reaching the *hard* threshold:
- kubelet immediately and forcefully evicts pods
---
## Which pods are evicted?
- Kubelet only considers pods that are using *more* than what they requested
(and only for the resource that is under pressure, e.g. RAM or disk usage)
- First, it sorts pods by *priority* (as set with the `priorityClassName` in the pod spec)
- Then, by how much their resource usage exceeds their request
(again, for the resource that is under pressure)
- It evicts pods until enough resources have been freed up
---
## Soft (graceful) vs hard (forceful) eviction
- Soft eviction = graceful shutdown of the pod
(honors the pod's `terminationGracePeriodSeconds` timeout)
- Hard eviction = immediate shutdown of the pod
(kills all containers immediately)
---
## Memory/disk pressure notes
- If resource usage increases *very fast*, kubelet might not catch it fast enough
- For memory: this will trigger the kernel out-of-memory killer
- containers killed by OOM are automatically restarted (no eviction)
- eviction might happen at a later point though (if memory usage stays high)
- For disk: there is no "out-of-disk" killer, but writes will fail
- the `write` system call fails with `errno = ENOSPC` / `No space left on device`
- eviction typically happens shortly after (when kubelet catches up)
- When workloads rely a lot on disk/memory bursts, using `priorityClasses` might help
---
## Memory/disk pressure delays
- By default, no soft threshold is defined
- Defining it requires setting both the threshold and the grace period
- Grace periods can be different for the different types of resources
- When a node is under pressure, kubelet places a `NoSchedule` taint
(to avoid adding more pods while the node is under pressure)
- Once the node is no longer under pressure, kubelet clears the taint
(after waiting an extra timeout, `evictionPressureTransitionPeriod`, 5 min by default)
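These knobs live in the kubelet configuration; a hedged excerpt (the field names come from `KubeletConfiguration`, the values are examples, and where the file lives depends on how kubelet was deployed):

```bash
cat <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
evictionHard:
  memory.available: "100Mi"
evictionSoft:
  memory.available: "500Mi"
evictionSoftGracePeriod:
  memory.available: "1m30s"
evictionPressureTransitionPeriod: "5m"
EOF
```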
---
## Accidental deletion
- Example: developer deletes the wrong Deployment, the wrong Namespace...
- **Voluntary** disruption
(from Kubernetes' perspective!)
- Consequence: application is down
- Mitigations:
- only deploy to production systems through e.g. gitops workflows
- enforce peer review of changes
- only give users limited (e.g. read-only) access to production systems
- use canary deployments (might not catch all mistakes though!)
---
## Bad code deployment
- Example: critical bug introduced, application crashes immediately or is non-functional
- **Voluntary** disruption
(again, from Kubernetes' perspective!)
- Consequence: application is down
- Mitigations:
- readiness probes can mitigate immediate crashes
<br/>
(rolling update continues only when enough pods are ready)
- delayed crashes will require a rollback
<br/>
(manual intervention, or automated by a canary system)
---
## Node shutdown
- Example: scaling down a cluster to save money
- **Voluntary** disruption
- Consequence:
- all workloads running on that node are terminated
- this might disrupt workloads that have too many replicas on that node
- or workloads that should not be interrupted at all
- Mitigations:
- terminate workloads one at a time, coordinating with users
--
🤔
---
## Node shutdown
- Example: scaling down a cluster to save money
- **Voluntary** disruption
- Consequence:
- all workloads running on that node are terminated
- this might disrupt workloads that have too many replicas on that node
- or workloads that should not be interrupted at all
- Mitigations:
- ~~terminate workloads one at a time, coordinating with users~~
- use Pod Disruption Budgets
---
## Pod Disruption Budgets
- A PDB is a kind of *contract* between:
- "admins" = folks maintaining the cluster (e.g. adding/removing/updating nodes)
- "users" = folks deploying apps and workloads on the cluster
- A PDB expresses something like:
*in that particular set of pods, do not "disrupt" more than X at a time*
- Examples:
- in that set of frontend pods, do not disrupt more than 1 at a time
- in that set of worker pods, always have at least 10 ready
<br/>
(do not disrupt them if it would bring down the number of ready pods below 10)
---
## PDB - user side
- Cluster users create a PDB with a manifest like this one:
```yaml
@@INCLUDE[k8s/pod-disruption-budget.yaml]
```
- The PDB must indicate either `minAvailable` or `maxUnavailable`
---
## Rounding logic
- Percentages are rounded **up**
- When specifying `maxUnavailable` as a percentage, this can result in a higher percentage
(e.g. `maxUnavailable: 50%` with 3 pods can result in 2 pods being unavailable!)
---
## Unmanaged pods
- Specifying `minAvailable: X` works all the time
- Specifying `minAvailable: X%` or `maxUnavailable` requires *managed pods*
(pods that belong to a controller, e.g. Replica Set, Stateful Set...)
- This is because the PDB controller needs to know the total number of pods
(given by the `replicas` field, not merely by counting pod objects)
- The PDB controller will try to resolve the controller using the pod selector
- If that fails, the PDB controller will emit warning events
(visible with `kubectl describe pdb ...`)
---
## Zero
- `maxUnavailable: 0` means "do not disrupt my pods"
- Same thing if `minAvailable` is greater than or equal to the number of pods
- In that case, cluster admins are supposed to get in touch with cluster users
- This will prevent fully automated operation
(and some cluster admins' automated systems might not honor that request)
---
## PDB - admin side
- As a cluster admin, we need to follow certain rules
- Only shut down (or restart) a node when no pods are running on that node
(except system pods belonging to Daemon Sets)
- To remove pods running on a node, we should use the *eviction API*
(which will check PDB constraints and honor them)
- To prevent new pods from being scheduled on a node, we can use a *taint*
- These operations are streamlined by `kubectl drain`, which will:
- *cordon* the node (add a `NoSchedule` taint)
- invoke the *eviction API* to remove pods while respecting their PDBs
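The eviction API is just a subresource POST; a hedged sketch using `kubectl proxy` (pod and namespace names are placeholders):

```bash
kubectl proxy &   # exposes the API on 127.0.0.1:8001 by default
curl -X POST http://127.0.0.1:8001/api/v1/namespaces/default/pods/my-pod/eviction \
     -H "Content-Type: application/json" \
     -d '{"apiVersion": "policy/v1", "kind": "Eviction",
          "metadata": {"name": "my-pod", "namespace": "default"}}'
# returns HTTP 429 (Too Many Requests) if a PDB forbids the eviction
```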
---
## Theory vs practice
- `kubectl drain` won't evict pods using `emptyDir` volumes
(unless the `--delete-emptydir-data` flag is passed as well)
- Make sure that `emptyDir` volumes don't hold anything important
(they shouldn't, but... who knows!)
- Kubernetes lacks a standard way for users to express:
*this `emptyDir` volume can/cannot be safely deleted*
- If a PDB forbids an eviction, this requires manual coordination
---
class: extra-details
## Unhealthy pod eviction policy
- By default, unhealthy pods can only be evicted if PDB allows it
(unhealthy = running, but not ready)
- In many cases, unhealthy pods aren't healthy anyway, and can be removed
- This behavior is enabled by setting the appropriate field in the PDB manifest:
```yaml
spec:
unhealthyPodEvictionPolicy: AlwaysAllow
```
---
## Node upgrade
- Example: upgrading kubelet or the Linux kernel on a node
- **Voluntary** disruption
- Consequence:
- all workloads running on that node are temporarily interrupted, and restarted
- this might disrupt these workloads
- Mitigations:
- migrate workloads off the node first (as if we were shutting it down)
---
## Node upgrade notes
- Is it necessary to drain a node before doing an upgrade?
- From [the documentation][node-upgrade-docs]:
*Draining nodes before upgrading kubelet ensures that pods are re-admitted and containers are re-created, which may be necessary to resolve some security issues or other important bugs.*
- It's *probably* safe to upgrade in-place for:
- kernel upgrades
- kubelet patch-level upgrades (1.X.Y → 1.X.Z)
- It's *probably* better to drain the node for minor-version kubelet upgrades (1.X → 1.Y)
- In doubt, test extensively in staging environments!
[node-upgrade-docs]: https://kubernetes.io/docs/tasks/administer-cluster/cluster-upgrade/#manual-deployments
---
## Manual rescheduling
- Example: moving workloads around to accommodate noisy neighbors or other issues
(e.g. pod X is doing a lot of disk I/O and this is starving other pods)
- **Voluntary** disruption
- Consequence:
- the moved workloads are temporarily interrupted
- Mitigations:
- define an appropriate number of replicas, declare PDBs
- use the [eviction API][eviction-API] to move workloads
[eviction-API]: https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/
???
:EN:- Voluntary and involuntary disruptions
:EN:- Pod Disruption Budgets
:FR:- "Disruptions" volontaires et involontaires
:FR:- Pod Disruption Budgets

View File

@@ -462,7 +462,7 @@ The "context" section references the "cluster" and "credentials" that we defined
---
## Review the kubeconfig filfe
## Review the kubeconfig file
The kubeconfig file should look like this:

View File

@@ -339,34 +339,12 @@ class: extra-details
---
## Service catalog
- *Service catalog* is another extension mechanism
- It's not extending the Kubernetes API strictly speaking
(but it still provides new features!)
- It doesn't create new types; it uses:
- ClusterServiceBroker
- ClusterServiceClass
- ClusterServicePlan
- ServiceInstance
- ServiceBinding
- It uses the Open service broker API
---
## Documentation
- [Custom Resource Definitions: when to use them](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
- [Custom Resources Definitions: how to use them](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)
- [Service Catalog](https://kubernetes.io/docs/concepts/extend-kubernetes/service-catalog/)
- [Built-in Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/)
- [Dynamic Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)

slides/k8s/hpa-v2-keda.md (new file, 314 lines)
View File

@@ -0,0 +1,314 @@
# Scaling with custom metrics
- The HorizontalPodAutoscaler v1 can only scale on Pod CPU usage
- Sometimes, we need to scale using other metrics:
- memory
- requests per second
- latency
- active sessions
- items in a work queue
- ...
- The HorizontalPodAutoscaler v2 can do it!
---
## Requirements
⚠️ Autoscaling on custom metrics is fairly complex!
- We need some metrics system
(Prometheus is a popular option, but others are possible too)
- We need our metrics (latency, traffic...) to be fed in the system
(with Prometheus, this might require a custom exporter)
- We need to expose these metrics to Kubernetes
(Kubernetes doesn't "speak" the Prometheus API)
- Then we can set up autoscaling!
---
## The plan
- We will deploy the DockerCoins demo app
(one of its components has a bottleneck; its latency will increase under load)
- We will use Prometheus to collect and store metrics
- We will deploy a tiny HTTP latency monitor (a Prometheus *exporter*)
- We will then use KEDA with a "Prometheus Scaler"
---
## Deploying DockerCoins
- That's the easy part!
.lab[
- Create a new namespace and switch to it:
```bash
kubectl create namespace customscaling
kns customscaling
```
- Deploy DockerCoins, and scale up the `worker` Deployment:
```bash
kubectl apply -f ~/container.training/k8s/dockercoins.yaml
kubectl scale deployment worker --replicas=10
```
]
---
## Current state of affairs
- The `rng` service is a bottleneck
(it cannot handle more than 10 requests/second)
- With enough traffic, its latency increases
(by about 100ms per `worker` Pod after the 3rd worker)
.lab[
- Check the `webui` port and open it in your browser:
```bash
kubectl get service webui
```
- Check the `rng` ClusterIP and test it with e.g. `httping`:
```bash
kubectl get service rng
```
]
---
## Measuring latency
- We will use a tiny custom Prometheus exporter, [httplat](https://github.com/jpetazzo/httplat)
- `httplat` exposes Prometheus metrics on port 9080 (by default)
- It monitors exactly one URL, that must be passed as a command-line argument
.lab[
- Deploy `httplat`:
```bash
kubectl create deployment httplat --image=jpetazzo/httplat -- httplat http://rng/
```
- Expose it:
```bash
kubectl expose deployment httplat --port=9080
```
]
---
class: extra-details
## Measuring latency in the real world
- We are using this tiny custom exporter for simplicity
- A more common method to collect latency is to use a service mesh
- A service mesh can usually collect latency for *all* services automatically
---
## Install Prometheus
- We will use the Prometheus community Helm chart
(because we can configure it dynamically with annotations)
.lab[
- If it's not installed yet on the cluster, install Prometheus:
```bash
helm upgrade --install prometheus prometheus \
--repo https://prometheus-community.github.io/helm-charts \
--namespace prometheus --create-namespace \
--set server.service.type=NodePort \
--set server.service.nodePort=30090 \
--set server.persistentVolume.enabled=false \
--set alertmanager.enabled=false
```
]
---
## Configure Prometheus
- We can use annotations to tell Prometheus to collect the metrics
.lab[
- Tell Prometheus to "scrape" our latency exporter:
```bash
kubectl annotate service httplat \
prometheus.io/scrape=true \
prometheus.io/port=9080 \
prometheus.io/path=/metrics
```
]
If you deployed Prometheus differently, you might have to configure it manually.
You'll need to instruct it to scrape http://httplat.customscaling.svc:9080/metrics.
---
## Make sure that metrics get collected
- Before moving on, confirm that Prometheus has our metrics
.lab[
- Connect to Prometheus
(if you installed it like instructed above, it is exposed as a NodePort on port 30090)
- Check that `httplat` metrics are available
- You can try to graph the following PromQL expression:
```
rate(httplat_latency_seconds_sum[2m])/rate(httplat_latency_seconds_count[2m])
```
]
---
## Troubleshooting
- Make sure that the exporter works:
- get the ClusterIP of the exporter with `kubectl get svc httplat`
- `curl http://<ClusterIP>:9080/metrics`
- check that the result includes the `httplat` histogram
- Make sure that Prometheus is scraping the exporter:
- go to `Status` / `Targets` in Prometheus
- make sure that `httplat` shows up in there
---
## Installing KEDA
- Multiple possibilities, as explained in the [documentation](https://keda.sh/docs/2.12/deploy/)
- For simplicity we can use the YAML version with admission webhooks
---
## Creating a "Scaler"
- With KEDA, instead of creating an HPA policy directly, we create a "Scaled Object"
- The "Scaled Object" will take care of:
- registering and exposing our custom metric in KEDA's aggregation layer
- creating the HPA policy that consumes that metric
- See the [Prometheus Scaler documentation](https://keda.sh/docs/2.12/scalers/prometheus/)
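A hedged sketch of such a Scaled Object, targeting the 100ms latency discussed on the next slides (the Prometheus address assumes the Helm install shown earlier; field values are examples):

```bash
kubectl apply -f- <<EOF
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: rng
spec:
  scaleTargetRef:
    name: rng            # the Deployment to scale
  minReplicaCount: 1
  maxReplicaCount: 10
  triggers:
  - type: prometheus
    metricType: Value
    metadata:
      serverAddress: http://prometheus-server.prometheus.svc
      query: rate(httplat_latency_seconds_sum[2m])/rate(httplat_latency_seconds_count[2m])
      threshold: "0.1"   # 100ms, expressed in seconds
EOF
```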
---
## Witness the marvel of custom autoscaling
(Sort of)
- After a short while, the `rng` Deployment will scale up
- It should scale up until the latency drops below 100ms
(and continue to scale up a little bit more after that)
- Then, since the latency will be well below 100ms, it will scale down
- ... and back up again, etc.
(See pictures on next slides!)
---
class: pic
![Latency over time](images/hpa-v2-pa-latency.png)
---
class: pic
![Number of pods over time](images/hpa-v2-pa-pods.png)
---
## What's going on?
- The autoscaler's information is slightly out of date
(not by much; probably between 1 and 2 minutes)
- It's enough to cause the oscillations to happen
- One possible fix is to tell the autoscaler to wait a bit after each action
- It will reduce oscillations, but will also slow down its reaction time
(and therefore, how fast it reacts to a peak of traffic)
---
## What's going on? Take 2
- As soon as the measured latency is *significantly* below our target (100ms) ...
the autoscaler tries to scale down
- If the latency is measured at 20ms ...
the autoscaler will try to *divide the number of pods by five!*
- One possible solution: apply a formula to the measured latency,
so that values between e.g. 10 and 100ms get very close to 100ms.
- Another solution: instead of targeting a specific latency,
target a 95th percentile latency or something similar, using
a more advanced PromQL expression (and leveraging the fact that
we have histograms instead of raw values).
???
:EN:- Autoscaling with custom metrics
:FR:- Suivi de charge avancé (HPAv2)

View File

@@ -109,7 +109,7 @@ class: extra-details
- Install Go
(on our VMs: `sudo snap install go --classic`)
(on our VMs: `sudo snap install go --classic` or `sudo apk add go`)
- Install kubebuilder
@@ -250,7 +250,7 @@ spec:
## Loading an object
Open `controllers/machine_controller.go`.
Open `internal/controllers/machine_controller.go`.
Add that code in the `Reconcile` method, at the `TODO(user)` location:
@@ -505,7 +505,7 @@ if machine.Spec.SwitchPosition != "down" {
changeAt := machine.Status.SeenAt.Time.Add(5 * time.Second)
if now.Time.After(changeAt) {
machine.Spec.SwitchPosition = "down"
machine.Status.SeenAt = nil
machine.Status.SeenAt = nil
if err := r.Update(ctx, &machine); err != nil {
logger.Info("error updating switch position")
return ctrl.Result{}, client.IgnoreNotFound(err)
@@ -629,17 +629,17 @@ Note: this time, only create a new custom resource; not a new controller.
- We can retrieve associated switches like this:
```go
var switches uselessv1alpha1.SwitchList
var switches uselessv1alpha1.SwitchList
if err := r.List(ctx, &switches,
client.InNamespace(req.Namespace),
client.MatchingLabels{"machine": req.Name},
); err != nil {
logger.Error(err, "unable to list switches of the machine")
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if err := r.List(ctx, &switches,
client.InNamespace(req.Namespace),
client.MatchingLabels{"machine": req.Name},
); err != nil {
logger.Error(err, "unable to list switches of the machine")
return ctrl.Result{}, client.IgnoreNotFound(err)
}
logger.Info("Found switches", "switches", switches)
logger.Info("Found switches", "switches", switches)
```
---
@@ -649,13 +649,13 @@ Note: this time, only create a new custom resource; not a new controller.
- Each time we reconcile a Machine, let's update its status:
```go
status := ""
for _, sw := range switches.Items {
status += string(sw.Spec.Position[0])
}
machine.Status.Positions = status
if err := r.Status().Update(ctx, &machine); err != nil {
...
status := ""
for _, sw := range switches.Items {
status += string(sw.Spec.Position[0])
}
machine.Status.Positions = status
if err := r.Status().Update(ctx, &machine); err != nil {
...
```
- Run the controller and check that POSITIONS gets updated
@@ -721,7 +721,7 @@ if err := r.Create(ctx, &sw); err != nil { ...
Define the following helper function:
```go
func (r *MachineReconciler) machineOfSwitch(obj client.Object) []ctrl.Request {
func (r *MachineReconciler) machineOfSwitch(ctx context.Context, obj client.Object) []ctrl.Request {
return []ctrl.Request{
ctrl.Request{
NamespacedName: types.NamespacedName{
@@ -746,7 +746,7 @@ func (r *MachineReconciler) SetupWithManager(mgr ctrl.Manager) error {
For(&uselessv1alpha1.Machine{}).
Owns(&uselessv1alpha1.Switch{}).
Watches(
&source.Kind{Type: &uselessv1alpha1.Switch{}},
&uselessv1alpha1.Switch{},
handler.EnqueueRequestsFromMapFunc(r.machineOfSwitch),
).
Complete(r)

View File

@@ -228,7 +228,7 @@ General workflow:
3. `kustomize edit add patch` to add patches to said resources
4. `kustomized edit add ...` or `kustomize edit set ...` (many options!)
4. `kustomize edit add ...` or `kustomize edit set ...` (many options!)
5. `kustomize build | kubectl apply -f-` or `kubectl apply -k .`
@@ -244,7 +244,7 @@ General workflow:
(just add `--help` after any command to see possible options!)
- Make sure to install the completion and try e.g. `kustomize eidt add [TAB][TAB]`
- Make sure to install the completion and try e.g. `kustomize edit add [TAB][TAB]`
---

View File

@@ -1,4 +1,4 @@
## Pre-requirements
# Pre-requirements
- Kubernetes concepts

View File

@@ -6,11 +6,53 @@
- We can specify *limits* and/or *requests*
- We can specify quantities of CPU and/or memory
- We can specify quantities of CPU and/or memory and/or ephemeral storage
---
## CPU vs memory
## Requests vs limits
- *Requests* are *guaranteed reservations* of resources
- They are used for scheduling purposes
- Kubelet will use cgroups to e.g. guarantee a minimum amount of CPU time
- A container **can** use more than its requested resources
- A container using *less* than what it requested should never be killed or throttled
- A node **cannot** be overcommitted with requests
(the sum of all requests **cannot** be higher than resources available on the node)
- A small amount of resources is set aside for system components
(this explains why there is a difference between "capacity" and "allocatable")
---
## Requests vs limits
- *Limits* are "hard limits" (a container **cannot** exceed its limits)
- They aren't taken into account by the scheduler
- A container exceeding its memory limit is killed instantly
(by the kernel out-of-memory killer)
- A container exceeding its CPU limit is throttled
- A container exceeding its disk limit is killed
(usually with a small delay, since this is checked periodically by kubelet)
- On a given node, the sum of all limits **can** be higher than the node size
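For reference, here is what these settings look like in a container spec (values are arbitrary examples):

```bash
kubectl apply -f- <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: resource-demo
spec:
  containers:
  - name: main
    image: nginx
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
        ephemeral-storage: 1Gi
      limits:
        cpu: 500m
        memory: 256Mi
        ephemeral-storage: 2Gi
EOF
```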
---
## Compressible vs incompressible resources
- CPU is a *compressible resource*
@@ -24,7 +66,29 @@
- if we have N GB RAM and need 2N, we might run at... 0.1% speed!
- As a result, exceeding limits will have different consequences for CPU and memory
- Disk is also an *incompressible resource*
- when the disk is full, writes will fail
- applications may or may not crash but persistent apps will be in trouble
---
## Running low on CPU
- Two ways for a container to "run low" on CPU:
- it's hitting its CPU limit
- all CPUs on the node are at 100% utilization
- The app in the container will run slower
(compared to running without a limit, or if CPU cycles were available)
- No other consequence
(but this could affect SLA/SLO for latency-sensitive applications!)
---
@@ -136,9 +200,7 @@ For more details, check [this blog post](https://erickhun.com/posts/kubernetes-f
## Running low on memory
- When the system runs low on memory, it starts to reclaim used memory
(we talk about "memory pressure")
- When the kernel runs low on memory, it starts to reclaim used memory
- Option 1: free up some buffers and caches
@@ -162,71 +224,91 @@ For more details, check [this blog post](https://erickhun.com/posts/kubernetes-f
- If a container exceeds its memory *limit*, it gets killed immediately
- If a node is overcommitted and under memory pressure, it will terminate some pods
- If a node's memory usage gets too high, it will *evict* some pods
(see next slide for some details about what "overcommit" means here!)
(we say that the node is "under pressure", more on that in a bit!)
[KEP 2400]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2400-node-swap/README.md#implementation-history
---
## Overcommitting resources
## Running low on disk
- *Limits* are "hard limits" (a container *cannot* exceed its limits)
- When the kubelet runs low on disk, it starts to reclaim disk space
- a container exceeding its memory limit is killed
(similarly to what the kernel does, but in different categories)
- a container exceeding its CPU limit is throttled
- Option 1: garbage collect dead pods and containers
- On a given node, the sum of pod *limits* can be higher than the node size
(no consequence, but their logs will be deleted)
- *Requests* are used for scheduling purposes
- Option 2: remove unused images
- a container can use more than its requested CPU or RAM amounts
(no consequence, but these images will have to be repulled if we need them later)
- a container using *less* than what it requested should never be killed or throttled
- Option 3: evict pods and remove them to reclaim their disk usage
- On a given node, the sum of pod *requests* cannot be higher than the node size
- Note: this only applies to *ephemeral storage*, not to e.g. Persistent Volumes!
---
## Pod quality of service
## Ephemeral storage?
Each pod is assigned a QoS class (visible in `status.qosClass`).
- This includes:
- If limits = requests:
- the *read-write layer* of the container
<br/>
(any file creation/modification outside of its volumes)
- as long as the container uses less than the limit, it won't be affected
- `emptyDir` volumes mounted in the container (see sketch below)
- if all containers in a pod have *(limits=requests)*, QoS is considered "Guaranteed"
- the container logs stored on the node
- If requests < limits:
- This does not include:
- as long as the container uses less than the request, it won't be affected
- the container image
- otherwise, it might be killed/evicted if the node gets overloaded
- if at least one container has *(requests<limits)*, QoS is considered "Burstable"
- If a pod doesn't have any request nor limit, QoS is considered "BestEffort"
- other types of volumes (e.g. Persistent Volumes, `hostPath`, or `local` volumes)
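As a sketch of the `emptyDir` case above, such a volume can be given a `sizeLimit`, which counts against the pod's ephemeral storage (pod and volume names below are made up):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: scratch-demo
spec:
  containers:
  - name: main
    image: alpine
    command: ["sleep", "3600"]
    volumeMounts:
    - name: scratch
      mountPath: /scratch
  volumes:
  - name: scratch
    emptyDir:
      sizeLimit: 500Mi  # kubelet evicts the pod if usage exceeds this
```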
---
## Quality of service impact
class: extra-details
- When a node is overloaded, BestEffort pods are killed first
## Disk limit enforcement
- Then, Burstable pods that exceed their requests
- Disk usage is periodically measured by kubelet
- Burstable and Guaranteed pods below their requests are never killed
(with something equivalent to `du`)
(except if their node fails)
- There can be a small delay before pod termination when disk limit is exceeded
- If we only use Guaranteed pods, no pod should ever be killed
- It's also possible to enable filesystem *project quotas* (see sketch below)
(as long as they stay within their limits)
(e.g. with EXT4 or XFS)
(Pod QoS is also explained in [this page](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/) of the Kubernetes documentation and in [this blog post](https://medium.com/google-cloud/quality-of-service-class-qos-in-kubernetes-bb76a89eb2c6).)
- Remember that container logs are also accounted for!
(container log rotation/retention is managed by kubelet)
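A minimal sketch of the kubelet side, assuming the node's filesystem supports project quotas (the feature gate below is alpha and may change):
```yaml
# KubeletConfiguration excerpt (sketch, not a complete config)
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  LocalStorageCapacityIsolationFSQuotaMonitoring: true
```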
---
class: extra-details
## `nodefs` and `imagefs`
- `nodefs` is the main filesystem of the node
(holding, notably, `emptyDir` volumes and container logs)
- Optionally, the container engine can be configured to use an `imagefs`
- `imagefs` will store container images and container writable layers
- When there is a separate `imagefs`, its disk usage is tracked independently
- If `imagefs` usage gets too high, kubelet will remove old images first
(conversely, if `nodefs` usage gets too high, kubelet won't remove old images)
---
@@ -304,6 +386,46 @@ class: extra-details
---
## Pod quality of service
Each pod is assigned a QoS class (visible in `status.qosClass`).
- If limits = requests:
- as long as the container uses less than the limit, it won't be affected
- if all containers in a pod have *(limits=requests)*, QoS is considered "Guaranteed"
- If requests < limits:
- as long as the container uses less than the request, it won't be affected
- otherwise, it might be killed/evicted if the node gets overloaded
- if at least one container has *(requests<limits)*, QoS is considered "Burstable"
- If a pod doesn't have any request nor limit, QoS is considered "BestEffort"
---
## Quality of service impact
- When a node is overloaded, BestEffort pods are killed first
- Then, Burstable pods that exceed their requests
- Burstable and Guaranteed pods below their requests are never killed
(except if their node fails)
- If we only use Guaranteed pods, no pod should ever be killed
(as long as they stay within their limits)
(Pod QoS is also explained in [this page](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/) of the Kubernetes documentation and in [this blog post](https://medium.com/google-cloud/quality-of-service-class-qos-in-kubernetes-bb76a89eb2c6).)
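For instance, this (made-up) pod would be classified as "Guaranteed", since limits and requests are equal for all resources:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: guaranteed-demo
spec:
  containers:
  - name: main
    image: nginx
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
      limits:
        cpu: 100m
        memory: 100Mi
```
We can then check the class with `kubectl get pod guaranteed-demo -o jsonpath={.status.qosClass}`.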
---
## Specifying resources
- Resource requests are expressed at the *container* level
@@ -316,9 +438,9 @@ class: extra-details
(so 100m = 0.1)
- Memory is expressed in bytes
- Memory and ephemeral disk storage are expressed in bytes
- Memory can be expressed with k, M, G, T, Ki, Mi, Gi, Ti suffixes
- These can have k, M, G, T, Ki, Mi, Gi, Ti suffixes
(corresponding to 10^3, 10^6, 10^9, 10^12, 2^10, 2^20, 2^30, 2^40)
@@ -334,11 +456,13 @@ containers:
image: jpetazzo/color
resources:
limits:
memory: "100Mi"
cpu: "100m"
requests:
ephemeral-storage: 10M
memory: "100Mi"
requests:
cpu: "10m"
ephemeral-storage: 10M
memory: "100Mi"
```
This set of resources makes sure that this service won't be killed (as long as it stays below 100 MB of RAM), but allows its CPU usage to be throttled if necessary.
@@ -365,7 +489,7 @@ This set of resources makes sure that this service won't be killed (as long as i
---
## We need default resource values
## We need to specify resource values
- If we do not set resource values at all:
@@ -379,196 +503,33 @@ This set of resources makes sure that this service won't be killed (as long as i
- if the request is zero, the scheduler can't make a smart placement decision
- To address this, we can set default values for resources
- This is done with a LimitRange object
- This is fine when learning/testing, absolutely not in production!
---
# Defining min, max, and default resources
## How should we set resources?
- We can create LimitRange objects to indicate any combination of:
- Option 1: manually, for each container
- min and/or max resources allowed per pod
- simple, effective, but tedious
- default resource *limits*
- Option 2: automatically, with the [Vertical Pod Autoscaler (VPA)][vpa] (see sketch below)
- default resource *requests*
- relatively simple, very minimal involvement beyond initial setup
- maximal burst ratio (*limit/request*)
- not compatible with HPAv1, can disrupt long-running workloads (see [limitations][vpa-limitations])
- LimitRange objects are namespaced
- Option 3: semi-automatically, with tools like [Robusta KRR][robusta]
- They apply to their namespace only
- good compromise between manual work and automation
---
- Option 4: by creating LimitRanges in our Namespaces
## LimitRange example
- relatively simple, but "one-size-fits-all" approach might not always work
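For reference, a minimal VPA manifest could look like this (a sketch; it assumes the [VPA][vpa] components are installed, and the names are made up):
```yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: my-app-vpa
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app
  updatePolicy:
    updateMode: "Auto"  # VPA may evict pods to apply new requests
```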
```yaml
apiVersion: v1
kind: LimitRange
metadata:
name: my-very-detailed-limitrange
spec:
limits:
- type: Container
min:
cpu: "100m"
max:
cpu: "2000m"
memory: "1Gi"
default:
cpu: "500m"
memory: "250Mi"
defaultRequest:
cpu: "500m"
```
---
## Example explanation
The YAML on the previous slide shows an example LimitRange object specifying very detailed limits on CPU usage,
and providing defaults on RAM usage.
Note the `type: Container` line: in the future,
it might also be possible to specify limits
per Pod, but it's not [officially documented yet](https://github.com/kubernetes/website/issues/9585).
---
## LimitRange details
- LimitRange restrictions are enforced only when a Pod is created
(they don't apply retroactively)
- They don't prevent creation of e.g. an invalid Deployment or DaemonSet
(but the pods will not be created as long as the LimitRange is in effect)
- If there are multiple LimitRange restrictions, they all apply together
(which means that it's possible to specify conflicting LimitRanges,
<br/>preventing any Pod from being created)
- If a LimitRange specifies a `max` for a resource but no `default`,
<br/>that `max` value becomes the `default` limit too
---
# Namespace quotas
- We can also set quotas per namespace
- Quotas apply to the total usage in a namespace
(e.g. total CPU limits of all pods in a given namespace)
- Quotas can apply to resource limits and/or requests
(like the CPU and memory limits that we saw earlier)
- Quotas can also apply to other resources:
- "extended" resources (like GPUs)
- storage size
- number of objects (number of pods, services...)
---
## Creating a quota for a namespace
- Quotas are enforced by creating a ResourceQuota object
- ResourceQuota objects are namespaced, and apply to their namespace only
- We can have multiple ResourceQuota objects in the same namespace
- The most restrictive values are used
---
## Limiting total CPU/memory usage
- The following YAML specifies an upper bound for *limits* and *requests*:
```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
name: a-little-bit-of-compute
spec:
hard:
requests.cpu: "10"
requests.memory: 10Gi
limits.cpu: "20"
limits.memory: 20Gi
```
These quotas will apply to the namespace where the ResourceQuota is created.
---
## Limiting number of objects
- The following YAML specifies how many objects of specific types can be created:
```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
name: quota-for-objects
spec:
hard:
pods: 100
services: 10
secrets: 10
configmaps: 10
persistentvolumeclaims: 20
services.nodeports: 0
services.loadbalancers: 0
count/roles.rbac.authorization.k8s.io: 10
```
(The `count/` syntax allows limiting arbitrary objects, including CRDs.)
---
## YAML vs CLI
- Quotas can be created with a YAML definition
- ...Or with the `kubectl create quota` command
- Example:
```bash
kubectl create quota my-resource-quota --hard=pods=300,limits.memory=300Gi
```
- With both YAML and CLI form, the values are always under the `hard` section
(there is no `soft` quota)
---
## Viewing current usage
When a ResourceQuota is created, we can see how much of it is used:
```
kubectl describe resourcequota my-resource-quota
Name: my-resource-quota
Namespace: default
Resource Used Hard
-------- ---- ----
pods 12 100
services 1 5
services.loadbalancers 0 0
services.nodeports 0 0
```
[robusta]: https://github.com/robusta-dev/krr
[vpa]: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
[vpa-limitations]: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler#known-limitations
---
@@ -606,130 +567,6 @@ services.nodeports 0 0
---
class: extra-details
## PriorityClass and ResourceQuotas
- A ResourceQuota can include a list of *scopes* or a *scope selector*
- In that case, the quota will only apply to the scoped resources
- Example: limit the resources allocated to "high priority" Pods
- In that case, make sure that the quota is created in every Namespace
(or use *admission configuration* to enforce it)
- See the [resource quotas documentation][quotadocs] for details
[quotadocs]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#resource-quota-per-priorityclass
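As a sketch (assuming a PriorityClass named `high` exists):
```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: quota-high-priority
spec:
  hard:
    pods: "10"
  scopeSelector:
    matchExpressions:
    - operator: In
      scopeName: PriorityClass
      values: ["high"]  # assumes a PriorityClass named "high"
```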
---
# Limiting resources in practice
- We have at least three mechanisms:
- requests and limits per Pod
- LimitRange per namespace
- ResourceQuota per namespace
- Let's see a simple recommendation to get started with resource limits
---
## Set a LimitRange
- In each namespace, create a LimitRange object
- Set a small default CPU request and CPU limit
(e.g. "100m")
- Set a default memory request and limit depending on your most common workload
- for Java, Ruby: start with "1G"
- for Go, Python, PHP, Node: start with "250M"
- Set upper bounds slightly below your expected node size
(80-90% of your node size, with at least a 500M memory buffer)
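As an illustration, here is one way to encode these suggestions (a sketch; adapt the values to your workloads and node sizes):
```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: sensible-defaults
spec:
  limits:
  - type: Container
    defaultRequest:
      cpu: 100m
      memory: 250M  # or 1G for e.g. Java/Ruby workloads
    default:
      cpu: 100m
      memory: 250M
    max:
      cpu: "3"      # assuming 4-CPU nodes
      memory: 13G   # assuming 16G nodes, minus a buffer
```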
---
## Set a ResourceQuota
- In each namespace, create a ResourceQuota object
- Set generous CPU and memory limits
(e.g. half the cluster size if the cluster hosts multiple apps)
- Set generous objects limits
- these limits should not be here to constrain your users
- they should catch a runaway process creating many resources
- example: a custom controller creating many pods
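For instance (made-up values, assuming this namespace is entitled to about half of a 40-core/160G cluster):
```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: generous-quota
spec:
  hard:
    requests.cpu: "20"
    requests.memory: 80Gi
    limits.cpu: "40"
    limits.memory: 160Gi
    pods: "500"  # high enough to only catch runaway controllers
```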
---
## Observe, refine, iterate
- Observe the resource usage of your pods
(we will see how in the next chapter)
- Adjust individual pod limits
- If you see trends: adjust the LimitRange
(rather than adjusting every individual set of pod limits)
- Observe the resource usage of your namespaces
(with `kubectl describe resourcequota ...`)
- Rinse and repeat regularly
---
## Underutilization
- Remember: when assigning a pod to a node, the scheduler looks at *requests*
(not at current utilization on the node)
- If pods request resources but don't use them, this can lead to underutilization
(because the scheduler will consider that the node is full and can't fit new pods)
---
## Viewing a namespace's limits and quotas
- `kubectl describe namespace` will display resource limits and quotas
.lab[
- Try it out:
```bash
kubectl describe namespace default
```
- View limits and quotas for *all* namespaces:
```bash
kubectl describe namespace
```
]
---
## Additional resources
- [A Practical Guide to Setting Kubernetes Requests and Limits](http://blog.kubecost.com/blog/requests-and-limits/)

View File

@@ -1,65 +0,0 @@
title: |
Kubernetes
for Admins and Ops
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
- static-pods-exercise
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- k8s/prereqs-advanced.md
- shared/handson.md
- k8s/architecture.md
#- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc-easy.md
-
- k8s/dmuc-medium.md
- k8s/dmuc-hard.md
#- k8s/multinode.md
#- k8s/cni.md
- k8s/cni-internals.md
#- k8s/interco.md
-
- k8s/apilb.md
#- k8s/setup-overview.md
#- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
- k8s/staticpods.md
-
#- k8s/cloud-controller-manager.md
#- k8s/bootstrap.md
- k8s/control-plane-auth.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
-
#- k8s/lastwords-admin.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -1,95 +0,0 @@
title: |
Kubernetes
for administrators
and operators
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
# DAY 1
- - k8s/prereqs-advanced.md
- shared/handson.md
- k8s/architecture.md
- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc-easy.md
- - k8s/dmuc-medium.md
- k8s/dmuc-hard.md
#- k8s/multinode.md
#- k8s/cni.md
- k8s/cni-internals.md
#- k8s/interco.md
- - k8s/apilb.md
- k8s/setup-overview.md
#- k8s/setup-devel.md
- k8s/setup-managed.md
- k8s/setup-selfhosted.md
- k8s/cluster-upgrade.md
- k8s/staticpods.md
- - k8s/cluster-backup.md
- k8s/cloud-controller-manager.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
# DAY 2
- - k8s/kubercoins.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/authn-authz.md
- k8s/user-cert.md
- k8s/csr-api.md
- - k8s/openid-connect.md
- k8s/control-plane-auth.md
###- k8s/bootstrap.md
- k8s/netpol.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- - k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
- - k8s/prometheus.md
#- k8s/prometheus-stack.md
- k8s/extending-api.md
- k8s/crd.md
- k8s/operators.md
- k8s/eck.md
###- k8s/operators-design.md
###- k8s/operators-example.md
# CONCLUSION
- - k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md
- |
# (All content after this slide is bonus material)
# EXTRA
- - k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md

View File

@@ -1,136 +0,0 @@
title: |
Deploying and Scaling Microservices
with Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectl-run.md
#- k8s/batch-jobs.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
- k8s/service-types.md
- k8s/kubenet.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
-
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/yamldeploy.md
- k8s/namespaces.md
- k8s/setup-overview.md
- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
-
- k8s/dashboard.md
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/ingress.md
#- k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/openebs.md
#- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
#- k8s/scalingdockercoins.md
#- shared/hastyconclusions.md
#- k8s/daemonset.md
#- shared/yaml.md
#- k8s/exercise-yaml.md
#- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
#- k8s/accessinternal.md
#- k8s/kubectlproxy.md
#- k8s/healthchecks-more.md
#- k8s/record.md
#- k8s/ingress-tls.md
#- k8s/kustomize.md
#- k8s/helm-intro.md
#- k8s/helm-chart-format.md
#- k8s/helm-create-basic-chart.md
#- k8s/helm-create-better-chart.md
#- k8s/helm-dependencies.md
#- k8s/helm-values-schema-validation.md
#- k8s/helm-secrets.md
#- k8s/exercise-helm.md
#- k8s/ytt.md
#- k8s/gitlab.md
#- k8s/create-chart.md
#- k8s/create-more-charts.md
#- k8s/netpol.md
#- k8s/authn-authz.md
#- k8s/user-cert.md
#- k8s/csr-api.md
#- k8s/openid-connect.md
#- k8s/pod-security-intro.md
#- k8s/pod-security-policies.md
#- k8s/pod-security-admission.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
#- k8s/logs-centralized.md
#- k8s/prometheus.md
#- k8s/prometheus-stack.md
#- k8s/statefulsets.md
#- k8s/consul.md
#- k8s/pv-pvc-sc.md
#- k8s/volume-claim-templates.md
#- k8s/portworx.md
#- k8s/openebs.md
#- k8s/stateful-failover.md
#- k8s/extending-api.md
#- k8s/crd.md
#- k8s/admission.md
#- k8s/operators.md
#- k8s/operators-design.md
#- k8s/operators-example.md
#- k8s/staticpods.md
#- k8s/finalizers.md
#- k8s/owners-and-dependents.md
#- k8s/gitworkflows.md
-
#- k8s/whatsnext.md
- k8s/lastwords.md
#- k8s/links.md
- shared/thankyou.md

View File

@@ -1,91 +0,0 @@
title: |
Kubernetes 101
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
#- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-overview.md
#- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- - k8s/kubectl-run.md
#- k8s/batch-jobs.md
#- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
#- k8s/service-types.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
#- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- - k8s/dashboard.md
#- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/rollout.md
#- k8s/record.md
- - k8s/logs-cli.md
# Bridget hasn't added EFK yet
#- k8s/logs-centralized.md
- k8s/namespaces.md
- k8s/helm-intro.md
#- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
#- k8s/helm-create-better-chart.md
#- k8s/helm-dependencies.md
#- k8s/helm-values-schema-validation.md
#- k8s/helm-secrets.md
#- k8s/kustomize.md
#- k8s/ytt.md
#- k8s/netpol.md
- k8s/whatsnext.md
# - k8s/links.md
# Bridget-specific
- k8s/links-bridget.md
- shared/thankyou.md

View File

@@ -1,170 +0,0 @@
title: |
Deploying and Scaling Microservices
with Docker and Kubernetes
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
content:
- shared/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
-
- k8s/kubectlget.md
- k8s/kubectl-run.md
- k8s/batch-jobs.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
-
- k8s/kubectlexpose.md
- k8s/service-types.md
- k8s/kubenet.md
- k8s/shippingimages.md
- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
- shared/yaml.md
- k8s/yamldeploy.md
- k8s/namespaces.md
-
- k8s/setup-overview.md
- k8s/setup-devel.md
- k8s/setup-managed.md
- k8s/setup-selfhosted.md
- k8s/dashboard.md
- k8s/k9s.md
- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
#- k8s/exercise-yaml.md
-
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
- k8s/record.md
-
- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
- k8s/accessinternal.md
- k8s/kubectlproxy.md
-
- k8s/ingress.md
- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
- k8s/ingress-tls.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
#- k8s/exercise-helm.md
- k8s/gitlab.md
- k8s/ytt.md
-
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/control-plane-auth.md
-
- k8s/volumes.md
#- k8s/exercise-configmap.md
- k8s/build-with-docker.md
- k8s/build-with-kaniko.md
-
- k8s/configuration.md
- k8s/secrets.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
-
- k8s/logs-centralized.md
- k8s/prometheus.md
- k8s/prometheus-stack.md
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/cluster-autoscaler.md
- k8s/horizontal-pod-autoscaler.md
- k8s/hpa-v2.md
-
- k8s/extending-api.md
- k8s/apiserver-deepdive.md
- k8s/crd.md
- k8s/aggregation-layer.md
- k8s/admission.md
- k8s/operators.md
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/kubebuilder.md
- k8s/sealed-secrets.md
- k8s/kyverno.md
- k8s/eck.md
- k8s/finalizers.md
- k8s/owners-and-dependents.md
- k8s/events.md
-
- k8s/dmuc-easy.md
- k8s/dmuc-medium.md
- k8s/dmuc-hard.md
#- k8s/multinode.md
#- k8s/cni.md
- k8s/cni-internals.md
- k8s/apilb.md
- k8s/staticpods.md
-
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
- k8s/cloud-controller-manager.md
- k8s/gitworkflows.md
-
- k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -1,136 +0,0 @@
title: |
Deploying and Scaling Microservices
with Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectl-run.md
- k8s/batch-jobs.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
- k8s/service-types.md
- k8s/kubenet.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
-
- k8s/yamldeploy.md
- k8s/setup-overview.md
- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/dashboard.md
- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- shared/yaml.md
#- k8s/exercise-yaml.md
-
- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- k8s/rollout.md
- k8s/healthchecks.md
#- k8s/healthchecks-more.md
- k8s/record.md
-
- k8s/namespaces.md
- k8s/ingress.md
#- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
#- k8s/ingress-tls.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
#- k8s/exercise-helm.md
#- k8s/ytt.md
- k8s/gitlab.md
-
- k8s/netpol.md
- k8s/authn-authz.md
#- k8s/csr-api.md
#- k8s/openid-connect.md
#- k8s/pod-security-intro.md
#- k8s/pod-security-policies.md
#- k8s/pod-security-admission.md
-
- k8s/volumes.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/logs-centralized.md
#- k8s/prometheus.md
#- k8s/prometheus-stack.md
-
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
#- k8s/extending-api.md
#- k8s/admission.md
#- k8s/operators.md
#- k8s/operators-design.md
#- k8s/operators-example.md
#- k8s/staticpods.md
#- k8s/owners-and-dependents.md
#- k8s/gitworkflows.md
-
- k8s/whatsnext.md
- k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -1,13 +1,13 @@
title: |
Advanced
Kubernetes
Advanced Kubernetes
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "[Slack](https://ap-guest.slack.com/archives/C88FPJY23)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
slides: https://2023-12-demonware.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
@@ -24,70 +24,63 @@ content:
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- #1
- k8s/prereqs-advanced.md
-
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- k8s/demo-apps.md
- k8s/authn-authz.md
- k8s/architecture.md
- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc-easy.md
- #2
- exercises/rbac-details.md
-
- k8s/dmuc-medium.md
- k8s/dmuc-hard.md
#- k8s/multinode.md
#- k8s/cni.md
#- k8s/interco.md
- k8s/cni-internals.md
- #3
- k8s/apilb.md
- k8s/cni-internals.md
- k8s/control-plane-auth.md
- |
# (Extra content)
- k8s/internal-apis.md
- k8s/staticpods.md
- k8s/cluster-upgrade.md
- #4
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- |
# (Extra content)
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
- k8s/ytt.md
- #5
- exercises/polykuberbac-details.md
-
- k8s/extending-api.md
- k8s/crd.md
- k8s/operators.md
- k8s/sealed-secrets.md
- k8s/crd.md
- #6
- k8s/ingress-tls.md
- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/eck.md
- #7
- k8s/admission.md
- k8s/kyverno.md
- #8
- k8s/aggregation-layer.md
- k8s/metrics-server.md
- k8s/prometheus.md
- k8s/prometheus-stack.md
- k8s/hpa-v2.md
- #9
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/kubebuilder.md
- k8s/events.md
- k8s/finalizers.md
- |
# (Extra content)
- k8s/owners-and-dependents.md
- k8s/apiserver-deepdive.md
#- k8s/record.md
- k8s/finalizers.md
- k8s/events.md
- exercises/sealed-secrets-details.md
-
- k8s/admission.md
- k8s/kyverno.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/resource-limits.md
- exercises/kyverno-ingress-domain-name-details.md
-
- k8s/cluster-sizing.md
- k8s/disruptions.md
- k8s/cluster-autoscaler.md
- k8s/horizontal-pod-autoscaler.md
- k8s/metrics-server.md
- k8s/aggregation-layer.md
- k8s/hpa-v2-keda.md
- shared/thankyou.md
-
- |
# (Extra material)
- k8s/apiserver-deepdive.md
- k8s/ingress.md
- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
- k8s/ingress-tls.md
- shared/thankyou.md

View File

@@ -1,76 +0,0 @@
## Introductions (in French 🇫🇷)
- Hello!
- On stage: Julien
- Backstage: Alexandre, Antoine, Aurélien (x2), Benji, David, Kostas, Nicolas, Paul, Sébastien, Thibault...
- Schedule: every day from 9am to 1pm
- We'll take a break around 11am
- Don't hesitate to ask lots of questions!
- Use @@CHAT@@ for questions, asking for help, etc.
[@alexbuisine]: https://twitter.com/alexbuisine
[EphemeraSearch]: https://ephemerasearch.com/
[@jpetazzo]: https://twitter.com/jpetazzo
[@jpetazzo@hachyderm.io]: https://hachyderm.io/@jpetazzo
[@s0ulshake]: https://twitter.com/s0ulshake
[Quantgene]: https://www.quantgene.com/
---
## The morning 15 minutes
- Each day, we'll start at 9am with a 15-minute mini-presentation
(on a topic chosen together, not necessarily related to the training!)
- A chance to warm up our neurons with 🥐/☕️/🍊
(before getting down to serious business)
- Then at 9:15am we get to the heart of the matter
---
## Hands-on exercises
- At the end of each morning, there is a concrete hands-on exercise
(to put into practice what we've seen)
- The exercises are part of the training!
- They are designed to take between 15 minutes and 2 hours
(depending on each person's knowledge and comfort level)
- Each morning will start with a review of the previous day's exercise
- We're here to help if you get stuck on an exercise!
---
## Allô Docker¹?
- Every afternoon: one hour of open Q&A!
(except on Fridays)
- Tuesday: 3pm-4pm
- Wednesday: 4pm-5pm
- Thursday: 2pm-3pm
- On [Jitsi][jitsi] (the "visioconf" link on the training portal)
.footnote[¹A nod to the excellent ["Quoi de neuf Docker?"][qdnd] by the excellent [Nicolas Deloof][ndeloof] 🙂]
[qdnd]: https://www.youtube.com/channel/UCOAhkxpryr_BKybt9wIw-NQ
[ndeloof]: https://github.com/ndeloof
[jitsi]: https://training.enix.io/jitsi-magic/jitsi.container.training/AlloDockerMai2023

View File

@@ -1,76 +0,0 @@
## Introductions (in French 🇫🇷)
- Hello!
- On stage: Ludovic
- Backstage: Alexandre, Antoine, Aurélien (x2), Benji, David, Julien, Kostas, Nicolas, Paul, Sébastien, Thibault...
- Schedule: every day from 9am to 1pm
- We'll take a break around 11am
- Don't hesitate to ask lots of questions!
- Use @@CHAT@@ for questions, asking for help, etc.
[@alexbuisine]: https://twitter.com/alexbuisine
[EphemeraSearch]: https://ephemerasearch.com/
[@jpetazzo]: https://twitter.com/jpetazzo
[@jpetazzo@hachyderm.io]: https://hachyderm.io/@jpetazzo
[@s0ulshake]: https://twitter.com/s0ulshake
[Quantgene]: https://www.quantgene.com/
---
## The morning 15 minutes
- Each day, we'll start at 9am with a 15-minute mini-presentation
(on a topic chosen together, not necessarily related to the training!)
- A chance to warm up our neurons with 🥐/☕️/🍊
(before getting down to serious business)
- Then at 9:15am we get to the heart of the matter
---
## Hands-on exercises
- At the end of each morning, there is a concrete hands-on exercise
(to put into practice what we've seen)
- The exercises are part of the training!
- They are designed to take between 15 minutes and 2 hours
(depending on each person's knowledge and comfort level)
- Each morning will start with a review of the previous day's exercise
- We're here to help if you get stuck on an exercise!
---
## Allô Docker¹?
- Every afternoon: one hour of open Q&A!
(except on Fridays)
- Tuesday: 3pm-4pm
- Wednesday: 4pm-5pm
- Thursday: 5pm-6pm
- On [Jitsi][jitsi] (the "visioconf" link on the training portal)
.footnote[¹A nod to the excellent ["Quoi de neuf Docker?"][qdnd] by the excellent [Nicolas Deloof][ndeloof] 🙂]
[qdnd]: https://www.youtube.com/channel/UCOAhkxpryr_BKybt9wIw-NQ
[ndeloof]: https://github.com/ndeloof
[jitsi]: https://training.enix.io/jitsi-magic/jitsi.container.training/AlloDockerMai2023

View File

@@ -1,18 +1,18 @@
## Introductions (in French 🇫🇷)
## Introductions
- Hello!
- Hello! I'm Jérôme Petazzoni ([@jpetazzo], [@jpetazzo@hachyderm.io], Ardan Labs)
- On stage: Jérôme ([@jpetazzo@hachyderm.io])
- The training will run from 8am to noon (Vancouver) / 4pm to 8pm (Dublin)
- Backstage: Alexandre, Antoine, Aurélien (x2), Benji, David, Julien, Kostas, Nicolas, Paul, Sébastien, Thibault...
- We'll have regular breaks
- Schedule: every day from 9am to 1pm
- Feel free to interrupt for questions at any time
- We'll take a break around 11am
- *Especially when you see full screen container pictures!*
- Don't hesitate to ask lots of questions!
- Live feedback, questions, help: @@CHAT@@
- Use @@CHAT@@ for questions, asking for help, etc.
<!-- -->
[@alexbuisine]: https://twitter.com/alexbuisine
[EphemeraSearch]: https://ephemerasearch.com/
@@ -23,54 +23,16 @@
---
## The morning 15 minutes
## Exercises
- Each day, we'll start at 9am with a 15-minute mini-presentation
- At the end of each day, there is a series of exercises
(on a topic chosen together, not necessarily related to the training!)
- To make the most out of the training, please try the exercises!
- A chance to warm up our neurons with 🥐/☕️/🍊
(it will help to practice and memorize the content of the day)
(before getting down to serious business)
- We recommend taking at least one hour to work on the exercises
- Then at 9:15am we get to the heart of the matter
(if you understood the content of the day, it will be much faster)
---
## Hands-on exercises
- At the end of each morning, there is a concrete hands-on exercise
(to put into practice what we've seen)
- The exercises are part of the training!
- They are designed to take between 15 minutes and 2 hours
(depending on each person's knowledge and comfort level)
- Each morning will start with a review of the previous day's exercise
- We're here to help if you get stuck on an exercise!
---
## Allô Docker¹?
- Every afternoon: one hour of open Q&A!
(except on the last day)
- Tuesday: 4:30pm-5:30pm
- Wednesday: 3:30pm-4:30pm
- Thursday: 2:30pm-3:30pm
- On [Jitsi][jitsi] (the "visioconf" link on the training portal)
.footnote[¹A nod to the excellent ["Quoi de neuf Docker?"][qdnd] by the excellent [Nicolas Deloof][ndeloof] 🙂]
[qdnd]: https://www.youtube.com/channel/UCOAhkxpryr_BKybt9wIw-NQ
[ndeloof]: https://github.com/ndeloof
[jitsi]: https://training.enix.io/jitsi-magic/jitsi.container.training/AlloDockerAutomne2023
- Each day will start with a quick review of the exercises of the previous day

View File

@@ -1,4 +1,4 @@
## Pre-requirements
# Pre-requirements
- Be comfortable with the UNIX command line

View File

@@ -1,24 +1,11 @@
class: title
class: title, self-paced
Thank you!
![end](images/end.jpg)
Thank you!
---
## Last words...
class: title, in-person
- The training portal stays online after the training
- Don't hesitate to contact us via instant messaging!
- The ENIX VMs stay online for at least a week after the training
(but not the cloud clusters; those get shut down very quickly)
- Don't forget to fill out the evaluation forms
(it's not for us, it's a legal requirement 😅)
- Once again, **thank you**!
That's all, folks! <br/> Questions?
![end](images/end.jpg)

View File

@@ -1,72 +0,0 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
- snap
- btp-auto
- benchmarking
- elk-manual
- prom-manual
content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/ipsec.md
- swarm/swarmtools.md
- swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- - swarm/logging.md
- swarm/metrics.md
- swarm/gui.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md

View File

@@ -1,71 +0,0 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
- snap
- btp-manual
- benchmarking
- elk-manual
- prom-manual
content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
#- swarm/hostingregistry.md
#- swarm/testingregistry.md
#- swarm/btp-manual.md
#- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
#- swarm/rollingupdates.md
#- swarm/healthchecks.md
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/ipsec.md
#- swarm/swarmtools.md
- swarm/security.md
#- swarm/secrets.md
#- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- swarm/logging.md
- swarm/metrics.md
#- swarm/stateful.md
#- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md

View File

@@ -1,80 +0,0 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
- btp-auto
content:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
- shared/connecting.md
- swarm/versions.md
- |
name: part-1
class: title, self-paced
Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- |
name: part-2
class: title, self-paced
Part 2
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/swarmnbt.md
- swarm/ipsec.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- swarm/nodeinfo.md
- swarm/swarmtools.md
- - swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- swarm/logging.md
- swarm/metrics.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md

View File

@@ -1,75 +0,0 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
- btp-auto
content:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
- shared/connecting.md
- swarm/versions.md
- |
name: part-1
class: title, self-paced
Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- |
name: part-2
class: title, self-paced
Part 2
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/swarmnbt.md
- swarm/ipsec.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
#- swarm/healthchecks.md
- swarm/nodeinfo.md
- swarm/swarmtools.md
- - swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
#- swarm/logging.md
#- swarm/metrics.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md

View File

@@ -15,6 +15,7 @@ h1, h2, h3, h4, h5, h6 {
font-weight: bold;
font-size: 45px !important;
margin-top: 0.5em;
margin-bottom: 0.75em;
}
code {