Compare commits


45 Commits

Author SHA1 Message Date
Jerome Petazzoni
86828caf31 fix-redirects.sh: adding forced redirect 2020-04-07 16:47:57 -05:00
Jérôme Petazzoni
ffaca8925b Merge pull request #546 from arthurlogilab/patch-7
[cluster-backup] add bivac
2020-02-08 08:42:54 -06:00
Jérôme Petazzoni
4e27c60e07 Merge pull request #542 from arthurlogilab/patch-3
[extending-api] link to video, not playlist
2020-02-08 08:41:14 -06:00
Jérôme Petazzoni
c38e169b61 Merge pull request #540 from arthurlogilab/patch-1
[cluster-sizing.md] add link to Kiyot project page
2020-02-08 08:40:18 -06:00
Jérôme Petazzoni
061451ec12 Merge pull request #545 from arthurlogilab/patch-6
[slides/cni] plugins : point to README
2020-02-08 08:38:56 -06:00
Arthur Lutz
6139a9a1fa [cluster-backup] add bivac
Backup Interface for Volumes Attached to Containers
2020-02-07 16:59:00 +01:00
Arthur Lutz
a9f0ee93d8 [slides/cni] plugins : point to README 2020-02-07 11:49:25 +01:00
Arthur Lutz
0fc8b8e884 [extending-api] link to video, not playlist 2020-02-06 17:25:38 +01:00
Arthur Lutz
b9667365a4 [cluster-sizing.md] add link to Kiyot project page 2020-02-06 16:02:46 +01:00
Jerome Petazzoni
dde20a81bc Shuffle 2020-02-05 11:44:45 -06:00
Jerome Petazzoni
b17c2150a3 Merge branch 'master' into 2020-02-enix 2020-02-05 09:32:21 -06:00
Jerome Petazzoni
1414b74224 typos 2020-02-05 09:32:19 -06:00
Jerome Petazzoni
d0202963b7 typos 2020-02-05 05:05:19 -06:00
Jerome Petazzoni
bad25bfb80 Merge branch 'master' into 2020-02-enix 2020-02-05 05:02:50 -06:00
Jerome Petazzoni
834fe6c617 Merge branch 'master' into 2020-02-enix 2020-02-05 02:51:34 -06:00
Jerome Petazzoni
783d2783dc Merge branch '2020-02-enix' of github.com:jpetazzo/container.training into 2020-02-enix 2020-02-05 01:52:13 -06:00
Jerome Petazzoni
b0fa8d497e Add restaurant address 2020-02-05 01:52:03 -06:00
Julien Girardin
97530166d1 Copy introduction of day 2 2020-02-05 08:20:36 +01:00
Julien Girardin
360f74f555 Something like last push for day 3 2020-02-04 22:48:00 +01:00
Jerome Petazzoni
18b7ee9e3d Merge branch '2020-02-enix' of github.com:jpetazzo/container.training into 2020-02-enix 2020-02-04 06:34:20 -06:00
Jerome Petazzoni
7afab5619f Merge branch 'master' into 2020-02-enix 2020-02-04 06:34:07 -06:00
Julien Girardin
1db1bf3885 A batch of fix for 3rd day 2020-02-03 18:41:18 +01:00
Jerome Petazzoni
822fa8443b Add WiFi info 2020-02-03 01:48:51 -06:00
Jerome Petazzoni
10937f7740 Add @zempashi's emoji and link 2020-02-02 13:55:17 -06:00
Jerome Petazzoni
ad94cdab2d Last tweaks 2020-02-02 10:45:00 -06:00
Jerome Petazzoni
2495040007 Merge branch 'master' into 2020-02-enix 2020-02-02 10:21:17 -06:00
Jerome Petazzoni
8fab9999f2 Merge branch 'master' into 2020-02-enix 2020-02-02 09:35:30 -06:00
Jerome Petazzoni
3ecbec85de Reorg day 2 content 2020-02-02 04:43:28 -06:00
Julien Girardin
10024ff021 Merge pull request #539 from jpetazzo/review-j3
More tweaks
2020-01-31 22:16:59 +01:00
Jerome Petazzoni
17eea45536 merge 2020-01-31 12:56:11 -06:00
Jerome Petazzoni
9089157367 Tweaks / formating / English 2020-01-31 12:37:11 -06:00
Julien Girardin
8599c572a0 Merge pull request #538 from jpetazzo/review-j3
Review j3
2020-01-31 08:55:11 +01:00
Jerome Petazzoni
8038d5ebff Many small fixes + update on registries 2020-01-30 15:40:13 -06:00
Julien Girardin
e569388618 Advanced rollout and security 2020-01-29 18:12:26 +01:00
Jerome Petazzoni
646a0f7ee2 First round of reviews 2020-01-28 13:40:38 -06:00
Jerome Petazzoni
3a1549e3a4 C'est la merge, chef 2020-01-28 12:41:59 -06:00
Julien Girardin
54a9ba25b3 Add Exercice for promtheus and open-telemetry 2020-01-28 16:26:39 +01:00
Julien Girardin
128a5a2340 Opentelemetry and a few things on prometheus 2020-01-28 16:16:36 +01:00
Julien Girardin
0f34f037bf Second batch for day 3 2020-01-27 18:10:34 +01:00
Julien Girardin
c0d735ade2 More on day 3 2020-01-27 13:42:40 +01:00
Julien Girardin
3914160d28 starting filling day 3 2020-01-27 13:42:02 +01:00
Jerome Petazzoni
80356d92cb Update after work session with @zempashi 2019-11-06 08:26:13 -06:00
Jerome Petazzoni
e9b22c5074 Merge branch 'master' into 2020-02-enix 2019-11-06 07:18:38 -06:00
Jerome Petazzoni
ed55c72366 Free up most of day 3 2019-10-25 12:44:12 -05:00
Jerome Petazzoni
a1a2cccfef First draft for Feb 2020 content 2019-10-06 10:28:07 -05:00
52 changed files with 1727 additions and 1091 deletions

View File

@@ -9,21 +9,21 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.3
image: k8s.gcr.io/etcd:3.3.10
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-scheduler --master http://localhost:8080

View File

@@ -12,6 +12,7 @@ metadata:
name: kube-router-cfg
namespace: kube-system
labels:
tier: node
k8s-app: kube-router
data:
cni-conf.json: |
@@ -31,21 +32,20 @@ data:
]
}
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
k8s-app: kube-router
tier: node
name: kube-router
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-router
template:
metadata:
labels:
k8s-app: kube-router
tier: node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:

View File

@@ -9,20 +9,20 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.3
image: k8s.gcr.io/etcd:3.3.10
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-scheduler --master http://localhost:8080

View File

@@ -1,4 +1,4 @@
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1alpha1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training

View File

@@ -1,9 +1,10 @@
apiVersion: apiextensions.k8s.io/v1
apiVersion: apiextensions.k8s.io/v1alpha1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
version: v1alpha1
scope: Namespaced
names:
plural: coffees
@@ -11,25 +12,21 @@ spec:
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
properties:
spec:
required:
- taste
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
additionalPrinterColumns:
- JSONPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
validation:
openAPIV3Schema:
properties:
spec:
required:
- taste
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string

View File

@@ -113,12 +113,9 @@ _cmd_disabledocker() {
TAG=$1
need_tag
pssh "
sudo systemctl disable docker.service
sudo systemctl disable docker.socket
sudo systemctl stop docker
sudo killall containerd
"
pssh "sudo systemctl disable docker.service"
pssh "sudo systemctl disable docker.socket"
pssh "sudo systemctl stop docker"
}
_cmd kubebins "Install Kubernetes and CNI binaries but don't start anything"
@@ -130,15 +127,18 @@ _cmd_kubebins() {
set -e
cd /usr/local/bin
if ! [ -x etcd ]; then
##VERSION##
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz \
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.15/etcd-v3.3.15-linux-amd64.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
if ! [ -x hyperkube ]; then
##VERSION##
curl -L https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz \
| sudo tar --strip-components=3 -zx \
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
curl -L https://dl.k8s.io/v1.16.2/kubernetes-server-linux-amd64.tar.gz \
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
fi
if ! [ -x kubelet ]; then
for BINARY in kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy;
do
sudo ln -s hyperkube \$BINARY
done
fi
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
@@ -249,7 +249,6 @@ EOF"
# Install ship
pssh "
if [ ! -x /usr/local/bin/ship ]; then
##VERSION##
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx ship
fi"
@@ -257,7 +256,7 @@ EOF"
# Install the AWS IAM authenticator
pssh "
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
##VERSION##
##VERSION##
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
sudo chmod +x /usr/local/bin/aws-iam-authenticator
fi"

View File

@@ -1,24 +1,9 @@
#!/bin/sh
set -e
retry () {
N=$1
I=0
shift
while ! "$@"; do
I=$(($I+1))
if [ $I -gt $N ]; then
echo "FAILED, ABORTING"
exit 1
fi
echo "FAILED, RETRYING ($I/$N)"
done
}
export AWS_INSTANCE_TYPE=t3a.small
INFRA=infra/aws-eu-west-3
INFRA=infra/aws-us-west-2
STUDENTS=2
@@ -32,9 +17,9 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $STUDENTS
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl disabledocker $TAG
retry 5 ./workshopctl kubebins $TAG
./workshopctl deploy $TAG
./workshopctl disabledocker $TAG
./workshopctl kubebins $TAG
./workshopctl cards $TAG
SETTINGS=admin-kubenet
@@ -45,9 +30,9 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
retry 5 ./workshopctl disableaddrchecks $TAG
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl cards $TAG
SETTINGS=admin-kuberouter
@@ -58,9 +43,9 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
retry 5 ./workshopctl disableaddrchecks $TAG
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl cards $TAG
#INFRA=infra/aws-us-west-1
@@ -75,6 +60,7 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kube $TAG 1.15.9
./workshopctl deploy $TAG
./workshopctl kube $TAG 1.16.6
./workshopctl cards $TAG

69
slides/1.yml Normal file
View File

@@ -0,0 +1,69 @@
title: |
Jour 1
Fondamentaux
Conteneurs & Docker
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/enix/formation-highfive-202002)"
gitrepo: github.com/jpetazzo/container.training
slides: http://2020-02-enix.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
-
- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Naming_And_Inspecting.md
#- containers/Labels.md
- containers/Getting_Inside.md
#- containers/Resource_Limits.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
-
- containers/Container_Networking_Basics.md
#- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
#- containers/Ambassadors.md
- containers/Local_Development_Workflow.md
#- containers/Windows_Containers.md
#- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
#- containers/Docker_Machine.md
#- containers/Advanced_Dockerfiles.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Container_Engines.md
#- containers/Ecosystem.md
#- containers/Orchestration_Overview.md
-
- shared/thankyou.md
- containers/links.md

57
slides/2.yml Normal file
View File

@@ -0,0 +1,57 @@
title: |
Jour 2
Fondamentaux
Orchestration
& Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/enix/formation-highfive-202002)"
gitrepo: github.com/jpetazzo/container.training
slides: http://2020-02-enix.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectlrun.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
-
- k8s/shippingimages.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
- k8s/yamldeploy.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
-
- k8s/rollout.md
#- k8s/dryrun.md
- k8s/healthchecks.md
#- k8s/healthchecks-more.md
#- k8s/record.md
#- k8s/dashboard.md
- k8s/ingress.md
-
- shared/thankyou.md

81
slides/3.yml Normal file
View File

@@ -0,0 +1,81 @@
title: |
Jour 3
Méthodologies DevOps
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/enix/formation-highfive-202002)"
gitrepo: github.com/jpetazzo/container.training
slides: http://2020-02-enix.container.training/
exclude:
- self-paced
- hide-exercise
chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
-
- shared/prereqs.md
- shared/connecting.md
# Bien démarrer en local (minikube, kind)
- shared/sampleapp.md
- k8s/software-dev-banalities.md
- k8s/on-desktop.md
- k8s/volumes.md
- k8s/namespaces.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
- k8s/testing.md
-
- k8s/configuration.md
- k8s/sealed-secrets.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-secrets.md
-
- k8s/shippingimages.md
- k8s/registries.md
- k8s/stop-manual.md
- k8s/ci-cd.md
- k8s/exercise-ci-build.md
- k8s/kaniko.md
- k8s/exercise-ci-kaniko.md
- k8s/rollout.md
- k8s/advanced-rollout.md
- k8s/devs-and-ops-joined-topics.md
-
- k8s/prometheus-endpoint.md
- k8s/exercise-prometheus.md
- k8s/opentelemetry.md
- k8s/exercise-opentelemetry.md
- k8s/kubernetes-security.md
#- |
# # (Automatiser)
#- |
# # Fabrication d'image
#- |
# # Skaffold
#- |
# # Registries
#- |
# # Gitlab, CI
#- |
# # ROllout avancé, blue green, canary
#- |
# # Monitoring applicatif
#- |
# # Prometheus Grafana
#- |
# # Telemetry
-
- shared/thankyou.md

40
slides/4.yml Normal file
View File

@@ -0,0 +1,40 @@
title: |
Jour 4
Kubernetes Avancé
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/enix/formation-highfive-202002)"
gitrepo: github.com/jpetazzo/container.training
slides: http://2020-02-enix.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
-
- k8s/netpol.md
- k8s/authn-authz.md
-
- k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
-
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
-
- k8s/prometheus.md
- k8s/logs-centralized.md
- k8s/extending-api.md
- k8s/operators.md
#- k8s/operators-design.md
-
- shared/thankyou.md

42
slides/5.yml Normal file
View File

@@ -0,0 +1,42 @@
title: |
Jour 5
Opérer Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/enix/formation-highfive-202002)"
gitrepo: github.com/jpetazzo/container.training
slides: http://2020-02-enix.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
-
- k8s/prereqs-admin.md
- k8s/architecture.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
-
- k8s/multinode.md
- k8s/cni.md
-
- k8s/apilb.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
- k8s/staticpods.md
-
- k8s/control-plane-auth.md
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/podsecuritypolicy.md
-
- shared/thankyou.md

View File

@@ -2,7 +2,7 @@
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
/ /outreach.yml.html 200!
/ /menu.html 200!
# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack

View File

@@ -0,0 +1,5 @@
# Exercise -- write a simple pipeline
Let's create a simple pipeline with GitLab.
The code is at: https://github.com/enix/kubecoin-build

View File

@@ -0,0 +1,76 @@
# Advanced Rollout
- In some cases, the built-in rollout mechanism of Kubernetes is not enough.
- We may want more control over the rollout: feedback from monitoring, deployment
on multiple clusters, etc.
- Two "main" strategies exist here:
- canary deployment
- blue/green deployment
---
## Canary deployment
- focus on one component of the stack
- deploy a new version of the component next to the production one
- redirect a portion of production traffic to the new version
- scale up the new version and redirect more traffic, checking that everything is OK
- scale down the old version
- repeat the procedure, component by component
- That's roughly what Kubernetes does by default, but it updates every component at the same time
- It can be paired with `kubectl wait --for` and applying components sequentially,
for a hand-made canary deployment (see the sketch below)
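A minimal sketch of such a hand-made canary (all names and images are hypothetical, not part of these slides): a second Deployment whose pods carry the same `app` label as the stable one, so the existing Service load-balances across both, and the traffic split is simply the ratio of replica counts.

```yaml
# Hypothetical hand-made canary: the stable Deployment runs, say, 9 replicas of
# v1 with the label app=web; the "web" Service selects app=web, so it sends
# roughly 1/10th of the traffic to this canary.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-canary
spec:
  replicas: 1
  selector:
    matchLabels: {app: web, track: canary}
  template:
    metadata:
      labels: {app: web, track: canary}
    spec:
      containers:
      - name: web
        image: registry.example.com/web:v2   # the new version under test
```

Scaling `web-canary` up and the stable Deployment down (after checking the monitoring) completes the rollout; deleting `web-canary` aborts it.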
---
## Blue/Green deployment
- focus on the entire stack
- deploy a new stack
- check that the new stack works as expected
- switch traffic over to the new stack, and roll back if anything goes wrong
- garbage-collect the previous infrastructure
- there is nothing like that by default in Kubernetes
- a Helm chart with multiple releases is the closest equivalent
- it can be paired with ingress features like `nginx.ingress.kubernetes.io/canary-*`
(see the sketch below)
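A minimal sketch of the traffic switch, assuming two hypothetical stacks labeled `release: blue` and `release: green` running side by side: the Service selector is the single switch that decides which stack receives traffic.

```yaml
# Hypothetical blue/green switch: re-applying this Service with the selector
# pointed at "green" moves all traffic at once; pointing it back at "blue"
# is the rollback.
apiVersion: v1
kind: Service
metadata:
  name: frontend
spec:
  selector:
    app: frontend
    release: blue     # change to "green" to switch, back to "blue" to roll back
  ports:
  - port: 80
    targetPort: 8080
```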
---
## Not hand-made?
There are a few additional controllers that help achieve these kinds of rollout behaviours.
They leverage the Kubernetes API at different levels to achieve this goal.
---
## Spinnaker
- https://www.spinnaker.io
- Helps to deploy the same app on multiple clusters
- Able to analyse the rollout status (canary analysis) and correlate it with monitoring
- Rolls back if anything goes wrong
- Also supports Blue/Green
- Configuration done via UI
---
## Argo-rollout
- https://github.com/argoproj/argo-rollouts
- Replaces your Deployments with a "deployment-like" CRD (Custom Resource Definition)
- Full control via CRDs
- Blue/Green and Canary deployments (see the sketch below)
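For a rough idea of the format, here is a minimal sketch of a canary `Rollout` resource (the name, image, and step values are hypothetical; refer to the Argo Rollouts documentation for the authoritative schema).

```yaml
# Hypothetical Rollout: shift 20% of traffic to the new version, pause for a
# manual check, then let the controller finish the rollout.
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: web
spec:
  replicas: 5
  selector:
    matchLabels: {app: web}
  template:
    metadata:
      labels: {app: web}
    spec:
      containers:
      - name: web
        image: registry.example.com/web:v2
  strategy:
    canary:
      steps:
      - setWeight: 20
      - pause: {}        # wait for manual promotion
```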

View File

@@ -20,7 +20,7 @@ The control plane can run:
- in containers, on the same nodes that run other application workloads
(example: [Minikube](https://github.com/kubernetes/minikube); 1 node runs everything, [kind](https://kind.sigs.k8s.io/))
(example: Minikube; 1 node runs everything)
- on a dedicated node
@@ -28,7 +28,7 @@ The control plane can run:
- on a dedicated set of nodes
(example: [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way); [kops](https://github.com/kubernetes/kops))
(example: Kubernetes The Hard Way; kops)
- outside of the cluster

51
slides/k8s/ci-cd.md Normal file
View File

@@ -0,0 +1,51 @@
## Jenkins / Jenkins-X
- Multi-purpose CI
- Self-hosted CI for Kubernetes
- creates a namespace per commit and applies manifests in that namespace
<br/>
"A deployment per feature branch"
.small[
```shell
curl -L "https://github.com/jenkins-x/jx/releases/download/v2.0.1103/jx-darwin-amd64.tar.gz" | tar xzv jx
./jx boot
```
]
---
## GitLab
- Repository + registry + CI/CD integrated all-in-one
```shell
helm repo add gitlab https://charts.gitlab.io/
helm install gitlab gitlab/gitlab
```
---
## ArgoCD / flux
- Watch a Git repository and apply changes to Kubernetes
- Provide a UI to view changes and roll back
.small[
```shell
kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
```
]
---
## Tekton / knative
- Knative is a serverless project from Google
- Tekton leverages Knative to run pipelines
- not really user-friendly today, but stay tuned for wrappers/products
(see the sketch below)
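For a very rough idea of the building blocks, here is a minimal sketch of a Tekton `Task` (the task name, image, and command are hypothetical; Tasks are assembled into Pipelines and triggered by PipelineRuns).

```yaml
# Hypothetical Tekton Task: one step, running the test suite in a container.
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: run-tests
spec:
  steps:
  - name: test
    image: golang:1.13
    script: |
      go test ./...
```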

View File

@@ -81,7 +81,7 @@
## What version are we running anyway?
- When I say, "I'm running Kubernetes 1.15", is that the version of:
- When I say, "I'm running Kubernetes 1.16", is that the version of:
- kubectl
@@ -254,7 +254,7 @@ and kubectl, which can be one MINOR ahead or behind API server.]
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
```
- Look for the `image:` line, and update it to e.g. `v1.16.0`
- Look for the `image:` line, and update it to e.g. `v1.17.0`
]
@@ -308,11 +308,11 @@ and kubectl, which can be one MINOR ahead or behind API server.]
]
Note 1: kubeadm thinks that our cluster is running 1.16.0.
Note 1: kubeadm thinks that our cluster is running 1.17.0.
<br/>It is confused by our manual upgrade of the API server!
Note 2: kubeadm itself is still version 1.15.9.
<br/>It doesn't know how to upgrade to 1.16.X.
Note 2: kubeadm itself is still version 1.16.6.
<br/>It doesn't know how to upgrade to 1.17.X.
---
@@ -334,39 +334,8 @@ Note 2: kubeadm itself is still version 1.15.9.
]
Problem: kubeadm doesn't know how to handle
upgrades from version 1.15.
This is because we installed version 1.17 (or even later).
We need to install kubeadm version 1.16.X.
---
## Downgrading kubeadm
- We need to go back to version 1.16.X (e.g. 1.16.6)
.exercise[
- View available versions for package `kubeadm`:
```bash
apt show kubeadm -a | grep ^Version | grep 1.16
```
- Downgrade kubeadm:
```
sudo apt install kubeadm=1.16.6-00
```
- Check what kubeadm tells us:
```
sudo kubeadm upgrade plan
```
]
kubeadm should now agree to upgrade to 1.16.6.
Note: kubeadm still thinks that our cluster is running 1.17.0.
<br/>But at least it knows about version 1.17.X now.
---
@@ -382,7 +351,7 @@ kubeadm should now agree to upgrade to 1.16.6.
- Perform the upgrade:
```bash
sudo kubeadm upgrade apply v1.16.6
sudo kubeadm upgrade apply v1.17.2
```
]
@@ -406,7 +375,7 @@ kubeadm should now agree to upgrade to 1.16.6.
- Upgrade kubelet:
```bash
sudo apt install kubelet=1.16.6-00
sudo apt install kubelet=1.17.2-00
```
]
@@ -454,7 +423,7 @@ kubeadm should now agree to upgrade to 1.16.6.
## Upgrading kubelet the right way
- We need to upgrade kubeadm, upgrade kubelet config, then upgrade kubelet
- The command that we need to run was shown by kubeadm
(after upgrading the control plane)
@@ -463,10 +432,8 @@ kubeadm should now agree to upgrade to 1.16.6.
- Download the configuration on each node, and upgrade kubelet:
```bash
for N in 1 2 3; do
ssh test$N "
sudo apt install kubeadm=1.16.6-00 &&
sudo kubeadm upgrade node &&
sudo apt install kubelet=1.16.6-00"
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.17.2
ssh test$N sudo apt install kubelet=1.17.2-00
done
```
]
@@ -475,7 +442,7 @@ kubeadm should now agree to upgrade to 1.16.6.
## Checking what we've done
- All our nodes should now be updated to version 1.16.6
- All our nodes should now be updated to version 1.17.2
.exercise[
@@ -492,7 +459,7 @@ class: extra-details
## Skipping versions
- This example worked because we went from 1.15 to 1.16
- This example worked because we went from 1.16 to 1.17
- If you are upgrading from e.g. 1.14, you will have to go through 1.15 first

View File

@@ -28,7 +28,7 @@ The reference plugins are available [here].
Look in each plugin's directory for its documentation.
[here]: https://github.com/containernetworking/plugins/tree/master/plugins
[here]: https://github.com/containernetworking/plugins
---
@@ -162,8 +162,6 @@ class: extra-details
---
class: extra-details
## What's BGP?
- BGP (Border Gateway Protocol) is the protocol used between internet routers
@@ -222,22 +220,6 @@ class: extra-details
---
class: extra-details
## Checking the CNI configuration
- By default, kubelet gets the CNI configuration from `/etc/cni/net.d`
.exercise[
- Check the content of `/etc/cni/net.d`
]
(On most machines, at this point, `/etc/cni/net.d` doesn't even exist).)
---
## Our control plane
- We will use a Compose file to start the control plane
@@ -376,26 +358,6 @@ Note: the DaemonSet won't create any pods (yet) since there are no nodes (yet).
---
class: extra-details
## Checking the CNI configuration
- At this point, kuberouter should have installed its CNI configuration
(in `/etc/cni/net.d`)
.exercise[
- Check the content of `/etc/cni/net.d`
]
- There should be a file created by kuberouter
- The file should contain the node's podCIDR
---
## Setting up a test
- Let's create a Deployment and expose it with a Service
@@ -443,8 +405,6 @@ This shows that we are using IPVS (vs. iptables, which picked random endpoints).
---
class: extra-details
## Troubleshooting
- What if we need to check that everything is working properly?
@@ -468,8 +428,6 @@ We should see the local pod CIDR connected to `kube-bridge`, and the other nodes
---
class: extra-details
## More troubleshooting
- We can also look at the output of the kube-router pods
@@ -486,8 +444,6 @@ class: extra-details
---
class: extra-details
## Trying `kubectl logs` / `kubectl exec`
.exercise[
@@ -513,8 +469,6 @@ What does that mean?
---
class: extra-details
## Internal name resolution
- To execute these commands, the API server needs to connect to kubelet
@@ -531,8 +485,6 @@ class: extra-details
---
class: extra-details
## Another way to check the logs
- We can also ask the logs directly to the container engine
@@ -574,3 +526,163 @@ done
- This could be useful for embedded platforms with very limited resources
(or lab environments for learning purposes)
---
# Interconnecting clusters
- We assigned different Cluster CIDRs to each cluster
- This allows us to connect our clusters together
- We will leverage kube-router BGP abilities for that
- We will *peer* each kube-router instance with a *route reflector*
- As a result, we will be able to ping each other's pods
---
## Disclaimers
- There are many methods to interconnect clusters
- Depending on your network implementation, you will use different methods
- The method shown here only works for nodes with direct layer 2 connection
- We will often need to use tunnels or other network techniques
---
## The plan
- Someone will start the *route reflector*
(typically, that will be the person presenting these slides!)
- We will update our kube-router configuration
- We will add a *peering* with the route reflector
(instructing kube-router to connect to it and exchange route information)
- We should see the routes to other clusters on our nodes
(in the output of e.g. `route -n` or `ip route show`)
- We should be able to ping pods of other nodes
---
## Starting the route reflector
- Only do this slide if you are doing this on your own
- There is a Compose file in the `compose/frr-route-reflector` directory
- Before continuing, make sure that you have the IP address of the route reflector
---
## Configuring kube-router
- This can be done in two ways:
- with command-line flags to the `kube-router` process
- with annotations to Node objects
- We will use the command-line flags
(because it will automatically propagate to all nodes)
.footnote[Note: with Calico, this is achieved by creating a BGPPeer CRD.]
---
## Updating kube-router configuration
- We need to pass two command-line flags to the kube-router process
.exercise[
- Edit the `kuberouter.yaml` file
- Add the following flags to the kube-router arguments:
```
- "--peer-router-ips=`X.X.X.X`"
- "--peer-router-asns=64512"
```
(Replace `X.X.X.X` with the route reflector address)
- Update the DaemonSet definition:
```bash
kubectl apply -f kuberouter.yaml
```
]
---
## Restarting kube-router
- The DaemonSet will not update the pods automatically
(it is using the default `updateStrategy`, which is `OnDelete`)
- We will therefore delete the pods
(they will be recreated with the updated definition)
.exercise[
- Delete all the kube-router pods:
```bash
kubectl delete pods -n kube-system -l k8s-app=kube-router
```
]
Note: the other `updateStrategy` for a DaemonSet is RollingUpdate.
<br/>
For critical services, we might want to precisely control the update process.
---
## Checking peering status
- We can see informative messages in the output of kube-router:
```
time="2019-04-07T15:53:56Z" level=info msg="Peer Up"
Key=X.X.X.X State=BGP_FSM_OPENCONFIRM Topic=Peer
```
- We should see the routes of the other clusters show up
- For debugging purposes, the reflector also exports a route to 1.0.0.2/32
- That route will show up like this:
```
1.0.0.2 172.31.X.Y 255.255.255.255 UGH 0 0 0 eth0
```
- We should be able to ping the pods of other clusters!
---
## If we wanted to do more ...
- kube-router can also export ClusterIP addresses
(by adding the flag `--advertise-cluster-ip`)
- They are exported individually (as /32)
- This would allow us to easily access other clusters' services
(without having to resolve the individual addresses of pods)
- Even better if it's combined with DNS integration
(to facilitate name → ClusterIP resolution)

View File

@@ -71,7 +71,7 @@
(straightforward on CPU; more complex on other metrics)
- Resource management and scheduling
- Ressource management and scheduling
(reserve CPU/RAM for containers; placement constraints)

View File

@@ -105,7 +105,23 @@ The dashboard will then ask you which authentication you want to use.
---
## Security implications of `kubectl apply`
## Other dashboards
- [Kube Web View](https://codeberg.org/hjacobs/kube-web-view)
- read-only dashboard
- optimized for "troubleshooting and incident response"
- see [vision and goals](https://kube-web-view.readthedocs.io/en/latest/vision.html#vision) for details
- [Kube Ops View](https://github.com/hjacobs/kube-ops-view)
- "provides a common operational picture for multiple Kubernetes clusters"
---
# Security implications of `kubectl apply`
- When we do `kubectl apply -f <URL>`, we create arbitrary resources

View File

@@ -0,0 +1,10 @@
## We are done, what else?
We have seen what it means to develop an application on Kubernetes.
There are still a few subjects to tackle that are not purely developer topics,
but they do have *some involvement* for developers:
- Monitoring
- Security

View File

@@ -0,0 +1,5 @@
## Exercise - building with Kubernetes
- Let's go to https://github.com/enix/kubecoin
- Our goal is to follow the instructions and complete exercise #1

View File

@@ -0,0 +1,3 @@
## Exercise - build with Kaniko
Complete exercise #2 (again, the code is at: https://github.com/enix/kubecoin )

View File

@@ -1,31 +0,0 @@
# Exercise — ConfigMaps
- In this exercise, we will use a ConfigMap to store static assets
- While there are some circumstances where this can be useful ...
- ... It is generally **not** a good idea!
- Once you've read that warning, check the next slide for instructions :)
---
## Exercise — ConfigMaps
This will use the wordsmith app.
We want to store the static files (served by `web`) in a ConfigMap.
1. Transform the `static` directory into a ConfigMap.
(https://github.com/jpetazzo/wordsmith/tree/master/web/static)
2. Find out where that `static` directory is located in `web`.
(for instance, by using `kubectl exec` to investigate)
3. Update the definition of the `web` Deployment to use the ConfigMap.
(note: fonts and images will be broken; that's OK)
4. Make a minor change in the ConfigMap (e.g. change the text color)

View File

@@ -1,63 +0,0 @@
# Exercise — Helm charts
Let's write a Helm chart for wordsmith!
We will need the YAML manifests that we wrote earlier.
Level 1: create a chart to deploy wordsmith.
Level 2: make it so that the number of replicas can be set with `--set replicas=X`.
Level 3: change the colors of the lego bricks.
(For level 3, fork the repository and use ctr.run to build images.)
See next slide if you need hints!
---
## Hints
*Scroll one slide at a time to see hints.*
--
Use `helm create` to create a new chart.
--
Delete the content of the `templates` directory and put your YAML instead.
--
Install the resulting chart. Voilà!
--
Use `{{ .Values.replicas }}` in the YAML manifest for `words`.
--
Also add `replicas: 5` to `values.yaml` to provide a default value.
---
## Changing the color
- Fork the repository
- Make sure that your fork has valid Dockerfiles
(or identify a branch that has valid Dockerfiles)
- Use the following images:
ctr.run/yourgithubusername/wordsmith/db:branchname
(replace db with web and words for the other components)
- Change the images and/or CSS in `web/static`
- Commit, push, trigger a rolling update
(`imagePullPolicy` should be `Always`, which is the default)

View File

@@ -0,0 +1,5 @@
## Exercise - monitor with OpenTelemetry
Complete exercise #5 (again, the code is at: https://github.com/enix/kubecoin )
*Note: not all daemons are "ready" for OpenTelemetry; only `rng` and `worker` are.*

View File

@@ -0,0 +1,5 @@
## Exercise - monitor with Prometheus
Complete exercise #4 (again, the code is at: https://github.com/enix/kubecoin )
*Note: not all daemons are "ready" for Prometheus; only `hasher` and `redis` are.*

View File

@@ -1,39 +0,0 @@
# Exercise — deploying on Kubernetes
Let's deploy the wordsmith app on Kubernetes!
As a reminder, we have the following components:
| Name | Image | Port |
|-------|---------------------------------|------|
| db | jpetazzo/wordsmith-db:latest | 5432 |
| web | jpetazzo/wordsmith-web:latest | 80 |
| words | jpetazzo/wordsmith-words:latest | 8080 |
We need `web` to be available from outside the cluster.
See next slide if you need hints!
---
## Hints
*Scroll one slide at a time to see hints.*
--
- For each component, we need to create a deployment and a service
--
- Deployments can be created with `kubectl create deployment`
--
- Services can be created with `kubectl expose`
--
- Public services (like `web`) need to use a special type
(e.g. `NodePort`)

View File

@@ -1,77 +0,0 @@
# Exercise — writing YAML
Let's write YAML manifests for the wordsmith app!
It can be a single YAML file or multiple files in a directory.
See next slides for testing instructions and hints.
---
## How to test our YAML
If `XYZ` is that YAML file (or directory with YAML files), we should be able to:
1. Create a new namespace, e.g. `foo123`
2. Deploy wordsmith with a single command
(e.g. `kubectl apply --namespace foo123 -f XYZ`)
3. Find out the connection information for `web`
(e.g. `kubectl get service web --namespace`)
4. Connect to it and see the wordsmith app
See next slide for hints.
---
## Strategies
There are at least three methods to write our YAML.
1. Dump the YAML of existing wordsmith deployments and services.
(we can dump YAML with `kubectl get -o yaml ...`)
2. Adapt existing YAML (from the docs or dockercoins).
(for reference, kubercoins is at https://github.com/jpetazzo/kubercoins)
3. Write it entirely from scratch.
See next slide for more hints.
---
## Adapting YAML
*Scroll one slide at a time to see hints.*
--
One option is to start with the YAML from kubercoins.
(see https://github.com/jpetazzo/kubercoins)
--
Adapt the YAML of a deployment (e.g. worker) to run "web".
--
We need to change the name, labels, selectors, and image.
--
Then adapt the YAML of a service (e.g. webui).
--
We need to change the name, labels, selectors, possibly port number.
--
Repeat for the other components.

View File

@@ -1,157 +0,0 @@
# Interconnecting clusters
- We assigned different Cluster CIDRs to each cluster
- This allows us to connect our clusters together
- We will leverage kube-router BGP abilities for that
- We will *peer* each kube-router instance with a *route reflector*
- As a result, we will be able to ping each other's pods
---
## Disclaimers
- There are many methods to interconnect clusters
- Depending on your network implementation, you will use different methods
- The method shown here only works for nodes with direct layer 2 connection
- We will often need to use tunnels or other network techniques
---
## The plan
- Someone will start the *route reflector*
(typically, that will be the person presenting these slides!)
- We will update our kube-router configuration
- We will add a *peering* with the route reflector
(instructing kube-router to connect to it and exchange route information)
- We should see the routes to other clusters on our nodes
(in the output of e.g. `route -n` or `ip route show`)
- We should be able to ping pods of other nodes
---
## Starting the route reflector
- Only do this slide if you are doing this on your own
- There is a Compose file in the `compose/frr-route-reflector` directory
- Before continuing, make sure that you have the IP address of the route reflector
---
## Configuring kube-router
- This can be done in two ways:
- with command-line flags to the `kube-router` process
- with annotations to Node objects
- We will use the command-line flags
(because it will automatically propagate to all nodes)
.footnote[Note: with Calico, this is achieved by creating a BGPPeer CRD.]
---
## Updating kube-router configuration
- We need to pass two command-line flags to the kube-router process
.exercise[
- Edit the `kuberouter.yaml` file
- Add the following flags to the kube-router arguments:
```
- "--peer-router-ips=`X.X.X.X`"
- "--peer-router-asns=64512"
```
(Replace `X.X.X.X` with the route reflector address)
- Update the DaemonSet definition:
```bash
kubectl apply -f kuberouter.yaml
```
]
---
## Restarting kube-router
- The DaemonSet will not update the pods automatically
(it is using the default `updateStrategy`, which is `OnDelete`)
- We will therefore delete the pods
(they will be recreated with the updated definition)
.exercise[
- Delete all the kube-router pods:
```bash
kubectl delete pods -n kube-system -l k8s-app=kube-router
```
]
Note: the other `updateStrategy` for a DaemonSet is RollingUpdate.
<br/>
For critical services, we might want to precisely control the update process.
---
## Checking peering status
- We can see informative messages in the output of kube-router:
```
time="2019-04-07T15:53:56Z" level=info msg="Peer Up"
Key=X.X.X.X State=BGP_FSM_OPENCONFIRM Topic=Peer
```
- We should see the routes of the other clusters show up
- For debugging purposes, the reflector also exports a route to 1.0.0.2/32
- That route will show up like this:
```
1.0.0.2 172.31.X.Y 255.255.255.255 UGH 0 0 0 eth0
```
- We should be able to ping the pods of other clusters!
---
## If we wanted to do more ...
- kube-router can also export ClusterIP addresses
(by adding the flag `--advertise-cluster-ip`)
- They are exported individually (as /32)
- This would allow us to easily access other clusters' services
(without having to resolve the individual addresses of pods)
- Even better if it's combined with DNS integration
(to facilitate name → ClusterIP resolution)

34
slides/k8s/kaniko.md Normal file
View File

@@ -0,0 +1,34 @@
## Privileged containers
- Running a privileged container can be really harmful for the node it runs on.
- Getting control of a node can expose other containers in the cluster, and the cluster itself
- It's even worse when it is Docker that runs inside this privileged container
- `docker build` doesn't allow running privileged containers to build layers
- but nothing forbids running `docker run --privileged`
---
## Kaniko
- https://github.com/GoogleContainerTools/kaniko
- *kaniko doesn't depend on a Docker daemon and executes each command
within a Dockerfile completely in userspace*
- Kaniko is only a build system; it doesn't include a runtime like Docker does
- it generates OCI-compatible images, which can then run on Docker or any other CRI runtime
- it uses a different cache system than Docker (see the sketch below)
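A minimal sketch of running a Kaniko build as an unprivileged Pod (the repository and registry names are hypothetical placeholders; pushing to a real registry would also require mounting credentials).

```yaml
# Hypothetical build Pod: the kaniko executor fetches the build context,
# executes the Dockerfile entirely in userspace, and pushes the result.
apiVersion: v1
kind: Pod
metadata:
  name: kaniko-build
spec:
  restartPolicy: Never
  containers:
  - name: kaniko
    image: gcr.io/kaniko-project/executor:latest
    args:
    - --context=git://github.com/example/app
    - --dockerfile=Dockerfile
    - --destination=registry.example.com/example/app:latest
```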
---
## Rootless Docker and rootless BuildKit
- This is experimental
- It has a lot of requirements (kernel parameters, options to set)
- But it exists

View File

@@ -115,22 +115,6 @@ The output is a list of available API routes.
---
## OpenAPI (fka Swagger)
- The Kubernetes API serves an OpenAPI Specification
(OpenAPI was formerly known as Swagger)
- OpenAPI has many advantages
(generate client library code, generate test code ...)
- For us, this means we can explore the API with [Swagger UI](https://swagger.io/tools/swagger-ui/)
(for instance with the [Swagger UI add-on for Firefox](https://addons.mozilla.org/en-US/firefox/addon/swagger-ui-ff/))
---
## `kubectl proxy` is intended for local use
- By default, the proxy listens on port 8001
@@ -151,8 +135,6 @@ The output is a list of available API routes.
---
class: extra-details
## Running `kubectl proxy` on a remote machine
- If we wanted to connect to the proxy from another machine, we would need to:
@@ -170,8 +152,6 @@ class: extra-details
---
class: extra-details
## Security considerations
- Running `kubectl proxy` openly is a huge security risk

View File

@@ -0,0 +1,78 @@
# Security and Kubernetes
There are many mechanisms in Kubernetes to ensure security.
Obviously, the more you constrain your app, the better.
There are also mechanisms to forbid "unsafe" applications from being launched on
Kubernetes, but that's more for the ops folks 😈 (more on that in the next days)
Let's focus on what we can do on the developer laptop to make apps
compatible with a secured system, whether it is enforced or not (it's always a good practice)
---
## No container in privileged mode
- risks:
- If one privileged container gets compromised,
we basically get full access to the node from within a container
(no need to tamper with auth logs or alter binaries).
- Sniffing the network often gives access to the entire cluster.
- how to avoid:
```
[...]
spec:
  containers:
  - name: foo
    securityContext:
      privileged: false
```
Luckily, that's the default!
---
## No container running as "root"
- risks:
- bind-mounting a directory like /usr/bin allows changing core system files on the node
<br/>e.g.: copy a tampered version of "ping", wait for an admin to log in
and issue a ping command, and bingo!
- how to avoid:
```
[...]
spec:
  containers:
  - name: foo
    securityContext:
      runAsUser: 1000
      runAsGroup: 100
```
- The default is to use the image's default user
- If you're writing your own Dockerfile, don't forget about the `USER` instruction
---
## Capabilities
- You can give capabilities one-by-one to a container
- It's useful if you need extra capabilities (for some reason) without granting full "root" privileges
- risks: none to speak of, except when granting a long list of capabilities
- how to use:
```
[...]
spec:
  containers:
  - name: foo
    securityContext:
      capabilities:
        add: ["NET_ADMIN", "SYS_TIME"]
        drop: []
```
The default is to use the container runtime's defaults
- and we can also drop default capabilities granted by the container runtime!

View File

@@ -1,4 +1,4 @@
# Last words
# What's next?
- Congratulations!
@@ -189,20 +189,6 @@ are a few tools that can help us.*
---
## Developer experience
- How do we on-board a new developer?
- What do they need to install to get a dev stack?
- How does a code change make it from dev to prod?
- How does someone add a component to a stack?
*These questions are good "sanity checks" to validate our strategy!*
---
## Some guidelines
- Start small

View File

@@ -64,19 +64,3 @@ If it shows our nodes and their CPU and memory load, we're good!
]
- We can also use selectors (`-l app=...`)
---
## Other tools
- kube-capacity is a great CLI tool to view resources
(https://github.com/robscott/kube-capacity)
- It can show resource and limits, and compare them with usage
- It can show utilization per node, or per pod
- kube-resource-report can generate HTML reports
(https://github.com/hjacobs/kube-resource-report)

179
slides/k8s/on-desktop.md Normal file
View File

@@ -0,0 +1,179 @@
# Development Workflow
In this section we will see how to set up a local development workflow.
We will list multiple options.
Keep in mind that we don't have to use *all* these tools!
It's up to the developer to find what best suits them.
---
## What does it mean to develop on Kubernetes?
In theory, the generic workflow is:
1. Make changes to our code or edit a Dockerfile
2. Build a new Docker image with a new tag
3. Push that Docker image to a registry
4. Update the YAML or templates referencing that Docker image
<br/>(e.g. of the corresponding Deployment, StatefulSet, Job ...)
5. Apply the YAML or templates
6. Are we satisfied with the result?
<br/>No → go back to step 1 (or step 4 if the image is OK)
<br/>Yes → commit and push our changes to source control
---
## A few quirks
In practice, there are some details that make this workflow more complex.
- We need a Docker container registry to store our images
<br/>
(for Open Source projects, a free Docker Hub account works fine)
- We need to set image tags properly, hopefully automatically
- If we decide to use a fixed tag (like `:latest`) instead:
- we need to specify `imagePullPolicy=Always` to force image pulls (see the sketch after this list)
- we need to trigger a rollout when we want to deploy a new image
<br/>(with `kubectl rollout restart` or by killing the running pods)
- We need a fast internet connection to push the images
- We need to regularly clean up the registry to avoid accumulating old images
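A minimal sketch of the fixed-tag setup mentioned above (the Deployment name and image are hypothetical):

```yaml
# Hypothetical Deployment excerpt using a fixed tag: imagePullPolicy: Always
# makes every new pod re-pull the image.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  replicas: 1
  selector:
    matchLabels: {app: myapp}
  template:
    metadata:
      labels: {app: myapp}
    spec:
      containers:
      - name: myapp
        image: registry.example.com/myapp:latest
        imagePullPolicy: Always
```

After pushing a new image, `kubectl rollout restart deployment myapp` recreates the pods, which then pull the fresh `:latest`.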
---
## When developing locally
- If we work with a local cluster, pushes and pulls are much faster
- Even better, with a one-node cluster, most of these problems disappear
- If we build and run the images on the same node, ...
- we don't need to push images
- we don't need a fast internet connection
- we don't need a registry
- we can use bind mounts to edit code locally and make changes available immediately in running containers
- This means that it is much simpler to deploy to a local development environment (like Minikube, Docker Desktop ...) than to a "real" cluster
---
## Minikube
- Start a VM with the hypervisor of your choice: VirtualBox, kvm, Hyper-V ...
- Well supported by the Kubernetes community
- Lot of addons
- Easy cleanup: delete the VM with `minikube delete`
- Bind mounts depend on the underlying hypervisor
(they may require additional setup)
---
## Docker Desktop
- Available for Mac and Windows
- Start a VM with the appropriate hypervisor (even better!)
- Bind mounts work out of the box
```yaml
volumes:
- name: repo_dir
  hostPath:
    path: /C/Users/Enix/my_code_repository
```
- Ingress and other addons need to be installed manually
---
## Kind
- Kubernetes-in-Docker
- Uses Docker-in-Docker to run Kubernetes
<br/>
(technically, it's more like Containerd-in-Docker)
- We don't get a real Docker Engine (and cannot build Dockerfiles)
- Single-node by default, but multi-node clusters are possible
- Very convenient to test Kubernetes deployments when only Docker is available
<br/>
(e.g. on public CI services like Travis, Circle, GitHub Actions ...)
- Bind mounts require extra configuration
- Extra configuration for a couple of addons; totally custom for others
- Doesn't work with BTRFS (sorry BTRFS users😢)
---
## microk8s
- Distribution of Kubernetes using Snap
(Snap is a container-like method to install software)
- Available on Ubuntu and derivatives
- Bind mounts work natively (but require extra setup if we run in a VM)
- Big list of addons; easy to install
---
## Proper tooling
The simple workflow seems to be:
- set up a one-node cluster with one of the methods mentioned previously,
- find the remote Docker endpoint,
- configure the `DOCKER_HOST` variable to use that endpoint,
- follow the previous 7-step workflow.
Can we do better?
---
## Helpers
- Skaffold (https://skaffold.dev/):
- build with docker, kaniko, google builder
- install with pure yaml manifests, kustomize, helm
- Tilt (https://tilt.dev/)
- Tiltfile is programmatic format (python ?)
- Primitive for building with docker
- Primitive for deploying with pure yaml manifests, kustomize, helm
- Garden (https://garden.io/)
- Forge (https://forge.sh/)
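For a rough idea of what the Skaffold configuration looks like, here is a minimal sketch (the API version, image name, and manifest path are assumptions, not taken from these slides):

```yaml
# Hypothetical skaffold.yaml: `skaffold dev` rebuilds the image and re-applies
# the manifests every time the source code changes.
apiVersion: skaffold/v2beta1
kind: Config
build:
  artifacts:
  - image: registry.example.com/myapp
deploy:
  kubectl:
    manifests:
    - k8s/*.yaml
```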

View File

@@ -0,0 +1,84 @@
# OpenTelemetry
*OpenTelemetry* is a "tracing" framework.
It's a fusion of two other frameworks:
*OpenTracing* and *OpenCensus*.
Its goal is to provide deep integration with programming languages and
application frameworks, to enable deep-dive tracing of events across different components.
---
## Span! Span! Span!
- A unit of tracing is called a *span*
- A span has: a start time, a stop time, and an ID
- It represents an action that took some time to complete
(e.g.: function call, database transaction, REST API call ...)
- A span can have a parent span, and can have multiple child spans
(e.g.: when calling function `B`, sub-calls to `C` and `D` were issued)
- Think of it as a "tree" of calls
---
## Distributed tracing
- When two components interact, their spans can be connected together
- Example: microservice `A` sends a REST API call to microservice `B`
- `A` will have a span for the call to `B`
- `B` will have a span for the call from `A`
<br/>(that normally starts shortly after, and finishes shortly before)
- the span of `A` will be the parent of the span of `B`
- they join the same "tree" of calls
<!-- FIXME the thing below? -->
details: `A` sends headers (depending on the protocol used) carrying the span ID,
so that `B` can generate child spans that join the same tree of calls
---
## Centrally stored
- What do we do with all these spans?
- We store them!
- In the previous example:
- `A` will send trace information to its local agent
- `B` will do the same
- every span will end up in the same DB
- at a later point, we can reconstruct the "tree" of calls and analyze it
- There are multiple implementations of this stack (agent + DB + web UI)
(the most famous open source ones are Zipkin and Jaeger)
---
## Data sampling
- Do we store *all* the spans?
(it looks like this could need a lot of storage!)
- No, we can use *sampling*, to reduce storage and network requirements
- Smart sampling is applied directly in the application, to save CPU when a span is not needed
- It also ensures that if a span is marked as sampled, all its child spans are sampled as well
(so that the tree of call is complete)

View File

@@ -0,0 +1,150 @@
# Prometheus
Prometheus is a monitoring system with a small storage I/O footprint.
It's quite ubiquitous in the Kubernetes world.
This section is not an in-depth description of Prometheus.
*Note: More on Prometheus next day!*
<!--
FIXME maybe just use prometheus.md and add this file after it?
This way there is not need to write a Prom intro.
-->
---
## Prometheus exporter
- Prometheus *scrapes* (pulls) metrics from *exporters*
- A Prometheus exporter is an HTTP endpoint serving a response like this one:
```
# HELP http_requests_total The total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="post",code="200"} 1027 1395066363000
http_requests_total{method="post",code="400"} 3 1395066363000
# Minimalistic line:
metric_without_timestamp_and_labels 12.47
```
- Our goal, as a developer, will be to expose such an endpoint to Prometheus
---
## Implementing a Prometheus exporter
Multiple strategies can be used:
- Implement the exporter in the application itself
(especially if it's already an HTTP server)
- Use building blocks that may already expose such an endpoint
(puma, uwsgi)
- Add a sidecar exporter that leverages and adapts an existing monitoring channel
(e.g. JMX for Java applications)
---
## Implementing a Prometheus exporter
- The Prometheus client libraries are often the easiest solution
- They offer multiple ways of integration, including:
- "I'm already running a web server, just add a monitoring route"
- "I don't have a web server (or I want another one), please run one in a thread"
- Client libraries for various languages:
- https://github.com/prometheus/client_python
- https://github.com/prometheus/client_ruby
- https://github.com/prometheus/client_golang
(Can you see the pattern?)
---
## Adding a sidecar exporter
- There are many exporters available already:
https://prometheus.io/docs/instrumenting/exporters/
- These are "translators" from one monitoring channel to another
- Writing your own is not complicated
(using the client libraries mentioned previously)
- Avoid exposing the internal monitoring channel more than necessary
(the app and its sidecars run in the same network namespace,
<br/>so they can communicate over `localhost`; see the sketch below)
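As an illustration, a minimal sketch of a Pod running Redis with a sidecar exporter (the `oliver006/redis_exporter` image and port are assumptions; any existing exporter from the list above would do):

```yaml
# Hypothetical sidecar setup: the exporter talks to Redis over localhost and
# exposes Prometheus metrics on its own port, without exposing Redis itself.
apiVersion: v1
kind: Pod
metadata:
  name: redis
  labels:
    app: redis
spec:
  containers:
  - name: redis
    image: redis:5
    ports:
    - containerPort: 6379
  - name: exporter
    image: oliver006/redis_exporter:latest
    ports:
    - containerPort: 9121    # the endpoint that Prometheus will scrape
```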
---
## Configuring the Prometheus server
- We need to tell the Prometheus server to *scrape* our exporter
- Prometheus has a very flexible "service discovery" mechanism
(to discover and enumerate the targets that it should scrape)
- Depending on how we installed Prometheus, various methods might be available
---
## Configuring Prometheus, option 1
- Edit `prometheus.conf` (see the sketch below)
- Always possible
(we should always have a Prometheus configuration file somewhere!)
- Dangerous and error-prone
(if we get it wrong, it is very easy to break Prometheus)
- Hard to maintain
(the file will grow over time, and might accumulate obsolete information)
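For reference, a minimal sketch of the kind of stanza that gets added to the Prometheus configuration (the job name and target are hypothetical):

```yaml
# Hypothetical static scrape configuration: Prometheus polls the exporter
# endpoint every 30 seconds.
scrape_configs:
- job_name: my-app
  scrape_interval: 30s
  metrics_path: /metrics
  static_configs:
  - targets:
    - my-app.my-namespace.svc:9090
```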
---
## Configuring Prometheus, option 2
- Add *annotations* to the pods or services to monitor
- We can do that if Prometheus is installed with the official Helm chart
- Prometheus will detect these annotations and automatically start scraping
- Example:
```yaml
annotations:
  prometheus.io/port: "9090"
  prometheus.io/path: /metrics
```
---
## Configuring Prometheus, option 3
- Create a ServiceMonitor custom resource (see the sketch below)
- We can do that if we are using the CoreOS Prometheus operator
- See the [Prometheus operator documentation](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) for more details
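A minimal sketch of such a resource (the labels and port name are hypothetical, and must match the Service exposing the metrics):

```yaml
# Hypothetical ServiceMonitor: tells the Prometheus operator to scrape every
# Service labeled app=my-app, on the port named "metrics".
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: my-app
spec:
  selector:
    matchLabels:
      app: my-app
  endpoints:
  - port: metrics
    interval: 30s
```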

99
slides/k8s/registries.md Normal file
View File

@@ -0,0 +1,99 @@
# Registries
- There are lots of options to ship our container images to a registry
- We can group them depending on some characteristics:
- SaaS or self-hosted
- with or without a build system
---
## Docker registry
- Self-hosted and [open source](https://github.com/docker/distribution)
- Runs in a single Docker container
- Supports multiple storage backends
- Supports basic authentication out of the box
- [Other authentication schemes](https://docs.docker.com/registry/deploying/#more-advanced-authentication) through proxy or delegation
- No build system
- To run it with the Docker engine:
```shell
docker run -d -p 5000:5000 --name registry registry:2
```
- Or use the dedicated plugin in minikube, microk8s, etc.
---
## Harbor
- Self-hosted and [open source](https://github.com/goharbor/harbor)
- Supports both Docker images and Helm charts
- Advanced authentication mechanisms
- Multi-site synchronisation
- Vulnerability scanning
- No build system
- To run it with Helm:
```shell
helm repo add harbor https://helm.goharbor.io
helm install my-release harbor/harbor
```
---
## Gitlab
- Available both as a SaaS product and self-hosted
- SaaS product is free for open source projects; paid subscription otherwise
- Some parts are [open source](https://gitlab.com/gitlab-org/gitlab-foss/)
- Integrated CI
- No build system (but a custom build system can be hooked to the CI)
- To run it with Helm:
```shell
helm repo add gitlab https://charts.gitlab.io/
helm install gitlab gitlab/gitlab
```
---
## Docker Hub
- SaaS product: [hub.docker.com](https://hub.docker.com)
- Free for public images; paid subscription for private ones
- Build system included
---
## Quay
- Available both as a SaaS product ([quay.io](https://quay.io)) and self-hosted (Quay)
- SaaS product is free for public repositories; paid subscription otherwise
- Some components of Quay and quay.io are open source
(see [Project Quay](https://www.projectquay.io/) and the [announcement](https://www.redhat.com/en/blog/red-hat-introduces-open-source-project-quay-container-registry))
- Build system included

View File

@@ -80,6 +80,7 @@
- Rolling updates can be monitored with the `kubectl rollout` subcommand
---
class: hide-exercise
## Rolling out the new `worker` service
@@ -109,6 +110,7 @@
That rollout should be pretty quick. What shows in the web UI?
---
class: hide-exercise
## Give it some time
@@ -131,6 +133,7 @@ That rollout should be pretty quick. What shows in the web UI?
(The grace period is 30 seconds, but [can be changed](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods) if needed)
---
class: hide-exercise
## Rolling out something invalid
@@ -148,10 +151,10 @@ That rollout should be pretty quick. What shows in the web UI?
kubectl rollout status deploy worker
```
/<!--
```wait Waiting for deployment```
```key ^C```
/-->
]
@@ -162,6 +165,7 @@ Our rollout is stuck. However, the app is not dead.
(After a minute, it will stabilize to be 20-25% slower.)
---
class: hide-exercise
## What's going on with our rollout?
@@ -202,6 +206,7 @@ class: extra-details
- Our rollout is stuck at this point!
---
class: hide-exercise
## Checking the dashboard during the bad rollout
@@ -218,6 +223,7 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
]
---
class: hide-exercise
## Recovering from a bad rollout
@@ -240,6 +246,7 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
]
---
class: hide-exercise
## Rolling back to an older version
@@ -250,6 +257,7 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
- How can we get back to the previous version?
---
class: hide-exercise
## Multiple "undos"
@@ -269,6 +277,7 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
🤔 That didn't work.
---
class: hide-exercise
## Multiple "undos" don't work
@@ -291,6 +300,8 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
---
class: hide-exercise
## In this specific scenario
- Our version numbers are easy to guess
@@ -301,6 +312,8 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
---
class: hide-exercise
## Listing versions
- We can list successive versions of a Deployment with `kubectl rollout history`
@@ -321,6 +334,7 @@ We might see something like 1, 4, 5.
(Depending on how many "undos" we did before.)
---
class: hide-exercise
## Explaining deployment revisions
@@ -340,6 +354,7 @@ We might see something like 1, 4, 5.
---
class: extra-details
class: hide-exercise
## What about the missing revisions?
@@ -354,6 +369,7 @@ class: extra-details
(if we wanted to!)
---
class: hide-exercise
## Rolling back to an older version
@@ -373,6 +389,7 @@ class: extra-details
---
class: extra-details
class: hide-exercise
## Changing rollout parameters
@@ -380,7 +397,7 @@ class: extra-details
- revert to `v0.1`
- be conservative on availability (always have desired number of available workers)
- go slow on rollout speed (update only one pod at a time)
- give some time to our workers to "warm up" before starting more
The corresponding changes can be expressed in the following YAML snippet:
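Here is a sketch of what such a snippet could look like (assuming our Deployment is named `worker`; the exact image tag and numbers may differ):
```yaml
spec:
  template:
    spec:
      containers:
      - name: worker
        image: dockercoins/worker:v0.1   # revert to the v0.1 image
  strategy:
    rollingUpdate:
      maxUnavailable: 0                  # never go below the desired replica count
      maxSurge: 1                        # update only one pod at a time
  minReadySeconds: 10                    # give new pods some time to "warm up"
```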
@@ -404,6 +421,7 @@ spec:
---
class: extra-details
class: hide-exercise
## Applying changes through a YAML patch
@@ -434,6 +452,6 @@ class: extra-details
kubectl get deploy -o json worker |
jq "{name:.metadata.name} + .spec.strategy.rollingUpdate"
```
]
]
]


@@ -0,0 +1,72 @@
# sealed-secrets
- https://github.com/bitnami-labs/sealed-secrets
- It has a server side (a standard Kubernetes deployment) and a client-side *kubeseal* binary
- The server side starts by generating a key pair; it keeps the private key and exposes the public key
- To create a sealed secret, you only need access to the public key
- Access can be restricted with Kubernetes RBAC rules
---
## sealed-secrets how to
- Adding a secret: *kubeseal* encrypts it with the public key
- The server-side controller re-creates the original Secret when the encrypted one is added to the cluster
- This makes it "safe" to add those secrets to your source tree
- Since version 0.9, key rotation is enabled by default, so remember to back up the private keys regularly
<br/> (or you won't be able to decrypt all your secrets in case of *disaster recovery*)
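For illustration, the output of *kubeseal* is a SealedSecret resource; a shortened, made-up example could look like this:
```yaml
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  name: my-secret
spec:
  encryptedData:
    foo: AgBy8hCi...        # ciphertext generated by kubeseal (truncated placeholder)
```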
---
## First "sealed-secret"
.exercise[
- Install *kubeseal*
```bash
wget https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.9.7/kubeseal-linux-amd64 -O kubeseal
sudo install -m 755 kubeseal /usr/local/bin/kubeseal
```
- Install controller
```bash
helm install -n kube-system sealed-secrets-controller stable/sealed-secrets
```
- Create a secret you don't want to leak
```bash
kubectl create secret generic --from-literal=foo=bar my-secret -o yaml --dry-run \
| kubeseal > mysecret.yaml
kubectl apply -f mysecret.yaml
```
]
---
## Alternative: sops / git crypt
- You can work at the VCS level (i.e. totally abstracted from Kubernetes)
- [sops](https://github.com/mozilla/sops) is VCS-agnostic and encrypts portions of files
- git-crypt works with git to transparently encrypt (some) files in the repository
---
## Another alternative
- You can delegate secret management to another component, like *HashiCorp Vault*
- It can work in multiple ways:
- encrypt Secrets at rest in the API server (instead of the oh-so-secure *base64* encoding)
- encrypt secrets before sending them to Kubernetes (avoids storing them in git as plain text)
- manage secrets entirely in Vault and expose them to containers via volumes


@@ -0,0 +1,15 @@
## Software development
For years, decades (centuries!), software development has followed the same principles:
- Development
- Testing
- Packaging
- Shipping
- Deployment
We will see how this maps to the Kubernetes world.


@@ -144,23 +144,158 @@ spec:
---
## Persistent Volume Claims
- To abstract the different types of storage, a pod can use a special volume type
- This type is a *Persistent Volume Claim*
- A Persistent Volume Claim (PVC) is a resource type
(visible with `kubectl get persistentvolumeclaims` or `kubectl get pvc`)
- A PVC is not a volume; it is a *request for a volume*
---
## Persistent Volume Claims in practice
- Using a Persistent Volume Claim is a two-step process:
- creating the claim
- using the claim in a pod (as if it were any other kind of volume)
- A PVC starts by being Unbound (without an associated volume)
- Once it is associated with a Persistent Volume, it becomes Bound
- A Pod referring to an unbound PVC will not start
(but as soon as the PVC is bound, the Pod can start)
---
## Binding PV and PVC
- A Kubernetes controller continuously watches PV and PVC objects
- When it notices an unbound PVC, it tries to find a satisfactory PV
("satisfactory" in terms of size and other characteristics; see next slide)
- If no PV fits the PVC, a PV can be created dynamically
(this requires configuring a *dynamic provisioner*; more on that later)
- Otherwise, the PVC remains unbound indefinitely
(until we manually create a PV or set up dynamic provisioning)
---
## What's in a Persistent Volume Claim?
- At the very least, the claim should indicate:
- the size of the volume (e.g. "5 GiB")
- the access mode (e.g. "read-write by a single pod")
- Optionally, it can also specify a Storage Class
- The Storage Class indicates:
- which storage system to use (e.g. Portworx, EBS...)
- extra parameters for that storage system
e.g.: "replicate the data 3 times, and use SSD media"
---
## What's a Storage Class?
- A Storage Class is yet another Kubernetes API resource
(visible with e.g. `kubectl get storageclass` or `kubectl get sc`)
- It indicates which *provisioner* to use
(which controller will create the actual volume)
- And arbitrary parameters for that provisioner
(replication levels, type of disk ... anything relevant!)
- Storage Classes are required if we want to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
(but we can also create volumes manually, and ignore Storage Classes)
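As an example, a Storage Class for the AWS EBS provisioner could look like this (the provisioner and parameters depend entirely on the storage system; these values are illustrative):
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast
provisioner: kubernetes.io/aws-ebs   # which controller creates the actual volumes
parameters:
  type: gp2                          # provisioner-specific parameter (EBS volume type)
```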
---
## Defining a Persistent Volume Claim
Here is a minimal PVC:
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: my-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```
---
## Using a Persistent Volume Claim
Here is a Pod definition like the ones shown earlier, but using a PVC:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-using-a-claim
spec:
  containers:
  - image: ...
    name: container-using-a-claim
    volumeMounts:
    - mountPath: /my-vol
      name: my-volume
  volumes:
  - name: my-volume
    persistentVolumeClaim:
      claimName: my-claim
```
---
## Persistent Volume Claims and Stateful sets
- The pods in a stateful set can define a `volumeClaimTemplate`
- A `volumeClaimTemplate` will dynamically create one Persistent Volume Claim per pod
- Each pod will therefore have its own volume
- These volumes are numbered (like the pods)
- When updating the stateful set (e.g. image upgrade), each pod keeps its volume
- When pods get rescheduled (e.g. node failure), they keep their volume
(this requires a storage system that is not node-local)
- These volumes are not automatically deleted
(when the stateful set is scaled down or deleted)
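For instance, a Stateful set manifest could carry a claim template like this sketch (the name and size are illustrative):
```yaml
# Excerpt of a StatefulSet manifest (at spec level)
volumeClaimTemplates:
- metadata:
    name: data                   # volumes will be named data-<podname>
  spec:
    accessModes: [ "ReadWriteOnce" ]
    resources:
      requests:
        storage: 1Gi             # size requested for each pod's volume
```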
---
@@ -306,9 +441,11 @@ nodes and encryption of gossip traffic) were removed for simplicity.
## Caveats
- We aren't using actual persistence yet
(no `volumeClaimTemplate`, Persistent Volume, etc.)
- That's because we don't have a storage provider yet
(except if you're running this on your own and your cluster has one)
- What happens if we lose a pod?
@@ -337,266 +474,3 @@ nodes and encryption of gossip traffic) were removed for simplicity.
- we lose all the data (ouch)
- If we run Consul without persistent storage, backups are a good idea!
---
# Persistent Volume Claims
- Our Pods can use a special volume type: a *Persistent Volume Claim*
- A Persistent Volume Claim (PVC) is also a Kubernetes resource
(visible with `kubectl get persistentvolumeclaims` or `kubectl get pvc`)
- A PVC is not a volume; it is a *request for a volume*
- It should indicate at least:
- the size of the volume (e.g. "5 GiB")
- the access mode (e.g. "read-write by a single pod")
---
## What's in a PVC?
- A PVC contains at least:
- a list of *access modes* (ReadWriteOnce, ReadOnlyMany, ReadWriteMany)
- a size (interpreted as the minimal storage space needed)
- It can also contain optional elements:
- a selector (to restrict which actual volumes it can use)
- a *storage class* (used by dynamic provisioning, more on that later)
---
## What does a PVC look like?
Here is a manifest for a basic PVC:
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: my-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```
---
## Using a Persistent Volume Claim
Here is a Pod definition like the ones shown earlier, but using a PVC:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-using-a-claim
spec:
  containers:
  - image: ...
    name: container-using-a-claim
    volumeMounts:
    - mountPath: /my-vol
      name: my-volume
  volumes:
  - name: my-volume
    persistentVolumeClaim:
      claimName: my-claim
```
---
## Creating and using Persistent Volume Claims
- PVCs can be created manually and used explicitly
(as shown on the previous slides)
- They can also be created and used through Stateful Sets
(this will be shown later)
---
## Lifecycle of Persistent Volume Claims
- When a PVC is created, it starts existing in "Unbound" state
(without an associated volume)
- A Pod referencing an unbound PVC will not start
(the scheduler will wait until the PVC is bound to place it)
- A special controller continuously monitors PVCs to associate them with PVs
- If no PV is available, one must be created:
- manually (by operator intervention)
- using a *dynamic provisioner* (more on that later)
---
class: extra-details
## Which PV gets associated to a PVC?
- The PV must satisfy the PVC constraints
(access mode, size, optional selector, optional storage class)
- The PVs with the closest access mode are picked
- Then the PVs with the closest size
- It is possible to specify a `claimRef` when creating a PV
(this will associate it to the specified PVC, but only if the PV satisfies all the requirements of the PVC; otherwise another PV might end up being picked)
- For all the details about the PersistentVolumeClaimBinder, check [this doc](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/persistent-storage.md#matching-and-binding)
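For example, a PV pre-bound to `my-claim` could carry a `claimRef` like this sketch (the rest of the PV spec is omitted):
```yaml
# Hypothetical excerpt of a PersistentVolume manifest
spec:
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: my-claim               # the PVC that should get this volume
    namespace: default           # namespace of that PVC
```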
---
## Persistent Volume Claims and Stateful sets
- A Stateful set can define one (or more) `volumeClaimTemplate`
- Each `volumeClaimTemplate` will create one Persistent Volume Claim per pod
- Each pod will therefore have its own individual volume
- These volumes are numbered (like the pods)
- Example:
- a Stateful set is named `db`
- it is scaled to 3 replicas
- it has a `volumeClaimTemplate` named `data`
- then it will create pods `db-0`, `db-1`, `db-2`
- these pods will have volumes named `data-db-0`, `data-db-1`, `data-db-2`
---
## Persistent Volume Claims are sticky
- When updating the stateful set (e.g. image upgrade), each pod keeps its volume
- When pods get rescheduled (e.g. node failure), they keep their volume
(this requires a storage system that is not node-local)
- These volumes are not automatically deleted
(when the stateful set is scaled down or deleted)
- If a stateful set is scaled back up later, the pods get their data back
---
## Dynamic provisioners
- A *dynamic provisioner* monitors unbound PVCs
- It can create volumes (and the corresponding PV) on the fly
- This requires the PVCs to have a *storage class*
(annotation `volume.beta.kubernetes.io/storage-provisioner`)
- A dynamic provisioner only acts on PVCs with the right storage class
(it ignores the other ones)
- Just like `LoadBalancer` services, dynamic provisioners are optional
(i.e. our cluster may or may not have one pre-installed)
---
## What's a Storage Class?
- A Storage Class is yet another Kubernetes API resource
(visible with e.g. `kubectl get storageclass` or `kubectl get sc`)
- It indicates which *provisioner* to use
(which controller will create the actual volume)
- And arbitrary parameters for that provisioner
(replication levels, type of disk ... anything relevant!)
- Storage Classes are required if we want to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
(but we can also create volumes manually, and ignore Storage Classes)
---
## The default storage class
- At most one storage class can be marked as the default class
(by annotating it with `storageclass.kubernetes.io/is-default-class=true`)
- When a PVC is created, it will be annotated with the default storage class
(unless it specifies an explicit storage class)
- This only happens at PVC creation
(existing PVCs are not updated when we mark a class as the default one)
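In manifest form, a default Storage Class could look like this sketch (the provisioner shown is just an example):
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"   # mark this class as the default
provisioner: kubernetes.io/aws-ebs                         # example provisioner
```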
---
## Dynamic provisioning setup
This is how we can achieve fully automated provisioning of persistent storage.
1. Configure a storage system.
(It needs to have an API, or be capable of automated provisioning of volumes.)
2. Install a dynamic provisioner for this storage system.
(This is some specific controller code.)
3. Create a Storage Class for this system.
(It has to match what the dynamic provisioner is expecting.)
4. Annotate the Storage Class to be the default one.
---
## Dynamic provisioning usage
After setting up the system (previous slide), all we need to do is:
*Create a Stateful Set that makes use of a `volumeClaimTemplate`.*
This will trigger the following actions.
1. The Stateful Set creates PVCs according to the `volumeClaimTemplate`.
2. The Stateful Set creates Pods using these PVCs.
3. The PVCs are automatically annotated with our Storage Class.
4. The dynamic provisioner provisions volumes and creates the corresponding PVs.
5. The PersistentVolumeClaimBinder associates the PVs and the PVCs together.
6. PVCs are now bound, the Pods can start.

slides/k8s/stop-manual.md Normal file

@@ -0,0 +1,17 @@
# Automation && CI/CD
What we've done so far:
- development of our application
- manual testing, and exploration of automated testing strategies
- packaging in a container image
- shipping that image to a registry
What still needs to be done:
- deployment of our application
- automation of the whole build / ship / run cycle

slides/k8s/testing.md Normal file

@@ -0,0 +1,82 @@
# Testing
There are multiple levels of testing:
- unit testing (many small tests that run in isolation),
- integration testing (bigger tests involving multiple components),
- functional or end-to-end testing (even bigger tests involving the whole app).
In this section, we will focus on *unit testing*, where each test case
should (ideally) be completely isolated from other components and system
interaction: no real database, no real backend, *mocks* everywhere.
(For a good discussion on the merits of unit testing, we can read
[Just Say No to More End-to-End Tests](https://testing.googleblog.com/2015/04/just-say-no-to-more-end-to-end-tests.html).)
Unfortunately, this ideal scenario is easier said than done ...
---
## Multi-stage build
```dockerfile
FROM <baseimage>
RUN <install dependencies>
COPY <code>
RUN <build code>
RUN <install test dependencies>
COPY <test data sets and fixtures>
RUN <unit tests>
FROM <baseimage>
RUN <install dependencies>
COPY <code>
RUN <build code>
CMD, EXPOSE ...
```
- This leverages the Docker cache: if the code doesn't change, the tests don't need to run
- If the tests require a database or other backend, we can use `docker build --network`
- If the tests fail, the build fails; and no image is generated
---
## Docker Compose
```yaml
version: "3.4"
services:
  project:
    image: my_image_name
    build:
      context: .
      target: dev
  database:
    image: redis
  backend:
    image: backend
```
+
```shell
docker-compose build && docker-compose run project pytest -v
```
---
## Skaffold/Container-structure-test
- The `test` field of `skaffold.yaml` instructs Skaffold to run tests against your image
- It uses [container-structure-test](https://github.com/GoogleContainerTools/container-structure-test)
- It allows running custom commands
- Unfortunately, there is no way to run other Docker images
(e.g. to start a database or a backend needed by the tests)
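Here is a sketch of what the `test` field could look like in `skaffold.yaml` (the apiVersion and file paths are placeholders and depend on the Skaffold version):
```yaml
apiVersion: skaffold/v1              # adjust to your Skaffold version
kind: Config
build:
  artifacts:
    - image: my_image_name
test:
  - image: my_image_name             # image to test (must match a built artifact)
    structureTests:
      - ./tests/structure-*.yaml     # container-structure-test definition files
```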


@@ -209,37 +209,19 @@ And *then* it is time to look at orchestration!
## Managing stack deployments
- Applications are made of many resources
(Deployments, Services, and much more)
- We need to automate the creation / update / management of these resources
- There is no "absolute best" tool or method; it depends on:
- the size and complexity of our stack(s)
- how often we change it (i.e. add/remove components)
- the size and skills of our team
---
## A few tools to manage stacks
- Shell scripts invoking `kubectl`
- YAML resource manifests committed to a repo
- [Kustomize](https://github.com/kubernetes-sigs/kustomize)
(YAML manifests + patches applied on top)
- [Helm](https://github.com/kubernetes/helm)
(YAML manifests + templating engine)
- [Spinnaker](https://www.spinnaker.io/)
(Netflix' CD platform)
- [Brigade](https://brigade.sh/)
(event-driven scripting; no YAML)
---
@@ -278,3 +260,17 @@ Sorry Star Trek fans, this is not the federation you're looking for!
- Synchronize resources across clusters
- Discover resources across clusters
---
## Developer experience
*We've put this last, but it's pretty important!*
- How do you on-board a new developer?
- What do they need to install to get a dev stack?
- How does a code change make it from dev to prod?
- How does someone add a component to a stack?


@@ -140,27 +140,3 @@ We can safely ignore them.)
- Our version control system now has a full history of what we deploy
- Compares to "Infrastructure-as-Code", but for app deployments
---
class: extra-details
## Specifying the namespace
- When creating resources from YAML manifests, the namespace is optional
- If we specify a namespace:
- resources are created in the specified namespace
- this is typical for things deployed only once per cluster
- example: system components, cluster add-ons ...
- If we don't specify a namespace:
- resources are created in the current namespace
- this is typical for things that may be deployed multiple times
- example: applications (production, staging, feature branches ...)


@@ -1,15 +1,17 @@
## Intros
- Hello! We are:
- .emoji[🐳] Jérôme Petazzoni ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
- .emoji[☸️] Julien Girardin ([Zempashi](https://github.com/zempashi), Enix SAS)
- The training will run from 9am to 5:30pm (with lunch and coffee breaks)
- For lunch, we'll invite you at [Chameleon, 70 Rue René Boulanger](https://goo.gl/maps/h2XjmJN5weDSUios8)
(please let us know if you'll eat on your own)
- Feel free to interrupt for questions at any time
- *Especially when you see full screen container pictures!*
- Live feedback, questions, help: @@CHAT@@

slides/menu.html Normal file

@@ -0,0 +1,7 @@
<ul>
<li><a href="1.yml.html">Jour 1</a></li>
<li><a href="2.yml.html">Jour 2</a></li>
<li><a href="3.yml.html">Jour 3</a></li>
<li><a href="4.yml.html">Jour 4</a></li>
<li><a href="5.yml.html">Jour 5</a></li>
</ul>


@@ -1,154 +0,0 @@
title: |
Docker
&
Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/training-20200225-seattle)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2020-02-outreach.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- # DAY 1
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
#- containers/Start_And_Attach.md
- containers/Naming_And_Inspecting.md
#- containers/Labels.md
- containers/Getting_Inside.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Container_Networking_Basics.md
#- containers/Network_Drivers.md
#- containers/Container_Network_Model.md
- containers/Local_Development_Workflow.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Multi_Stage_Builds.md
#- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- # DAY 2
- shared/prereqs.md
- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectlrun.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
-
- k8s/shippingimages.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
- k8s/exercise-wordsmith.md
- k8s/yamldeploy.md
#- k8s/setup-k8s.md
#- k8s/dashboard.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
-
- k8s/daemonset.md
#- k8s/dryrun.md
- k8s/namespaces.md
- k8s/exercise-yaml.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- k8s/ingress.md
- # DAY 3
- k8s/dashboard.md
- k8s/rollout.md
- k8s/healthchecks.md
#- k8s/healthchecks-more.md
#- k8s/record.md
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
#- k8s/horizontal-pod-autoscaler.md
-
- k8s/authn-authz.md
- k8s/logs-centralized.md
- k8s/prometheus.md
-
- k8s/volumes.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
- k8s/configuration.md
- k8s/exercise-configmap.md
-
- k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
-
- k8s/whatsnext.md
- k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md
-
- "# (Extra: containers) \n"
- containers/Docker_Machine.md
- containers/Advanced_Dockerfiles.md
- containers/Init_Systems.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Windows_Containers.md
-
- "# (Extra: containers internals) \n"
- containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
- containers/Containers_From_Scratch.md
- containers/Container_Engines.md
- containers/Pods_Anatomy.md
-
- "# (Extra: Helm) \n"
#- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/exercise-helm.md
- k8s/helm-create-better-chart.md
- k8s/helm-secrets.md
-
- "# (Extra: Kubernetes operators) \n"
- k8s/extending-api.md
- k8s/operators.md
- k8s/operators-design.md
-
- "# (Extra: Kubernetes security) \n"
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/podsecuritypolicy.md
- k8s/netpol.md


@@ -11,10 +11,8 @@ class: title, in-person
@@TITLE@@<br/></br>
.footnote[
**WiFi: CONFERENCE**<br/>
**Mot de passe: 123conference**
**Slides[:](https://www.youtube.com/watch?v=h16zyxiwDLY) @@SLIDES@@**
]