mirror of
https://github.com/jpetazzo/container.training.git
synced 2026-03-02 01:10:20 +00:00
Compare commits
3 Commits
2020-02-ou
...
2020-01-ca
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0b1b942b21 | ||
|
|
0f046ed78c | ||
|
|
c5ed86c92b |
@@ -9,21 +9,21 @@ services:
|
||||
|
||||
etcd:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/etcd:3.4.3
|
||||
image: k8s.gcr.io/etcd:3.3.10
|
||||
command: etcd
|
||||
|
||||
kube-apiserver:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
|
||||
|
||||
kube-controller-manager:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
|
||||
"Edit the CLUSTER placeholder first. Then, remove this line.":
|
||||
|
||||
kube-scheduler:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-scheduler --master http://localhost:8080
|
||||
|
||||
@@ -12,6 +12,7 @@ metadata:
|
||||
name: kube-router-cfg
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: node
|
||||
k8s-app: kube-router
|
||||
data:
|
||||
cni-conf.json: |
|
||||
@@ -31,21 +32,20 @@ data:
|
||||
]
|
||||
}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-router
|
||||
tier: node
|
||||
name: kube-router
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-router
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-router
|
||||
tier: node
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
|
||||
@@ -9,20 +9,20 @@ services:
|
||||
|
||||
etcd:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/etcd:3.4.3
|
||||
image: k8s.gcr.io/etcd:3.3.10
|
||||
command: etcd
|
||||
|
||||
kube-apiserver:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
|
||||
|
||||
kube-controller-manager:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-controller-manager --master http://localhost:8080
|
||||
|
||||
kube-scheduler:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-scheduler --master http://localhost:8080
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: coffees.container.training
|
||||
spec:
|
||||
group: container.training
|
||||
version: v1alpha1
|
||||
scope: Namespaced
|
||||
names:
|
||||
plural: coffees
|
||||
singular: coffee
|
||||
kind: Coffee
|
||||
shortNames:
|
||||
- cof
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: coffees.container.training
|
||||
spec:
|
||||
group: container.training
|
||||
scope: Namespaced
|
||||
names:
|
||||
plural: coffees
|
||||
singular: coffee
|
||||
kind: Coffee
|
||||
shortNames:
|
||||
- cof
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
spec:
|
||||
required:
|
||||
- taste
|
||||
properties:
|
||||
taste:
|
||||
description: Subjective taste of that kind of coffee bean
|
||||
type: string
|
||||
additionalPrinterColumns:
|
||||
- jsonPath: .spec.taste
|
||||
description: Subjective taste of that kind of coffee bean
|
||||
name: Taste
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: arabica
|
||||
spec:
|
||||
taste: strong
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: robusta
|
||||
spec:
|
||||
taste: stronger
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: liberica
|
||||
spec:
|
||||
taste: smoky
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: excelsa
|
||||
spec:
|
||||
taste: fruity
|
||||
|
||||
@@ -13,7 +13,7 @@ spec:
|
||||
mountPath: /usr/share/nginx/html/
|
||||
- name: git
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
|
||||
@@ -113,12 +113,9 @@ _cmd_disabledocker() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo systemctl disable docker.service
|
||||
sudo systemctl disable docker.socket
|
||||
sudo systemctl stop docker
|
||||
sudo killall containerd
|
||||
"
|
||||
pssh "sudo systemctl disable docker.service"
|
||||
pssh "sudo systemctl disable docker.socket"
|
||||
pssh "sudo systemctl stop docker"
|
||||
}
|
||||
|
||||
_cmd kubebins "Install Kubernetes and CNI binaries but don't start anything"
|
||||
@@ -130,15 +127,18 @@ _cmd_kubebins() {
|
||||
set -e
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz \
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.15/etcd-v3.3.15-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
##VERSION##
|
||||
curl -L https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx \
|
||||
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
|
||||
curl -L https://dl.k8s.io/v1.16.2/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
|
||||
fi
|
||||
if ! [ -x kubelet ]; then
|
||||
for BINARY in kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy;
|
||||
do
|
||||
sudo ln -s hyperkube \$BINARY
|
||||
done
|
||||
fi
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
@@ -249,7 +249,6 @@ EOF"
|
||||
# Install ship
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/ship ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx ship
|
||||
fi"
|
||||
@@ -257,7 +256,7 @@ EOF"
|
||||
# Install the AWS IAM authenticator
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
|
||||
##VERSION##
|
||||
##VERSION##
|
||||
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
fi"
|
||||
|
||||
@@ -1,24 +1,9 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
retry () {
|
||||
N=$1
|
||||
I=0
|
||||
shift
|
||||
|
||||
while ! "$@"; do
|
||||
I=$(($I+1))
|
||||
if [ $I -gt $N ]; then
|
||||
echo "FAILED, ABORTING"
|
||||
exit 1
|
||||
fi
|
||||
echo "FAILED, RETRYING ($I/$N)"
|
||||
done
|
||||
}
|
||||
|
||||
export AWS_INSTANCE_TYPE=t3a.small
|
||||
|
||||
INFRA=infra/aws-eu-west-3
|
||||
INFRA=infra/aws-us-west-2
|
||||
|
||||
STUDENTS=2
|
||||
|
||||
@@ -32,9 +17,9 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $STUDENTS
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl disabledocker $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl disabledocker $TAG
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kubenet
|
||||
@@ -45,9 +30,9 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kuberouter
|
||||
@@ -58,9 +43,9 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
#INFRA=infra/aws-us-west-1
|
||||
@@ -75,6 +60,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kube $TAG 1.15.9
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kube $TAG 1.14.6
|
||||
./workshopctl cards $TAG
|
||||
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
# Uncomment and/or edit one of the the following lines if necessary.
|
||||
#/ /kube-halfday.yml.html 200
|
||||
#/ /kube-fullday.yml.html 200
|
||||
#/ /kube-twodays.yml.html 200
|
||||
/ /outreach.yml.html 200!
|
||||
/ /kube.yml.html 200!
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
@@ -20,7 +20,7 @@ The control plane can run:
|
||||
|
||||
- in containers, on the same nodes that run other application workloads
|
||||
|
||||
(example: [Minikube](https://github.com/kubernetes/minikube); 1 node runs everything, [kind](https://kind.sigs.k8s.io/))
|
||||
(example: Minikube; 1 node runs everything)
|
||||
|
||||
- on a dedicated node
|
||||
|
||||
@@ -28,7 +28,7 @@ The control plane can run:
|
||||
|
||||
- on a dedicated set of nodes
|
||||
|
||||
(example: [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way); [kops](https://github.com/kubernetes/kops))
|
||||
(example: Kubernetes The Hard Way; kops)
|
||||
|
||||
- outside of the cluster
|
||||
|
||||
|
||||
@@ -360,7 +360,3 @@ docker run --rm --net host -v $PWD:/vol \
|
||||
- [kube-backup](https://github.com/pieterlange/kube-backup)
|
||||
|
||||
simple scripts to save resource YAML to a git repository
|
||||
|
||||
- [bivac](https://github.com/camptocamp/bivac)
|
||||
|
||||
Backup Interface for Volumes Attached to Containers
|
||||
|
||||
@@ -154,7 +154,7 @@ class: extra-details
|
||||
|
||||
- "Running Kubernetes without nodes"
|
||||
|
||||
- Systems like [Virtual Kubelet](https://virtual-kubelet.io/) or [Kiyot](https://static.elotl.co/docs/latest/kiyot/kiyot.html) can run pods using on-demand resources
|
||||
- Systems like [Virtual Kubelet](https://virtual-kubelet.io/) or Kiyot can run pods using on-demand resources
|
||||
|
||||
- Virtual Kubelet can leverage e.g. ACI or Fargate to run pods
|
||||
|
||||
|
||||
@@ -81,7 +81,7 @@
|
||||
|
||||
## What version are we running anyway?
|
||||
|
||||
- When I say, "I'm running Kubernetes 1.15", is that the version of:
|
||||
- When I say, "I'm running Kubernetes 1.11", is that the version of:
|
||||
|
||||
- kubectl
|
||||
|
||||
@@ -139,73 +139,6 @@
|
||||
|
||||
---
|
||||
|
||||
## Important questions
|
||||
|
||||
- Should we upgrade the control plane before or after the kubelets?
|
||||
|
||||
- Within the control plane, should we upgrade the API server first or last?
|
||||
|
||||
- How often should we upgrade?
|
||||
|
||||
- How long are versions maintained?
|
||||
|
||||
- All the answers are in [the documentation about version skew policy](https://kubernetes.io/docs/setup/release/version-skew-policy/)!
|
||||
|
||||
- Let's review the key elements together ...
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes uses semantic versioning
|
||||
|
||||
- Kubernetes versions look like MAJOR.MINOR.PATCH; e.g. in 1.17.2:
|
||||
|
||||
- MAJOR = 1
|
||||
- MINOR = 17
|
||||
- PATCH = 2
|
||||
|
||||
- It's always possible to mix and match different PATCH releases
|
||||
|
||||
(e.g. 1.16.1 and 1.16.6 are compatible)
|
||||
|
||||
- It is recommended to run the latest PATCH release
|
||||
|
||||
(but it's mandatory only when there is a security advisory)
|
||||
|
||||
---
|
||||
|
||||
## Version skew
|
||||
|
||||
- API server must be more recent than its clients (kubelet and control plane)
|
||||
|
||||
- ... Which means it must always be upgraded first
|
||||
|
||||
- All components support a difference of one¹ MINOR version
|
||||
|
||||
- This allows live upgrades (since we can mix e.g. 1.15 and 1.16)
|
||||
|
||||
- It also means that going from 1.14 to 1.16 requires going through 1.15
|
||||
|
||||
.footnote[¹Except kubelet, which can be up to two MINOR behind API server,
|
||||
and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
|
||||
---
|
||||
|
||||
## Release cycle
|
||||
|
||||
- There is a new PATCH relese whenever necessary
|
||||
|
||||
(every few weeks, or "ASAP" when there is a security vulnerability)
|
||||
|
||||
- There is a new MINOR release every 3 months (approximately)
|
||||
|
||||
- At any given time, three MINOR releases are maintained
|
||||
|
||||
- ... Which means that MINOR releases are maintained approximately 9 months
|
||||
|
||||
- We should expect to upgrade at least every 3 months (on average)
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We are going to update a few cluster components
|
||||
@@ -218,6 +151,47 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
|
||||
---
|
||||
|
||||
## Updating kubelet
|
||||
|
||||
- These nodes have been installed using the official Kubernetes packages
|
||||
|
||||
- We can therefore use `apt` or `apt-get`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into node `test3`
|
||||
|
||||
- View available versions for package `kubelet`:
|
||||
```bash
|
||||
apt show kubelet -a | grep ^Version
|
||||
```
|
||||
|
||||
- Upgrade kubelet:
|
||||
```bash
|
||||
sudo apt install kubelet=1.15.3-00
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into node `test1`
|
||||
|
||||
- Check node versions:
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
```
|
||||
|
||||
- Create a deployment and scale it to make sure that the node still works
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating the API server
|
||||
|
||||
- This cluster has been deployed with kubeadm
|
||||
@@ -254,7 +228,7 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
|
||||
```
|
||||
|
||||
- Look for the `image:` line, and update it to e.g. `v1.16.0`
|
||||
- Look for the `image:` line, and update it to e.g. `v1.15.0`
|
||||
|
||||
]
|
||||
|
||||
@@ -275,27 +249,9 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
|
||||
---
|
||||
|
||||
## Was that a good idea?
|
||||
|
||||
--
|
||||
|
||||
**No!**
|
||||
|
||||
--
|
||||
|
||||
- Remember the guideline we gave earlier:
|
||||
|
||||
*To update a component, use whatever was used to install it.*
|
||||
|
||||
- This control plane was deployed with kubeadm
|
||||
|
||||
- We should use kubeadm to upgrade it!
|
||||
|
||||
---
|
||||
|
||||
## Updating the whole control plane
|
||||
|
||||
- Let's make it right, and use kubeadm to upgrade the entire control plane
|
||||
- As an example, we'll use kubeadm to upgrade the entire control plane
|
||||
|
||||
(note: this is possible only because the cluster was installed with kubeadm)
|
||||
|
||||
@@ -308,11 +264,11 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
|
||||
]
|
||||
|
||||
Note 1: kubeadm thinks that our cluster is running 1.16.0.
|
||||
Note 1: kubeadm thinks that our cluster is running 1.15.0.
|
||||
<br/>It is confused by our manual upgrade of the API server!
|
||||
|
||||
Note 2: kubeadm itself is still version 1.15.9.
|
||||
<br/>It doesn't know how to upgrade do 1.16.X.
|
||||
Note 2: kubeadm itself is still version 1.14.6.
|
||||
<br/>It doesn't know how to upgrade do 1.15.X.
|
||||
|
||||
---
|
||||
|
||||
@@ -334,39 +290,8 @@ Note 2: kubeadm itself is still version 1.15.9.
|
||||
|
||||
]
|
||||
|
||||
Problem: kubeadm doesn't know know how to handle
|
||||
upgrades from version 1.15.
|
||||
|
||||
This is because we installed version 1.17 (or even later).
|
||||
|
||||
We need to install kubeadm version 1.16.X.
|
||||
|
||||
---
|
||||
|
||||
## Downgrading kubeadm
|
||||
|
||||
- We need to go back to version 1.16.X (e.g. 1.16.6)
|
||||
|
||||
.exercise[
|
||||
|
||||
- View available versions for package `kubeadm`:
|
||||
```bash
|
||||
apt show kubeadm -a | grep ^Version | grep 1.16
|
||||
```
|
||||
|
||||
- Downgrade kubeadm:
|
||||
```
|
||||
sudo apt install kubeadm=1.16.6-00
|
||||
```
|
||||
|
||||
- Check what kubeadm tells us:
|
||||
```
|
||||
sudo kubeadm upgrade plan
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
kubeadm should now agree to upgrade to 1.16.6.
|
||||
Note: kubeadm still thinks that our cluster is running 1.15.0.
|
||||
<br/>But at least it knows about version 1.15.X now.
|
||||
|
||||
---
|
||||
|
||||
@@ -382,91 +307,28 @@ kubeadm should now agree to upgrade to 1.16.6.
|
||||
|
||||
- Perform the upgrade:
|
||||
```bash
|
||||
sudo kubeadm upgrade apply v1.16.6
|
||||
sudo kubeadm upgrade apply v1.15.3
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating kubelet
|
||||
## Updating kubelets
|
||||
|
||||
- These nodes have been installed using the official Kubernetes packages
|
||||
- After updating the control plane, we need to update each kubelet
|
||||
|
||||
- We can therefore use `apt` or `apt-get`
|
||||
- This requires to run a special command on each node, to download the config
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into node `test3`
|
||||
|
||||
- View available versions for package `kubelet`:
|
||||
```bash
|
||||
apt show kubelet -a | grep ^Version
|
||||
```
|
||||
|
||||
- Upgrade kubelet:
|
||||
```bash
|
||||
sudo apt install kubelet=1.16.6-00
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into node `test1`
|
||||
|
||||
- Check node versions:
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
```
|
||||
|
||||
- Create a deployment and scale it to make sure that the node still works
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Was that a good idea?
|
||||
|
||||
--
|
||||
|
||||
**Almost!**
|
||||
|
||||
--
|
||||
|
||||
- Yes, kubelet was installed with distribution packages
|
||||
|
||||
- However, kubeadm took care of configuring kubelet
|
||||
|
||||
(when doing `kubeadm join ...`)
|
||||
|
||||
- We were supposed to run a special command *before* upgrading kubelet!
|
||||
|
||||
- That command should be executed on each node
|
||||
|
||||
- It will download the kubelet configuration generated by kubeadm
|
||||
|
||||
---
|
||||
|
||||
## Upgrading kubelet the right way
|
||||
|
||||
- We need to upgrade kubeadm, upgrade kubelet config, then upgrade kubelet
|
||||
|
||||
(after upgrading the control plane)
|
||||
(this config is generated by kubeadm)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Download the configuration on each node, and upgrade kubelet:
|
||||
```bash
|
||||
for N in 1 2 3; do
|
||||
ssh test$N "
|
||||
sudo apt install kubeadm=1.16.6-00 &&
|
||||
sudo kubeadm upgrade node &&
|
||||
sudo apt install kubelet=1.16.6-00"
|
||||
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.15.3
|
||||
ssh test$N sudo apt install kubelet=1.15.3-00
|
||||
done
|
||||
```
|
||||
]
|
||||
@@ -475,7 +337,7 @@ kubeadm should now agree to upgrade to 1.16.6.
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- All our nodes should now be updated to version 1.16.6
|
||||
- All our nodes should now be updated to version 1.15.3
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -492,12 +354,12 @@ class: extra-details
|
||||
|
||||
## Skipping versions
|
||||
|
||||
- This example worked because we went from 1.15 to 1.16
|
||||
- This example worked because we went from 1.14 to 1.15
|
||||
|
||||
- If you are upgrading from e.g. 1.14, you will have to go through 1.15 first
|
||||
- If you are upgrading from e.g. 1.13, you will generally have to go through 1.14 first
|
||||
|
||||
- This means upgrading kubeadm to 1.15.X, then using it to upgrade the cluster
|
||||
- This means upgrading kubeadm to 1.14.X, then using it to upgrade the cluster
|
||||
|
||||
- Then upgrading kubeadm to 1.16.X, etc.
|
||||
- Then upgrading kubeadm to 1.15.X, etc.
|
||||
|
||||
- **Make sure to read the release notes before upgrading!**
|
||||
|
||||
@@ -162,8 +162,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's BGP?
|
||||
|
||||
- BGP (Border Gateway Protocol) is the protocol used between internet routers
|
||||
@@ -222,22 +220,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Checking the CNI configuration
|
||||
|
||||
- By default, kubelet gets the CNI configuration from `/etc/cni/net.d`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the content of `/etc/cni/net.d`
|
||||
|
||||
]
|
||||
|
||||
(On most machines, at this point, `/etc/cni/net.d` doesn't even exist).)
|
||||
|
||||
---
|
||||
|
||||
## Our control plane
|
||||
|
||||
- We will use a Compose file to start the control plane
|
||||
@@ -376,26 +358,6 @@ Note: the DaemonSet won't create any pods (yet) since there are no nodes (yet).
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Checking the CNI configuration
|
||||
|
||||
- At this point, kuberouter should have installed its CNI configuration
|
||||
|
||||
(in `/etc/cni/net.d`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the content of `/etc/cni/net.d`
|
||||
|
||||
]
|
||||
|
||||
- There should be a file created by kuberouter
|
||||
|
||||
- The file should contain the node's podCIDR
|
||||
|
||||
---
|
||||
|
||||
## Setting up a test
|
||||
|
||||
- Let's create a Deployment and expose it with a Service
|
||||
@@ -443,8 +405,6 @@ This shows that we are using IPVS (vs. iptables, which picked random endpoints).
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- What if we need to check that everything is working properly?
|
||||
@@ -468,8 +428,6 @@ We should see the local pod CIDR connected to `kube-bridge`, and the other nodes
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## More troubleshooting
|
||||
|
||||
- We can also look at the output of the kube-router pods
|
||||
@@ -486,8 +444,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Trying `kubectl logs` / `kubectl exec`
|
||||
|
||||
.exercise[
|
||||
@@ -513,8 +469,6 @@ What does that mean?
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Internal name resolution
|
||||
|
||||
- To execute these commands, the API server needs to connect to kubelet
|
||||
@@ -531,8 +485,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Another way to check the logs
|
||||
|
||||
- We can also ask the logs directly to the container engine
|
||||
@@ -574,3 +526,163 @@ done
|
||||
- This could be useful for embedded platforms with very limited resources
|
||||
|
||||
(or lab environments for learning purposes)
|
||||
|
||||
---
|
||||
|
||||
# Interconnecting clusters
|
||||
|
||||
- We assigned different Cluster CIDRs to each cluster
|
||||
|
||||
- This allows us to connect our clusters together
|
||||
|
||||
- We will leverage kube-router BGP abilities for that
|
||||
|
||||
- We will *peer* each kube-router instance with a *route reflector*
|
||||
|
||||
- As a result, we will be able to ping each other's pods
|
||||
|
||||
---
|
||||
|
||||
## Disclaimers
|
||||
|
||||
- There are many methods to interconnect clusters
|
||||
|
||||
- Depending on your network implementation, you will use different methods
|
||||
|
||||
- The method shown here only works for nodes with direct layer 2 connection
|
||||
|
||||
- We will often need to use tunnels or other network techniques
|
||||
|
||||
---
|
||||
|
||||
## The plan
|
||||
|
||||
- Someone will start the *route reflector*
|
||||
|
||||
(typically, that will be the person presenting these slides!)
|
||||
|
||||
- We will update our kube-router configuration
|
||||
|
||||
- We will add a *peering* with the route reflector
|
||||
|
||||
(instructing kube-router to connect to it and exchange route information)
|
||||
|
||||
- We should see the routes to other clusters on our nodes
|
||||
|
||||
(in the output of e.g. `route -n` or `ip route show`)
|
||||
|
||||
- We should be able to ping pods of other nodes
|
||||
|
||||
---
|
||||
|
||||
## Starting the route reflector
|
||||
|
||||
- Only do this slide if you are doing this on your own
|
||||
|
||||
- There is a Compose file in the `compose/frr-route-reflector` directory
|
||||
|
||||
- Before continuing, make sure that you have the IP address of the route reflector
|
||||
|
||||
---
|
||||
|
||||
## Configuring kube-router
|
||||
|
||||
- This can be done in two ways:
|
||||
|
||||
- with command-line flags to the `kube-router` process
|
||||
|
||||
- with annotations to Node objects
|
||||
|
||||
- We will use the command-line flags
|
||||
|
||||
(because it will automatically propagate to all nodes)
|
||||
|
||||
.footnote[Note: with Calico, this is achieved by creating a BGPPeer CRD.]
|
||||
|
||||
---
|
||||
|
||||
## Updating kube-router configuration
|
||||
|
||||
- We need to pass two command-line flags to the kube-router process
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the `kuberouter.yaml` file
|
||||
|
||||
- Add the following flags to the kube-router arguments:
|
||||
```
|
||||
- "--peer-router-ips=`X.X.X.X`"
|
||||
- "--peer-router-asns=64512"
|
||||
```
|
||||
(Replace `X.X.X.X` with the route reflector address)
|
||||
|
||||
- Update the DaemonSet definition:
|
||||
```bash
|
||||
kubectl apply -f kuberouter.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Restarting kube-router
|
||||
|
||||
- The DaemonSet will not update the pods automatically
|
||||
|
||||
(it is using the default `updateStrategy`, which is `OnDelete`)
|
||||
|
||||
- We will therefore delete the pods
|
||||
|
||||
(they will be recreated with the updated definition)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Delete all the kube-router pods:
|
||||
```bash
|
||||
kubectl delete pods -n kube-system -l k8s-app=kube-router
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: the other `updateStrategy` for a DaemonSet is RollingUpdate.
|
||||
<br/>
|
||||
For critical services, we might want to precisely control the update process.
|
||||
|
||||
---
|
||||
|
||||
## Checking peering status
|
||||
|
||||
- We can see informative messages in the output of kube-router:
|
||||
```
|
||||
time="2019-04-07T15:53:56Z" level=info msg="Peer Up"
|
||||
Key=X.X.X.X State=BGP_FSM_OPENCONFIRM Topic=Peer
|
||||
```
|
||||
|
||||
- We should see the routes of the other clusters show up
|
||||
|
||||
- For debugging purposes, the reflector also exports a route to 1.0.0.2/32
|
||||
|
||||
- That route will show up like this:
|
||||
```
|
||||
1.0.0.2 172.31.X.Y 255.255.255.255 UGH 0 0 0 eth0
|
||||
```
|
||||
|
||||
- We should be able to ping the pods of other clusters!
|
||||
|
||||
---
|
||||
|
||||
## If we wanted to do more ...
|
||||
|
||||
- kube-router can also export ClusterIP addresses
|
||||
|
||||
(by adding the flag `--advertise-cluster-ip`)
|
||||
|
||||
- They are exported individually (as /32)
|
||||
|
||||
- This would allow us to easily access other clusters' services
|
||||
|
||||
(without having to resolve the individual addresses of pods)
|
||||
|
||||
- Even better if it's combined with DNS integration
|
||||
|
||||
(to facilitate name → ClusterIP resolution)
|
||||
|
||||
@@ -10,29 +10,6 @@
|
||||
|
||||
---
|
||||
|
||||
## What can we do with Kubernetes?
|
||||
|
||||
- Let's imagine that we have a 3-tier e-commerce app:
|
||||
|
||||
- web frontend
|
||||
|
||||
- API backend
|
||||
|
||||
- database (that we will keep out of Kubernetes for now)
|
||||
|
||||
- We have built images for our frontend and backend components
|
||||
|
||||
(e.g. with Dockerfiles and `docker build`)
|
||||
|
||||
- We are running them successfully with a local environment
|
||||
|
||||
(e.g. with Docker Compose)
|
||||
|
||||
- Let's see how we would deploy our app on Kubernetes!
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Basic things we can ask Kubernetes to do
|
||||
|
||||
--
|
||||
@@ -71,7 +48,7 @@
|
||||
|
||||
(straightforward on CPU; more complex on other metrics)
|
||||
|
||||
- Resource management and scheduling
|
||||
- Ressource management and scheduling
|
||||
|
||||
(reserve CPU/RAM for containers; placement constraints)
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
- Unfortunately, as of Kubernetes 1.17, the CLI cannot create daemon sets
|
||||
- Unfortunately, as of Kubernetes 1.15, the CLI cannot create daemon sets
|
||||
|
||||
--
|
||||
|
||||
@@ -427,7 +427,7 @@ class: extra-details
|
||||
|
||||
- We need to change the selector of the `rng` service!
|
||||
|
||||
- Let's add another label to that selector (e.g. `active=yes`)
|
||||
- Let's add another label to that selector (e.g. `enabled=yes`)
|
||||
|
||||
---
|
||||
|
||||
@@ -445,11 +445,11 @@ class: extra-details
|
||||
|
||||
## The plan
|
||||
|
||||
1. Add the label `active=yes` to all our `rng` pods
|
||||
1. Add the label `enabled=yes` to all our `rng` pods
|
||||
|
||||
2. Update the selector for the `rng` service to also include `active=yes`
|
||||
2. Update the selector for the `rng` service to also include `enabled=yes`
|
||||
|
||||
3. Toggle traffic to a pod by manually adding/removing the `active` label
|
||||
3. Toggle traffic to a pod by manually adding/removing the `enabled` label
|
||||
|
||||
4. Profit!
|
||||
|
||||
@@ -464,7 +464,7 @@ be any interruption.*
|
||||
|
||||
## Adding labels to pods
|
||||
|
||||
- We want to add the label `active=yes` to all pods that have `app=rng`
|
||||
- We want to add the label `enabled=yes` to all pods that have `app=rng`
|
||||
|
||||
- We could edit each pod one by one with `kubectl edit` ...
|
||||
|
||||
@@ -474,9 +474,9 @@ be any interruption.*
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add `active=yes` to all pods that have `app=rng`:
|
||||
- Add `enabled=yes` to all pods that have `app=rng`:
|
||||
```bash
|
||||
kubectl label pods -l app=rng active=yes
|
||||
kubectl label pods -l app=rng enabled=yes
|
||||
```
|
||||
|
||||
]
|
||||
@@ -495,7 +495,7 @@ be any interruption.*
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update the service to add `active: yes` to its selector:
|
||||
- Update the service to add `enabled: yes` to its selector:
|
||||
```bash
|
||||
kubectl edit service rng
|
||||
```
|
||||
@@ -504,7 +504,7 @@ be any interruption.*
|
||||
```wait Please edit the object below```
|
||||
```keys /app: rng```
|
||||
```key ^J```
|
||||
```keys noactive: yes```
|
||||
```keys noenabled: yes```
|
||||
```key ^[``` ]
|
||||
```keys :wq```
|
||||
```key ^J```
|
||||
@@ -530,7 +530,7 @@ be any interruption.*
|
||||
|
||||
- If we want the string `"42"` or the string `"yes"`, we have to quote them
|
||||
|
||||
- So we have to use `active: "yes"`
|
||||
- So we have to use `enabled: "yes"`
|
||||
|
||||
.footnote[For a good laugh: if we had used "ja", "oui", "si" ... as the value, it would have worked!]
|
||||
|
||||
@@ -542,7 +542,7 @@ be any interruption.*
|
||||
|
||||
- Update the YAML manifest of the service
|
||||
|
||||
- Add `active: "yes"` to its selector
|
||||
- Add `enabled: "yes"` to its selector
|
||||
|
||||
<!--
|
||||
```wait Please edit the object below```
|
||||
@@ -566,7 +566,7 @@ If we did everything correctly, the web UI shouldn't show any change.
|
||||
|
||||
- We want to disable the pod that was created by the deployment
|
||||
|
||||
- All we have to do, is remove the `active` label from that pod
|
||||
- All we have to do, is remove the `enabled` label from that pod
|
||||
|
||||
- To identify that pod, we can use its name
|
||||
|
||||
@@ -600,7 +600,7 @@ If we did everything correctly, the web UI shouldn't show any change.
|
||||
|
||||
- In another window, remove the label from the pod:
|
||||
```bash
|
||||
kubectl label pod -l app=rng,pod-template-hash active-
|
||||
kubectl label pod -l app=rng,pod-template-hash enabled-
|
||||
```
|
||||
(The stream of HTTP logs should stop immediately)
|
||||
|
||||
@@ -623,7 +623,7 @@ class: extra-details
|
||||
|
||||
- If we scale up our cluster by adding new nodes, the daemon set will create more pods
|
||||
|
||||
- These pods won't have the `active=yes` label
|
||||
- These pods won't have the `enabled=yes` label
|
||||
|
||||
- If we want these pods to have that label, we need to edit the daemon set spec
|
||||
|
||||
|
||||
@@ -105,7 +105,23 @@ The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
---
|
||||
|
||||
## Security implications of `kubectl apply`
|
||||
## Other dashboards
|
||||
|
||||
- [Kube Web View](https://codeberg.org/hjacobs/kube-web-view)
|
||||
|
||||
- read-only dashboard
|
||||
|
||||
- optimized for "troubleshooting and incident response"
|
||||
|
||||
- see [vision and goals](https://kube-web-view.readthedocs.io/en/latest/vision.html#vision) for details
|
||||
|
||||
- [Kube Ops View](https://github.com/hjacobs/kube-ops-view)
|
||||
|
||||
- "provides a common operational picture for multiple Kubernetes clusters"
|
||||
|
||||
---
|
||||
|
||||
# Security implications of `kubectl apply`
|
||||
|
||||
- When we do `kubectl apply -f <URL>`, we create arbitrary resources
|
||||
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
# Exercise — ConfigMaps
|
||||
|
||||
- In this exercise, we will use a ConfigMap to store static assets
|
||||
|
||||
- While there are some circumstances where this can be useful ...
|
||||
|
||||
- ... It is generally **not** a good idea!
|
||||
|
||||
- Once you've read that warning, check the next slide for instructions :)
|
||||
|
||||
---
|
||||
|
||||
## Exercise — ConfigMaps
|
||||
|
||||
This will use the wordsmith app.
|
||||
|
||||
We want to store the static files (served by `web`) in a ConfigMap.
|
||||
|
||||
1. Transform the `static` directory into a ConfigMap.
|
||||
|
||||
(https://github.com/jpetazzo/wordsmith/tree/master/web/static)
|
||||
|
||||
2. Find out where that `static` directory is located in `web`.
|
||||
|
||||
(for instance, by using `kubectl exec` to investigate)
|
||||
|
||||
3. Update the definition of the `web` Deployment to use the ConfigMap.
|
||||
|
||||
(note: fonts and images will be broken; that's OK)
|
||||
|
||||
4. Make a minor change in the ConfigMap (e.g. change the text color)
|
||||
@@ -1,63 +0,0 @@
|
||||
# Exercise — Helm charts
|
||||
|
||||
Let's write a Helm chart for wordsmith!
|
||||
|
||||
We will need the YAML manifests that we wrote earlier.
|
||||
|
||||
Level 1: create a chart to deploy wordsmith.
|
||||
|
||||
Level 2: make it so that the number of replicas can be set with `--set replicas=X`.
|
||||
|
||||
Level 3: change the colors of the lego bricks.
|
||||
|
||||
(For level 3, fork the repository and use ctr.run to build images.)
|
||||
|
||||
See next slide if you need hints!
|
||||
|
||||
---
|
||||
|
||||
## Hints
|
||||
|
||||
*Scroll one slide at a time to see hints.*
|
||||
|
||||
--
|
||||
|
||||
Use `helm create` to create a new chart.
|
||||
|
||||
--
|
||||
|
||||
Delete the content of the `templates` directory and put your YAML instead.
|
||||
|
||||
--
|
||||
|
||||
Install the resulting chart. Voilà!
|
||||
|
||||
--
|
||||
|
||||
Use `{{ .Values.replicas }}` in the YAML manifest for `words`.
|
||||
|
||||
--
|
||||
|
||||
Also add `replicas: 5` to `values.yaml` to provide a default value.
|
||||
|
||||
---
|
||||
|
||||
## Changing the color
|
||||
|
||||
- Fork the repository
|
||||
|
||||
- Make sure that your fork has valid Dockerfiles
|
||||
|
||||
(or identify a branch that has valid Dockerfiles)
|
||||
|
||||
- Use the following images:
|
||||
|
||||
ctr.run/yourgithubusername/wordsmith/db:branchname
|
||||
|
||||
(replace db with web and words for the other components)
|
||||
|
||||
- Change the images and/or CSS in `web/static`
|
||||
|
||||
- Commit, push, trigger a rolling update
|
||||
|
||||
(`imagePullPolicy` should be `Always`, which is the default)
|
||||
@@ -1,39 +0,0 @@
|
||||
# Exercise — deploying on Kubernetes
|
||||
|
||||
Let's deploy the wordsmith app on Kubernetes!
|
||||
|
||||
As a reminder, we have the following components:
|
||||
|
||||
| Name | Image | Port |
|
||||
|-------|---------------------------------|------|
|
||||
| db | jpetazzo/wordsmith-db:latest | 5432 |
|
||||
| web | jpetazzo/wordsmith-web:latest | 80 |
|
||||
| words | jpetazzo/wordsmith-words:latest | 8080 |
|
||||
|
||||
We need `web` to be available from outside the cluster.
|
||||
|
||||
See next slide if you need hints!
|
||||
|
||||
---
|
||||
|
||||
## Hints
|
||||
|
||||
*Scroll one slide at a time to see hints.*
|
||||
|
||||
--
|
||||
|
||||
- For each component, we need to create a deployment and a service
|
||||
|
||||
--
|
||||
|
||||
- Deployments can be created with `kubectl create deployment`
|
||||
|
||||
--
|
||||
|
||||
- Services can be created with `kubectl expose`
|
||||
|
||||
--
|
||||
|
||||
- Public services (like `web`) need to use a special type
|
||||
|
||||
(e.g. `NodePort`)
|
||||
@@ -1,77 +0,0 @@
|
||||
# Exercise — writing YAML
|
||||
|
||||
Let's write YAML manifests for the wordsmith app!
|
||||
|
||||
It can be a single YAML file or multiple files in a directory.
|
||||
|
||||
See next slides for testing instructions and hints.
|
||||
|
||||
---
|
||||
|
||||
## How to test our YAML
|
||||
|
||||
If `XYZ` is that YAML file (or directory with YAML files), we should be able to:
|
||||
|
||||
1. Create a new namespace, e.g. `foo123`
|
||||
|
||||
2. Deploy wordsmith with a single command
|
||||
|
||||
(e.g. `kubectl apply --namespace foo123 -f XYZ`)
|
||||
|
||||
3. Find out the connection information for `web`
|
||||
|
||||
   (e.g. `kubectl get service web --namespace foo123`)
|
||||
|
||||
4. Connect to it and see the wordsmith app
|
||||
|
||||
See next slide for hints.
|
||||
|
||||
---
|
||||
|
||||
## Strategies
|
||||
|
||||
There are at least three methods to write our YAML.
|
||||
|
||||
1. Dump the YAML of existing wordsmith deployments and services.
|
||||
|
||||
(we can dump YAML with `kubectl get -o yaml ...`)
|
||||
|
||||
2. Adapt existing YAML (from the docs or dockercoins).
|
||||
|
||||
(for reference, kubercoins is at https://github.com/jpetazzo/kubercoins)
|
||||
|
||||
3. Write it entirely from scratch.
|
||||
|
||||
See next slide for more hints.
|
||||
|
||||
---
|
||||
|
||||
## Adapting YAML
|
||||
|
||||
*Scroll one slide at a time to see hints.*
|
||||
|
||||
--
|
||||
|
||||
One option is to start with the YAML from kubercoins.
|
||||
|
||||
(see https://github.com/jpetazzo/kubercoins)
|
||||
|
||||
--
|
||||
|
||||
Adapt the YAML of a deployment (e.g. worker) to run "web".
|
||||
|
||||
--
|
||||
|
||||
We need to change the name, labels, selectors, and image.
|
||||
|
||||
--
|
||||
|
||||
Then adapt the YAML of a service (e.g. webui).
|
||||
|
||||
--
|
||||
|
||||
We need to change the name, labels, selectors, possibly port number.
|
||||
|
||||
--
|
||||
|
||||
Repeat for the other components.
|
||||
@@ -8,8 +8,6 @@ We are going to cover:
|
||||
|
||||
- Admission Webhooks
|
||||
|
||||
- The Aggregation Layer
|
||||
|
||||
---
|
||||
|
||||
## Revisiting the API server
|
||||
@@ -48,90 +46,6 @@ We are going to cover:
|
||||
|
||||
---
|
||||
|
||||
## A very simple CRD
|
||||
|
||||
The YAML below describes a very simple CRD representing different kinds of coffee:
|
||||
|
||||
```yaml
|
||||
apiVersion: apiextensions.k8s.io/v1alpha1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: coffees.container.training
|
||||
spec:
|
||||
group: container.training
|
||||
version: v1alpha1
|
||||
scope: Namespaced
|
||||
names:
|
||||
plural: coffees
|
||||
singular: coffee
|
||||
kind: Coffee
|
||||
shortNames:
|
||||
- cof
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Creating a CRD
|
||||
|
||||
- Let's create the Custom Resource Definition for our Coffee resource
|
||||
|
||||
.exercise[
|
||||
|
||||
- Load the CRD:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffee-1.yaml
|
||||
```
|
||||
|
||||
- Confirm that it shows up:
|
||||
```bash
|
||||
kubectl get crds
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating custom resources
|
||||
|
||||
The YAML below defines a resource using the CRD that we just created:
|
||||
|
||||
```yaml
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: arabica
|
||||
spec:
|
||||
taste: strong
|
||||
```
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a few types of coffee beans:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffees.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Viewing custom resources
|
||||
|
||||
- By default, `kubectl get` only shows name and age of custom resources
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the coffee beans that we just created:
|
||||
```bash
|
||||
kubectl get coffees
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- We can improve that, but it's outside the scope of this section!
|
||||
|
||||
---
|
||||
|
||||
## What can we do with CRDs?
|
||||
|
||||
There are many possibilities!
|
||||
@@ -151,7 +65,7 @@ There are many possibilities!
|
||||
|
||||
- Replacing built-in types with CRDs
|
||||
|
||||
(see [this lightning talk by Tim Hockin](https://www.youtube.com/watch?v=ji0FWzFwNhA))
|
||||
(see [this lightning talk by Tim Hockin](https://www.youtube.com/watch?v=ji0FWzFwNhA&index=2&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU))
|
||||
|
||||
---
|
||||
|
||||
@@ -167,7 +81,7 @@ There are many possibilities!
|
||||
|
||||
- Generally, when creating a CRD, we also want to run a *controller*
|
||||
|
||||
(otherwise nothing will happen when we create resources of that type)
|
||||
(otherwise nothing will happen when we create resources of that type)
|
||||
|
||||
- The controller will typically *watch* our custom resources
|
||||
|
||||
@@ -181,22 +95,6 @@ Examples:
|
||||
|
||||
---
|
||||
|
||||
## (Ab)using the API server
|
||||
|
||||
- If we need to store something "safely" (as in: in etcd), we can use CRDs
|
||||
|
||||
- This gives us primitives to read/write/list objects (and optionally validate them)
|
||||
|
||||
- The Kubernetes API server can run on its own
|
||||
|
||||
(without the scheduler, controller manager, and kubelets)
|
||||
|
||||
- By loading CRDs, we can have it manage totally different objects
|
||||
|
||||
(unrelated to containers, clusters, etc.)
|
||||
|
||||
---
|
||||
|
||||
## Service catalog
|
||||
|
||||
- *Service catalog* is another extension mechanism
|
||||
@@ -211,7 +109,7 @@ Examples:
|
||||
- ClusterServiceClass
|
||||
- ClusterServicePlan
|
||||
- ServiceInstance
|
||||
- ServiceBinding
|
||||
- ServiceBinding
|
||||
|
||||
- It uses the Open service broker API
|
||||
|
||||
@@ -219,13 +117,17 @@ Examples:
|
||||
|
||||
## Admission controllers
|
||||
|
||||
- Admission controllers are another way to extend the Kubernetes API
|
||||
- When a Pod is created, it is associated with a ServiceAccount
|
||||
|
||||
- Instead of creating new types, admission controllers can transform or vet API requests
|
||||
(even if we did not specify one explicitly)
|
||||
|
||||
- The diagram on the next slide shows the path of an API request
|
||||
- That ServiceAccount was added on the fly by an *admission controller*
|
||||
|
||||
(courtesy of Banzai Cloud)
|
||||
(specifically, a *mutating admission controller*)
|
||||
|
||||
- Admission controllers sit on the API request path
|
||||
|
||||
(see the cool diagram on next slide, courtesy of Banzai Cloud)
|
||||
|
||||
---
|
||||
|
||||
@@ -235,7 +137,7 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
## Types of admission controllers
|
||||
## Admission controllers
|
||||
|
||||
- *Validating* admission controllers can accept/reject the API call
|
||||
|
||||
@@ -249,27 +151,7 @@ class: pic
|
||||
|
||||
(see [documentation](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#what-does-each-admission-controller-do) for a list)
|
||||
|
||||
- We can also dynamically define and register our own
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Some built-in admission controllers
|
||||
|
||||
- ServiceAccount:
|
||||
|
||||
automatically adds a ServiceAccount to Pods that don't explicitly specify one
|
||||
|
||||
- LimitRanger:
|
||||
|
||||
applies resource constraints specified by LimitRange objects when Pods are created
|
||||
|
||||
- NamespaceAutoProvision:
|
||||
|
||||
automatically creates namespaces when an object is created in a non-existent namespace
|
||||
|
||||
*Note: #1 and #2 are enabled by default; #3 is not.*
|
||||
- But we can also define our own!
|
||||
|
||||
---
|
||||
|
||||
@@ -309,25 +191,19 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## The aggregation layer
|
||||
## (Ab)using the API server
|
||||
|
||||
- We can delegate entire parts of the Kubernetes API to external servers
|
||||
- If we need to store something "safely" (as in: in etcd), we can use CRDs
|
||||
|
||||
- This is done by creating APIService resources
|
||||
- This gives us primitives to read/write/list objects (and optionally validate them)
|
||||
|
||||
(check them with `kubectl get apiservices`!)
|
||||
- The Kubernetes API server can run on its own
|
||||
|
||||
- The APIService resource maps a type (kind) and version to an external service
|
||||
(without the scheduler, controller manager, and kubelets)
|
||||
|
||||
- All requests concerning that type are sent (proxied) to the external service
|
||||
- By loading CRDs, we can have it manage totally different objects
|
||||
|
||||
- This allows to have resources like CRDs, but that aren't stored in etcd
|
||||
|
||||
- Example: `metrics-server`
|
||||
|
||||
(storing live metrics in etcd would be extremely inefficient)
|
||||
|
||||
- Requires significantly more work than CRDs!
|
||||
(unrelated to containers, clusters, etc.)
|
||||
|
||||
---
|
||||
|
||||
@@ -342,5 +218,3 @@ class: extra-details
|
||||
- [Built-in Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/)
|
||||
|
||||
- [Dynamic Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)
|
||||
|
||||
- [Aggregation Layer](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)
|
||||
|
||||
@@ -314,7 +314,7 @@ class: extra-details
|
||||
|
||||
- List all the resources created by this release:
|
||||
```bash
|
||||
kubectl get all --selector=release=java4ever
|
||||
    kubectl get all --selector=release=java4ever
|
||||
```
|
||||
|
||||
]
|
||||
@@ -416,4 +416,4 @@ All unspecified values will take the default values defined in the chart.
|
||||
curl localhost:$PORT/sample/
|
||||
```
|
||||
|
||||
]
|
||||
]
|
||||
@@ -65,7 +65,7 @@ Where does that come from?
|
||||
|
||||
- Look for ConfigMaps and Secrets:
|
||||
```bash
|
||||
kubectl get configmaps,secrets
|
||||
    kubectl get configmaps,secrets
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
@@ -120,13 +120,19 @@
|
||||
|
||||
- We want our ingress load balancer to be available on port 80
|
||||
|
||||
- The best way to do that would be with a `LoadBalancer` service
|
||||
- We could do that with a `LoadBalancer` service
|
||||
|
||||
... but it requires support from the underlying infrastructure
|
||||
|
||||
- Instead, we are going to use the `hostNetwork` mode on the Traefik pods
|
||||
- We could use pods specifying `hostPort: 80`
|
||||
|
||||
- Let's see what this `hostNetwork` mode is about ...
|
||||
... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
|
||||
|
||||
- We could use a `NodePort` service
|
||||
|
||||
... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
|
||||
|
||||
- Last resort: the `hostNetwork` mode
|
||||
|
||||
---
|
||||
|
||||
@@ -164,26 +170,6 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Other techniques to expose port 80
|
||||
|
||||
- We could use pods specifying `hostPort: 80`
|
||||
|
||||
... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
|
||||
|
||||
- We could use a `NodePort` service
|
||||
|
||||
... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
|
||||
|
||||
- We could create a service with an external IP
|
||||
|
||||
... this would work, but would require a few extra steps
|
||||
|
||||
(figuring out the IP address and adding it to the service)
|
||||
|
||||
---
|
||||
|
||||
## Running Traefik
|
||||
|
||||
- The [Traefik documentation](https://docs.traefik.io/user-guide/kubernetes/#deploy-trfik-using-a-deployment-or-daemonset) tells us to pick between Deployment and Daemon Set
|
||||
|
||||
@@ -1,157 +0,0 @@
|
||||
# Interconnecting clusters
|
||||
|
||||
- We assigned different Cluster CIDRs to each cluster
|
||||
|
||||
- This allows us to connect our clusters together
|
||||
|
||||
- We will leverage kube-router BGP abilities for that
|
||||
|
||||
- We will *peer* each kube-router instance with a *route reflector*
|
||||
|
||||
- As a result, we will be able to ping each other's pods
|
||||
|
||||
---
|
||||
|
||||
## Disclaimers
|
||||
|
||||
- There are many methods to interconnect clusters
|
||||
|
||||
- Depending on your network implementation, you will use different methods
|
||||
|
||||
- The method shown here only works for nodes with direct layer 2 connection
|
||||
|
||||
- We will often need to use tunnels or other network techniques
|
||||
|
||||
---
|
||||
|
||||
## The plan
|
||||
|
||||
- Someone will start the *route reflector*
|
||||
|
||||
(typically, that will be the person presenting these slides!)
|
||||
|
||||
- We will update our kube-router configuration
|
||||
|
||||
- We will add a *peering* with the route reflector
|
||||
|
||||
(instructing kube-router to connect to it and exchange route information)
|
||||
|
||||
- We should see the routes to other clusters on our nodes
|
||||
|
||||
(in the output of e.g. `route -n` or `ip route show`)
|
||||
|
||||
- We should be able to ping pods of other nodes
|
||||
|
||||
---
|
||||
|
||||
## Starting the route reflector
|
||||
|
||||
- Only do this slide if you are doing this on your own
|
||||
|
||||
- There is a Compose file in the `compose/frr-route-reflector` directory
|
||||
|
||||
- Before continuing, make sure that you have the IP address of the route reflector
|
||||
|
||||
---
|
||||
|
||||
## Configuring kube-router
|
||||
|
||||
- This can be done in two ways:
|
||||
|
||||
- with command-line flags to the `kube-router` process
|
||||
|
||||
- with annotations to Node objects
|
||||
|
||||
- We will use the command-line flags
|
||||
|
||||
(because it will automatically propagate to all nodes)
|
||||
|
||||
.footnote[Note: with Calico, this is achieved by creating a BGPPeer CRD.]
|
||||
|
||||
---
|
||||
|
||||
## Updating kube-router configuration
|
||||
|
||||
- We need to pass two command-line flags to the kube-router process
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the `kuberouter.yaml` file
|
||||
|
||||
- Add the following flags to the kube-router arguments:
|
||||
```
|
||||
- "--peer-router-ips=`X.X.X.X`"
|
||||
- "--peer-router-asns=64512"
|
||||
```
|
||||
(Replace `X.X.X.X` with the route reflector address)
|
||||
|
||||
- Update the DaemonSet definition:
|
||||
```bash
|
||||
kubectl apply -f kuberouter.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Restarting kube-router
|
||||
|
||||
- The DaemonSet will not update the pods automatically
|
||||
|
||||
(it is using the default `updateStrategy`, which is `OnDelete`)
|
||||
|
||||
- We will therefore delete the pods
|
||||
|
||||
(they will be recreated with the updated definition)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Delete all the kube-router pods:
|
||||
```bash
|
||||
kubectl delete pods -n kube-system -l k8s-app=kube-router
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: the other `updateStrategy` for a DaemonSet is RollingUpdate.
|
||||
<br/>
|
||||
For critical services, we might want to precisely control the update process.
|
||||
|
||||
---
|
||||
|
||||
## Checking peering status
|
||||
|
||||
- We can see informative messages in the output of kube-router:
|
||||
```
|
||||
time="2019-04-07T15:53:56Z" level=info msg="Peer Up"
|
||||
Key=X.X.X.X State=BGP_FSM_OPENCONFIRM Topic=Peer
|
||||
```
|
||||
|
||||
- We should see the routes of the other clusters show up
|
||||
|
||||
- For debugging purposes, the reflector also exports a route to 1.0.0.2/32
|
||||
|
||||
- That route will show up like this:
|
||||
```
|
||||
1.0.0.2 172.31.X.Y 255.255.255.255 UGH 0 0 0 eth0
|
||||
```
|
||||
|
||||
- We should be able to ping the pods of other clusters!
|
||||
|
||||
---
|
||||
|
||||
## If we wanted to do more ...
|
||||
|
||||
- kube-router can also export ClusterIP addresses
|
||||
|
||||
(by adding the flag `--advertise-cluster-ip`)
|
||||
|
||||
- They are exported individually (as /32)
|
||||
|
||||
- This would allow us to easily access other clusters' services
|
||||
|
||||
(without having to resolve the individual addresses of pods)
|
||||
|
||||
- Even better if it's combined with DNS integration
|
||||
|
||||
(to facilitate name → ClusterIP resolution)
|
||||
@@ -1,76 +1,20 @@
|
||||
# Exposing containers
|
||||
|
||||
- We can connect to our pods using their IP address
|
||||
- `kubectl expose` creates a *service* for existing pods
|
||||
|
||||
- Then we need to figure out a lot of things:
|
||||
- A *service* is a stable address for a pod (or a bunch of pods)
|
||||
|
||||
- how do we look up the IP address of the pod(s)?
|
||||
- If we want to connect to our pod(s), we need to create a *service*
|
||||
|
||||
- how do we connect from outside the cluster?
|
||||
- Once a service is created, CoreDNS will allow us to resolve it by name
|
||||
|
||||
- how do we load balance traffic?
|
||||
(i.e. after creating service `hello`, the name `hello` will resolve to something)
|
||||
|
||||
- what if a pod fails?
|
||||
|
||||
- Kubernetes has a resource type named *Service*
|
||||
|
||||
- Services address all these questions!
|
||||
|
||||
---
|
||||
|
||||
## Services in a nutshell
|
||||
|
||||
- Services give us a *stable endpoint* to connect to a pod or a group of pods
|
||||
|
||||
- An easy way to create a service is to use `kubectl expose`
|
||||
|
||||
- If we have a deployment named `my-little-deploy`, we can run:
|
||||
|
||||
`kubectl expose deployment my-little-deploy --port=80`
|
||||
|
||||
... and this will create a service with the same name (`my-little-deploy`)
|
||||
|
||||
- Services are automatically added to an internal DNS zone
|
||||
|
||||
(in the example above, our code can now connect to http://my-little-deploy/)
|
||||
|
||||
---
|
||||
|
||||
## Advantages of services
|
||||
|
||||
- We don't need to look up the IP address of the pod(s)
|
||||
|
||||
(we resolve the IP address of the service using DNS)
|
||||
|
||||
- There are multiple service types; some of them allow external traffic
|
||||
|
||||
(e.g. `LoadBalancer` and `NodePort`)
|
||||
|
||||
- Services provide load balancing
|
||||
|
||||
(for both internal and external traffic)
|
||||
|
||||
- Service addresses are independent from pods' addresses
|
||||
|
||||
(when a pod fails, the service seamlessly sends traffic to its replacement)
|
||||
|
||||
---
|
||||
|
||||
## Many kinds and flavors of service
|
||||
|
||||
- There are different types of services:
|
||||
- There are different types of services, detailed on the following slides:
|
||||
|
||||
`ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName`
|
||||
|
||||
- There are also *headless services*
|
||||
|
||||
- Services can also have optional *external IPs*
|
||||
|
||||
- There is also another resource type called *Ingress*
|
||||
|
||||
(specifically for HTTP services)
|
||||
|
||||
- Wow, that's a lot! Let's start with the basics ...
|
||||
- HTTP services can also use `Ingress` resources (more on that later)
|
||||
|
||||
---
|
||||
|
||||
@@ -129,6 +73,24 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `ExternalName`
|
||||
|
||||
- No load balancer (internal or external) is created
|
||||
|
||||
- Only a DNS entry gets added to the DNS managed by Kubernetes
|
||||
|
||||
- That DNS entry will just be a `CNAME` to a provided record
|
||||
|
||||
Example:
|
||||
```bash
|
||||
kubectl create service externalname k8s --external-name kubernetes.io
|
||||
```
|
||||
*Creates a CNAME `k8s` pointing to `kubernetes.io`*
|
||||
|
||||
---
|
||||
|
||||
## Running containers with open ports
|
||||
|
||||
- Since `ping` doesn't have anything to connect to, we'll have to run something else
|
||||
@@ -213,7 +175,9 @@
|
||||
|
||||
- As a result: you *have to* indicate the port number for your service
|
||||
|
||||
(with some exceptions, like `ExternalName` or headless services, covered later)
|
||||
- Running services with arbitrary port (or port ranges) requires hacks
|
||||
|
||||
(e.g. host networking mode)
|
||||
|
||||
---
|
||||
|
||||
@@ -254,48 +218,7 @@ Try it a few times! Our requests are load balanced across multiple pods.
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `ExternalName`
|
||||
|
||||
- Services of type `ExternalName` are quite different
|
||||
|
||||
- No load balancer (internal or external) is created
|
||||
|
||||
- Only a DNS entry gets added to the DNS managed by Kubernetes
|
||||
|
||||
- That DNS entry will just be a `CNAME` to a provided record
|
||||
|
||||
Example:
|
||||
```bash
|
||||
kubectl create service externalname k8s --external-name kubernetes.io
|
||||
```
|
||||
*Creates a CNAME `k8s` pointing to `kubernetes.io`*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## External IPs
|
||||
|
||||
- We can add an External IP to a service, e.g.:
|
||||
```bash
|
||||
kubectl expose deploy my-little-deploy --port=80 --external-ip=1.2.3.4
|
||||
```
|
||||
|
||||
- `1.2.3.4` should be the address of one of our nodes
|
||||
|
||||
(it could also be a virtual address, service address, or VIP, shared by multiple nodes)
|
||||
|
||||
- Connections to `1.2.3.4:80` will be sent to our service
|
||||
|
||||
- External IPs will also show up on services of type `LoadBalancer`
|
||||
|
||||
(they will be added automatically by the process provisioning the load balancer)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Headless services
|
||||
## If we don't need a load balancer
|
||||
|
||||
- Sometimes, we want to access our scaled services directly:
|
||||
|
||||
@@ -315,7 +238,7 @@ class: extra-details
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Creating a headless services
|
||||
## Headless services
|
||||
|
||||
- A headless service is obtained by setting the `clusterIP` field to `None`
|
||||
|
||||
@@ -401,32 +324,18 @@ error: the server doesn't have a resource type "endpoint"
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The DNS zone
|
||||
## `ExternalIP`
|
||||
|
||||
- In the `kube-system` namespace, there should be a service named `kube-dns`
|
||||
- When creating a service, we can also specify an `ExternalIP`
|
||||
|
||||
- This is the internal DNS server that can resolve service names
|
||||
(this is not a type, but an extra attribute to the service)
|
||||
|
||||
- The default domain name for the service we created is `default.svc.cluster.local`
|
||||
- It will make the service available on this IP address
|
||||
|
||||
.exercise[
|
||||
|
||||
- Get the IP address of the internal DNS server:
|
||||
```bash
|
||||
IP=$(kubectl -n kube-system get svc kube-dns -o jsonpath={.spec.clusterIP})
|
||||
```
|
||||
|
||||
- Resolve the cluster IP for the `httpenv` service:
|
||||
```bash
|
||||
host httpenv.default.svc.cluster.local $IP
|
||||
```
|
||||
|
||||
]
|
||||
(if the IP address belongs to a node of the cluster)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `Ingress`
|
||||
|
||||
- Ingresses are another type (kind) of resource
|
||||
|
||||
@@ -115,22 +115,6 @@ The output is a list of available API routes.
|
||||
|
||||
---
|
||||
|
||||
## OpenAPI (fka Swagger)
|
||||
|
||||
- The Kubernetes API serves an OpenAPI Specification
|
||||
|
||||
(OpenAPI was formerly known as Swagger)
|
||||
|
||||
- OpenAPI has many advantages
|
||||
|
||||
(generate client library code, generate test code ...)
|
||||
|
||||
- For us, this means we can explore the API with [Swagger UI](https://swagger.io/tools/swagger-ui/)
|
||||
|
||||
(for instance with the [Swagger UI add-on for Firefox](https://addons.mozilla.org/en-US/firefox/addon/swagger-ui-ff/))
|
||||
|
||||
---
|
||||
|
||||
## `kubectl proxy` is intended for local use
|
||||
|
||||
- By default, the proxy listens on port 8001
|
||||
@@ -151,8 +135,6 @@ The output is a list of available API routes.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running `kubectl proxy` on a remote machine
|
||||
|
||||
- If we wanted to connect to the proxy from another machine, we would need to:
|
||||
@@ -170,8 +152,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Security considerations
|
||||
|
||||
- Running `kubectl proxy` openly is a huge security risk
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Last words
|
||||
# What's next?
|
||||
|
||||
- Congratulations!
|
||||
|
||||
@@ -189,20 +189,6 @@ are a few tools that can help us.*
|
||||
|
||||
---
|
||||
|
||||
## Developer experience
|
||||
|
||||
- How do we on-board a new developer?
|
||||
|
||||
- What do they need to install to get a dev stack?
|
||||
|
||||
- How does a code change make it from dev to prod?
|
||||
|
||||
- How does someone add a component to a stack?
|
||||
|
||||
*These questions are good "sanity checks" to validate our strategy!*
|
||||
|
||||
---
|
||||
|
||||
## Some guidelines
|
||||
|
||||
- Start small
|
||||
@@ -64,19 +64,3 @@ If it shows our nodes and their CPU and memory load, we're good!
|
||||
]
|
||||
|
||||
- We can also use selectors (`-l app=...`)
|
||||
|
||||
---
|
||||
|
||||
## Other tools
|
||||
|
||||
- kube-capacity is a great CLI tool to view resources
|
||||
|
||||
(https://github.com/robscott/kube-capacity)
|
||||
|
||||
- It can show resource and limits, and compare them with usage
|
||||
|
||||
- It can show utilization per node, or per pod
|
||||
|
||||
- kube-resource-report can generate HTML reports
|
||||
|
||||
(https://github.com/hjacobs/kube-resource-report)
|
||||
|
||||
@@ -102,6 +102,8 @@
|
||||
|
||||
]
|
||||
|
||||
- Some tools like Helm will create namespaces automatically when needed
|
||||
|
||||
---
|
||||
|
||||
## Using namespaces
|
||||
@@ -339,29 +341,12 @@ Note: we could have used `--namespace=default` for the same result.
|
||||
|
||||
- `kube-ps1` makes it easy to track these, by showing them in our shell prompt
|
||||
|
||||
- It is installed on our training clusters, and when using [shpod](https://github.com/jpetazzo/shpod)
|
||||
- It's a simple shell script available from https://github.com/jonmosco/kube-ps1
|
||||
|
||||
- It gives us a prompt looking like this one:
|
||||
- On our clusters, `kube-ps1` is installed and included in `PS1`:
|
||||
```
|
||||
[123.45.67.89] `(kubernetes-admin@kubernetes:default)` docker@node1 ~
|
||||
```
|
||||
(The highlighted part is `context:namespace`, managed by `kube-ps1`)
|
||||
|
||||
- Highly recommended if you work across multiple contexts or namespaces!
|
||||
|
||||
---
|
||||
|
||||
## Installing `kube-ps1`
|
||||
|
||||
- It's a simple shell script available from https://github.com/jonmosco/kube-ps1
|
||||
|
||||
- It needs to be [installed in our profile/rc files](https://github.com/jonmosco/kube-ps1#installing)
|
||||
|
||||
(instructions differ depending on platform, shell, etc.)
|
||||
|
||||
- Once installed, it defines aliases called `kube_ps1`, `kubeon`, `kubeoff`
|
||||
|
||||
(to selectively enable/disable it when needed)
|
||||
|
||||
- Pro-tip: install it on your machine during the next break!
|
||||
|
||||
|
||||
@@ -530,7 +530,7 @@ After the Kibana UI loads, we need to click around a bit
|
||||
|
||||
- Lookup the NodePort number and connect to it:
|
||||
```bash
|
||||
kubectl get services
|
||||
kubectl get services
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
@@ -144,23 +144,158 @@ spec:
|
||||
|
||||
---
|
||||
|
||||
## Individual volumes
|
||||
## Persistent Volume Claims
|
||||
|
||||
- The Pods of a Stateful set can have individual volumes
|
||||
- To abstract the different types of storage, a pod can use a special volume type
|
||||
|
||||
(i.e. in a Stateful set with 3 replicas, there will be 3 volumes)
|
||||
- This type is a *Persistent Volume Claim*
|
||||
|
||||
- These volumes can be either:
|
||||
- A Persistent Volume Claim (PVC) is a resource type
|
||||
|
||||
- allocated from a pool of pre-existing volumes (disks, partitions ...)
|
||||
(visible with `kubectl get persistentvolumeclaims` or `kubectl get pvc`)
|
||||
|
||||
- created dynamically using a storage system
|
||||
- A PVC is not a volume; it is a *request for a volume*
|
||||
|
||||
- This introduces a bunch of new Kubernetes resource types:
|
||||
---
|
||||
|
||||
Persistent Volumes, Persistent Volume Claims, Storage Classes
|
||||
## Persistent Volume Claims in practice
|
||||
|
||||
(and also `volumeClaimTemplates`, that appear within Stateful Set manifests!)
|
||||
- Using a Persistent Volume Claim is a two-step process:
|
||||
|
||||
- creating the claim
|
||||
|
||||
- using the claim in a pod (as if it were any other kind of volume)
|
||||
|
||||
- A PVC starts by being Unbound (without an associated volume)
|
||||
|
||||
- Once it is associated with a Persistent Volume, it becomes Bound
|
||||
|
||||
- A Pod referring an unbound PVC will not start
|
||||
|
||||
(but as soon as the PVC is bound, the Pod can start)
|
||||
|
||||
---
|
||||
|
||||
## Binding PV and PVC
|
||||
|
||||
- A Kubernetes controller continuously watches PV and PVC objects
|
||||
|
||||
- When it notices an unbound PVC, it tries to find a satisfactory PV
|
||||
|
||||
("satisfactory" in terms of size and other characteristics; see next slide)
|
||||
|
||||
- If no PV fits the PVC, a PV can be created dynamically
|
||||
|
||||
(this requires to configure a *dynamic provisioner*, more on that later)
|
||||
|
||||
- Otherwise, the PVC remains unbound indefinitely
|
||||
|
||||
(until we manually create a PV or setup dynamic provisioning)
|
||||
|
||||
---
|
||||
|
||||
## What's in a Persistent Volume Claim?
|
||||
|
||||
- At the very least, the claim should indicate:
|
||||
|
||||
- the size of the volume (e.g. "5 GiB")
|
||||
|
||||
- the access mode (e.g. "read-write by a single pod")
|
||||
|
||||
- Optionally, it can also specify a Storage Class
|
||||
|
||||
- The Storage Class indicates:
|
||||
|
||||
- which storage system to use (e.g. Portworx, EBS...)
|
||||
|
||||
- extra parameters for that storage system
|
||||
|
||||
e.g.: "replicate the data 3 times, and use SSD media"
|
||||
|
||||
---
|
||||
|
||||
## What's a Storage Class?
|
||||
|
||||
- A Storage Class is yet another Kubernetes API resource
|
||||
|
||||
(visible with e.g. `kubectl get storageclass` or `kubectl get sc`)
|
||||
|
||||
- It indicates which *provisioner* to use
|
||||
|
||||
(which controller will create the actual volume)
|
||||
|
||||
- And arbitrary parameters for that provisioner
|
||||
|
||||
(replication levels, type of disk ... anything relevant!)
|
||||
|
||||
- Storage Classes are required if we want to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
|
||||
|
||||
(but we can also create volumes manually, and ignore Storage Classes)
|
||||
|
||||
---
|
||||
|
||||
## Defining a Persistent Volume Claim
|
||||
|
||||
Here is a minimal PVC:
|
||||
|
||||
```yaml
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: my-claim
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using a Persistent Volume Claim
|
||||
|
||||
Here is a Pod definition like the ones shown earlier, but using a PVC:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-using-a-claim
|
||||
spec:
|
||||
containers:
|
||||
- image: ...
|
||||
name: container-using-a-claim
|
||||
volumeMounts:
|
||||
- mountPath: /my-vol
|
||||
name: my-volume
|
||||
volumes:
|
||||
- name: my-volume
|
||||
persistentVolumeClaim:
|
||||
claimName: my-claim
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Persistent Volume Claims and Stateful sets
|
||||
|
||||
- A stateful set can define a `volumeClaimTemplate` for its pods
|
||||
|
||||
- A `volumeClaimTemplate` will dynamically create one Persistent Volume Claim per pod
|
||||
|
||||
- Each pod will therefore have its own volume
|
||||
|
||||
- These volumes are numbered (like the pods)
|
||||
|
||||
- When updating the stateful set (e.g. image upgrade), each pod keeps its volume
|
||||
|
||||
- When pods get rescheduled (e.g. node failure), they keep their volume
|
||||
|
||||
(this requires a storage system that is not node-local)
|
||||
|
||||
- These volumes are not automatically deleted
|
||||
|
||||
(when the stateful set is scaled down or deleted)
|
||||
|
||||
---
|
||||
|
||||
@@ -306,9 +441,11 @@ nodes and encryption of gossip traffic) were removed for simplicity.
|
||||
|
||||
## Caveats
|
||||
|
||||
- We aren't using actual persistence yet
|
||||
- We haven't used a `volumeClaimTemplate` here
|
||||
|
||||
(no `volumeClaimTemplate`, Persistent Volume, etc.)
|
||||
- That's because we don't have a storage provider yet
|
||||
|
||||
(except if you're running this on your own and your cluster has one)
|
||||
|
||||
- What happens if we lose a pod?
|
||||
|
||||
@@ -337,266 +474,3 @@ nodes and encryption of gossip traffic) were removed for simplicity.
|
||||
- we lose all the data (ouch)
|
||||
|
||||
- If we run Consul without persistent storage, backups are a good idea!
|
||||
|
||||
---
|
||||
|
||||
# Persistent Volume Claims
|
||||
|
||||
- Our Pods can use a special volume type: a *Persistent Volume Claim*
|
||||
|
||||
- A Persistent Volume Claim (PVC) is also a Kubernetes resource
|
||||
|
||||
(visible with `kubectl get persistentvolumeclaims` or `kubectl get pvc`)
|
||||
|
||||
- A PVC is not a volume; it is a *request for a volume*
|
||||
|
||||
- It should indicate at least:
|
||||
|
||||
- the size of the volume (e.g. "5 GiB")
|
||||
|
||||
- the access mode (e.g. "read-write by a single pod")
|
||||
|
||||
---
|
||||
|
||||
## What's in a PVC?
|
||||
|
||||
- A PVC contains at least:
|
||||
|
||||
- a list of *access modes* (ReadWriteOnce, ReadOnlyMany, ReadWriteMany)
|
||||
|
||||
- a size (interpreted as the minimal storage space needed)
|
||||
|
||||
- It can also contain optional elements:
|
||||
|
||||
- a selector (to restrict which actual volumes it can use)
|
||||
|
||||
- a *storage class* (used by dynamic provisioning, more on that later)
|
||||
|
||||
---
|
||||
|
||||
## What does a PVC look like?
|
||||
|
||||
Here is a manifest for a basic PVC:
|
||||
|
||||
```yaml
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: my-claim
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using a Persistent Volume Claim
|
||||
|
||||
Here is a Pod definition like the ones shown earlier, but using a PVC:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-using-a-claim
|
||||
spec:
|
||||
containers:
|
||||
- image: ...
|
||||
name: container-using-a-claim
|
||||
volumeMounts:
|
||||
- mountPath: /my-vol
|
||||
name: my-volume
|
||||
volumes:
|
||||
- name: my-volume
|
||||
persistentVolumeClaim:
|
||||
claimName: my-claim
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Creating and using Persistent Volume Claims
|
||||
|
||||
- PVCs can be created manually and used explicitly
|
||||
|
||||
(as shown on the previous slides)
|
||||
|
||||
- They can also be created and used through Stateful Sets
|
||||
|
||||
(this will be shown later)
|
||||
|
||||
---
|
||||
|
||||
## Lifecycle of Persistent Volume Claims
|
||||
|
||||
- When a PVC is created, it starts existing in "Unbound" state
|
||||
|
||||
(without an associated volume)
|
||||
|
||||
- A Pod referencing an unbound PVC will not start
|
||||
|
||||
(the scheduler will wait until the PVC is bound to place it)
|
||||
|
||||
- A special controller continuously monitors PVCs to associate them with PVs
|
||||
|
||||
- If no PV is available, one must be created:
|
||||
|
||||
- manually (by operator intervention)
|
||||
|
||||
- using a *dynamic provisioner* (more on that later)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Which PV gets associated to a PVC?
|
||||
|
||||
- The PV must satisfy the PVC constraints
|
||||
|
||||
(access mode, size, optional selector, optional storage class)
|
||||
|
||||
- The PVs with the closest access mode are picked
|
||||
|
||||
- Then the PVs with the closest size
|
||||
|
||||
- It is possible to specify a `claimRef` when creating a PV
|
||||
|
||||
(this will associate it to the specified PVC, but only if the PV satisfies all the requirements of the PVC; otherwise another PV might end up being picked)
|
||||
|
||||
- For all the details about the PersistentVolumeClaimBinder, check [this doc](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/persistent-storage.md#matching-and-binding)
|
||||
|
||||
---
|
||||
|
||||
## Persistent Volume Claims and Stateful sets
|
||||
|
||||
- A Stateful set can define one (or more) `volumeClaimTemplate`
|
||||
|
||||
- Each `volumeClaimTemplate` will create one Persistent Volume Claim per pod
|
||||
|
||||
- Each pod will therefore have its own individual volume
|
||||
|
||||
- These volumes are numbered (like the pods)
|
||||
|
||||
- Example:
|
||||
|
||||
- a Stateful set is named `db`
|
||||
- it is scaled to 3 replicas
|
||||
- it has a `volumeClaimTemplate` named `data`
|
||||
- then it will create pods `db-0`, `db-1`, `db-2`
|
||||
- these pods will have volumes named `data-db-0`, `data-db-1`, `data-db-2`
|
||||
|
||||
---
|
||||
|
||||
## Persistent Volume Claims are sticky
|
||||
|
||||
- When updating the stateful set (e.g. image upgrade), each pod keeps its volume
|
||||
|
||||
- When pods get rescheduled (e.g. node failure), they keep their volume
|
||||
|
||||
(this requires a storage system that is not node-local)
|
||||
|
||||
- These volumes are not automatically deleted
|
||||
|
||||
(when the stateful set is scaled down or deleted)
|
||||
|
||||
- If a stateful set is scaled back up later, the pods get their data back
|
||||
|
||||
---
|
||||
|
||||
## Dynamic provisioners
|
||||
|
||||
- A *dynamic provisioner* monitors unbound PVCs
|
||||
|
||||
- It can create volumes (and the corresponding PV) on the fly
|
||||
|
||||
- This requires the PVCs to have a *storage class*
|
||||
|
||||
(annotation `volume.beta.kubernetes.io/storage-provisioner`)
|
||||
|
||||
- A dynamic provisioner only acts on PVCs with the right storage class
|
||||
|
||||
(it ignores the other ones)
|
||||
|
||||
- Just like `LoadBalancer` services, dynamic provisioners are optional
|
||||
|
||||
(i.e. our cluster may or may not have one pre-installed)
|
||||
|
||||
---
|
||||
|
||||
## What's a Storage Class?
|
||||
|
||||
- A Storage Class is yet another Kubernetes API resource
|
||||
|
||||
(visible with e.g. `kubectl get storageclass` or `kubectl get sc`)
|
||||
|
||||
- It indicates which *provisioner* to use
|
||||
|
||||
(which controller will create the actual volume)
|
||||
|
||||
- And arbitrary parameters for that provisioner
|
||||
|
||||
(replication levels, type of disk ... anything relevant!)
|
||||
|
||||
- Storage Classes are required if we want to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
|
||||
|
||||
(but we can also create volumes manually, and ignore Storage Classes)
|
||||
|
||||
---
|
||||
|
||||
## The default storage class
|
||||
|
||||
- At most one storage class can be marked as the default class
|
||||
|
||||
(by annotating it with `storageclass.kubernetes.io/is-default-class=true`)
|
||||
|
||||
- When a PVC is created, it will be annotated with the default storage class
|
||||
|
||||
(unless it specifies an explicit storage class)
|
||||
|
||||
- This only happens at PVC creation
|
||||
|
||||
(existing PVCs are not updated when we mark a class as the default one)
|
||||
|
||||
---
|
||||
|
||||
## Dynamic provisioning setup
|
||||
|
||||
This is how we can achieve fully automated provisioning of persistent storage.
|
||||
|
||||
1. Configure a storage system.
|
||||
|
||||
(It needs to have an API, or be capable of automated provisioning of volumes.)
|
||||
|
||||
2. Install a dynamic provisioner for this storage system.
|
||||
|
||||
(This is some specific controller code.)
|
||||
|
||||
3. Create a Storage Class for this system.
|
||||
|
||||
(It has to match what the dynamic provisioner is expecting.)
|
||||
|
||||
4. Annotate the Storage Class to be the default one.
|
||||
|
||||
---
|
||||
|
||||
## Dynamic provisioning usage
|
||||
|
||||
After setting up the system (previous slide), all we need to do is:
|
||||
|
||||
*Create a Stateful Set that makes use of a `volumeClaimTemplate`.*
|
||||
|
||||
This will trigger the following actions.
|
||||
|
||||
1. The Stateful Set creates PVCs according to the `volumeClaimTemplate`.
|
||||
|
||||
2. The Stateful Set creates Pods using these PVCs.
|
||||
|
||||
3. The PVCs are automatically annotated with our Storage Class.
|
||||
|
||||
4. The dynamic provisioner provisions volumes and creates the corresponding PVs.
|
||||
|
||||
5. The PersistentVolumeClaimBinder associates the PVs and the PVCs together.
|
||||
|
||||
6. PVCs are now bound, the Pods can start.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
## Versions installed
|
||||
|
||||
- Kubernetes 1.17.2
|
||||
- Kubernetes 1.17.1
|
||||
- Docker Engine 19.03.5
|
||||
- Docker Compose 1.24.1
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ class: extra-details
|
||||
|
||||
- *Volumes*:
|
||||
|
||||
- appear in Pod specifications (we'll see that in a few slides)
|
||||
- appear in Pod specifications (see next slide)
|
||||
|
||||
- do not exist as API resources (**cannot** do `kubectl get volumes`)
|
||||
|
||||
@@ -232,7 +232,7 @@ spec:
|
||||
mountPath: /usr/share/nginx/html/
|
||||
- name: git
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
@@ -298,14 +298,14 @@ spec:
|
||||
|
||||
- As soon as we see its IP address, access it:
|
||||
```bash
|
||||
curl `$IP`
|
||||
curl $IP
|
||||
```
|
||||
|
||||
<!-- ```bash /bin/sleep 5``` -->
|
||||
|
||||
- A few seconds later, the state of the pod will change; access it again:
|
||||
```bash
|
||||
curl `$IP`
|
||||
curl $IP
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
@@ -209,37 +209,19 @@ And *then* it is time to look at orchestration!
|
||||
|
||||
## Managing stack deployments
|
||||
|
||||
- Applications are made of many resources
|
||||
- The best deployment tool will vary, depending on:
|
||||
|
||||
(Deployments, Services, and much more)
|
||||
- the size and complexity of your stack(s)
|
||||
- how often you change it (i.e. add/remove components)
|
||||
- the size and skills of your team
|
||||
|
||||
- We need to automate the creation / update / management of these resources
|
||||
- A few examples:
|
||||
|
||||
- There is no "absolute best" tool or method; it depends on:
|
||||
|
||||
- the size and complexity of our stack(s)
|
||||
- how often we change it (i.e. add/remove components)
|
||||
- the size and skills of our team
|
||||
|
||||
---
|
||||
|
||||
## A few tools to manage stacks
|
||||
|
||||
- Shell scripts invoking `kubectl`
|
||||
|
||||
- YAML resource manifests committed to a repo
|
||||
|
||||
- [Kustomize](https://github.com/kubernetes-sigs/kustomize)
|
||||
(YAML manifests + patches applied on top)
|
||||
|
||||
- [Helm](https://github.com/kubernetes/helm)
|
||||
(YAML manifests + templating engine)
|
||||
|
||||
- [Spinnaker](https://www.spinnaker.io/)
|
||||
(Netflix' CD platform)
|
||||
|
||||
- [Brigade](https://brigade.sh/)
|
||||
(event-driven scripting; no YAML)
|
||||
- shell scripts invoking `kubectl`
|
||||
- YAML resources descriptions committed to a repo
|
||||
- [Helm](https://github.com/kubernetes/helm) (~package manager)
|
||||
- [Spinnaker](https://www.spinnaker.io/) (Netflix' CD platform)
|
||||
- [Brigade](https://brigade.sh/) (event-driven scripting; no YAML)
|
||||
|
||||
---
|
||||
|
||||
@@ -278,3 +260,17 @@ Sorry Star Trek fans, this is not the federation you're looking for!
|
||||
- Synchronize resources across clusters
|
||||
|
||||
- Discover resources across clusters
|
||||
|
||||
---
|
||||
|
||||
## Developer experience
|
||||
|
||||
*We've put this last, but it's pretty important!*
|
||||
|
||||
- How do you on-board a new developer?
|
||||
|
||||
- What do they need to install to get a dev stack?
|
||||
|
||||
- How does a code change make it from dev to prod?
|
||||
|
||||
- How does someone add a component to a stack?
|
||||
|
||||
@@ -91,76 +91,3 @@
|
||||
because the resources that we created lack the necessary annotation.
|
||||
We can safely ignore them.)
|
||||
|
||||
---
|
||||
|
||||
## Deleting resources
|
||||
|
||||
- We can also use a YAML file to *delete* resources
|
||||
|
||||
- `kubectl delete -f ...` will delete all the resources mentioned in a YAML file
|
||||
|
||||
(useful to clean up everything that was created by `kubectl apply -f ...`)
|
||||
|
||||
- The definitions of the resources don't matter
|
||||
|
||||
(just their `kind`, `apiVersion`, and `name`)
|
||||
|
||||
---
|
||||
|
||||
## Pruning¹ resources
|
||||
|
||||
- We can also tell `kubectl` to remove old resources
|
||||
|
||||
- This is done with `kubectl apply -f ... --prune`
|
||||
|
||||
- It will remove resources that don't exist in the YAML file(s)
|
||||
|
||||
- But only if they were created with `kubectl apply` in the first place
|
||||
|
||||
(technically, if they have an annotation `kubectl.kubernetes.io/last-applied-configuration`)
|
||||
|
||||
.footnote[¹If English is not your first language: *to prune* means to remove dead or overgrown branches in a tree, to help it to grow.]
|
||||
|
||||
---
|
||||
|
||||
## YAML as source of truth
|
||||
|
||||
- Imagine the following workflow:
|
||||
|
||||
- do not use `kubectl run`, `kubectl create deployment`, `kubectl expose` ...
|
||||
|
||||
- define everything with YAML
|
||||
|
||||
- `kubectl apply -f ... --prune --all` that YAML
|
||||
|
||||
- keep that YAML under version control
|
||||
|
||||
- enforce all changes to go through that YAML (e.g. with pull requests)
|
||||
|
||||
- Our version control system now has a full history of what we deploy
|
||||
|
||||
- Compares to "Infrastructure-as-Code", but for app deployments
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Specifying the namespace
|
||||
|
||||
- When creating resources from YAML manifests, the namespace is optional
|
||||
|
||||
- If we specify a namespace:
|
||||
|
||||
- resources are created in the specified namespace
|
||||
|
||||
- this is typical for things deployed only once per cluster
|
||||
|
||||
- example: system components, cluster add-ons ...
|
||||
|
||||
- If we don't specify a namespace:
|
||||
|
||||
- resources are created in the current namespace
|
||||
|
||||
- this is typical for things that may be deployed multiple times
|
||||
|
||||
- example: applications (production, staging, feature branches ...)
|
||||
|
||||
117
slides/kube-selfpaced.yml
Normal file
117
slides/kube-selfpaced.yml
Normal file
@@ -0,0 +1,117 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Docker and Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
#- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
-
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectlrun.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
-
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
- k8s/yamldeploy.md
|
||||
-
|
||||
- k8s/setup-k8s.md
|
||||
- k8s/dashboard.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/dryrun.md
|
||||
-
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
-
|
||||
- k8s/ingress.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-secrets.md
|
||||
-
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
- k8s/build-with-docker.md
|
||||
- k8s/build-with-kaniko.md
|
||||
-
|
||||
- k8s/configuration.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
-
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
-
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/operators-design.md
|
||||
- k8s/owners-and-dependents.md
|
||||
-
|
||||
- k8s/dmuc.md
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/apilb.md
|
||||
- k8s/staticpods.md
|
||||
-
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/gitworkflows.md
|
||||
-
|
||||
- k8s/whatsnext.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
101
slides/kube.yml
Normal file
101
slides/kube.yml
Normal file
@@ -0,0 +1,101 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://2020-01-caen.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
-
|
||||
- k8s/kubectlrun.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
-
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
-
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
- # DAY 2
|
||||
- k8s/namespaces.md
|
||||
- k8s/yamldeploy.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/ingress.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
-
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
-
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-secrets.md
|
||||
- # DAY 3
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
-
|
||||
- k8s/dashboard.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/prometheus.md
|
||||
-
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
-
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/operators-design.md
|
||||
- # END
|
||||
- k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
# EXTRA
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
#- k8s/csr-api.md
|
||||
#- k8s/openid-connect.md
|
||||
#- k8s/podsecuritypolicy.md
|
||||
#- k8s/setup-k8s.md
|
||||
#- k8s/dryrun.md
|
||||
@@ -1,10 +1,10 @@
|
||||
## Intros
|
||||
|
||||
- Hello!
|
||||
- Hello! I'm Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
|
||||
|
||||
- The workshop will run from 9am to 5pm
|
||||
- The training will run from 9am to 5pm
|
||||
|
||||
- There will be a lunch break
|
||||
- There will be a lunch break at 12:30pm
|
||||
|
||||
(And coffee breaks!)
|
||||
|
||||
@@ -12,4 +12,3 @@
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
|
||||
@@ -1,154 +0,0 @@
|
||||
title: |
|
||||
Docker
|
||||
&
|
||||
Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-20200225-seattle)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2020-02-outreach.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
#- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
#- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
#- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Container_Networking_Basics.md
|
||||
#- containers/Network_Drivers.md
|
||||
#- containers/Container_Network_Model.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Multi_Stage_Builds.md
|
||||
#- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- # DAY 2
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
-
|
||||
- k8s/kubectlrun.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
-
|
||||
- k8s/shippingimages.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
- k8s/exercise-wordsmith.md
|
||||
- k8s/yamldeploy.md
|
||||
#- k8s/setup-k8s.md
|
||||
#- k8s/dashboard.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
-
|
||||
- k8s/daemonset.md
|
||||
#- k8s/dryrun.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/exercise-yaml.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/ingress.md
|
||||
- # DAY 3
|
||||
- k8s/dashboard.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
#- k8s/record.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
#- k8s/horizontal-pod-autoscaler.md
|
||||
-
|
||||
- k8s/authn-authz.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/prometheus.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/exercise-configmap.md
|
||||
-
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
-
|
||||
- k8s/whatsnext.md
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
-
|
||||
- "# (Extra: containers) \n"
|
||||
- containers/Docker_Machine.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Windows_Containers.md
|
||||
-
|
||||
- "# (Extra: containers internals) \n"
|
||||
- containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
- containers/Containers_From_Scratch.md
|
||||
- containers/Container_Engines.md
|
||||
- containers/Pods_Anatomy.md
|
||||
-
|
||||
- "# (Extra: Helm) \n"
|
||||
#- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/exercise-helm.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-secrets.md
|
||||
-
|
||||
- "# (Extra: Kubernetes operators) \n"
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/operators-design.md
|
||||
-
|
||||
- "# (Extra: Kubernetes security) \n"
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- k8s/netpol.md
|
||||
@@ -1,49 +1,22 @@
|
||||
## Accessing these slides now
|
||||
## About these slides
|
||||
|
||||
- We recommend that you open these slides in your browser:
|
||||
|
||||
@@SLIDES@@
|
||||
|
||||
- Use arrows to move to next/previous slide
|
||||
|
||||
(up, down, left, right, page up, page down)
|
||||
|
||||
- Type a slide number + ENTER to go to that slide
|
||||
|
||||
- The slide number is also visible in the URL bar
|
||||
|
||||
(e.g. .../#123 for slide 123)
|
||||
|
||||
---
|
||||
|
||||
## Accessing these slides later
|
||||
|
||||
- Slides will remain online so you can review them later if needed
|
||||
|
||||
(let's say we'll keep them online at least 1 year, how about that?)
|
||||
|
||||
- You can download the slides using that URL:
|
||||
|
||||
@@ZIP@@
|
||||
|
||||
(then open the file `@@HTML@@`)
|
||||
|
||||
- You will find new versions of these slides on:
|
||||
|
||||
https://container.training/
|
||||
|
||||
---
|
||||
|
||||
## These slides are open source
|
||||
|
||||
- You are welcome to use, re-use, share these slides
|
||||
|
||||
- These slides are written in markdown
|
||||
|
||||
- The sources of these slides are available in a public GitHub repository:
|
||||
- All the content is available in a public GitHub repository:
|
||||
|
||||
https://@@GITREPO@@
|
||||
|
||||
- You can get updated "builds" of the slides there:
|
||||
|
||||
http://container.training/
|
||||
|
||||
<!--
|
||||
.exercise[
|
||||
```open https://@@GITREPO@@```
|
||||
```open http://container.training/```
|
||||
]
|
||||
-->
|
||||
|
||||
--
|
||||
|
||||
- Typos? Mistakes? Questions? Feel free to hover over the bottom of the slide ...
|
||||
|
||||
.footnote[.emoji[👇] Try it! The source file will be shown and you can view it on GitHub and fork and edit it.]
|
||||
@@ -73,19 +46,3 @@ class: extra-details
|
||||
- you want only the most essential information
|
||||
|
||||
- You can review these slides another time if you want, they'll be waiting for you ☺
|
||||
|
||||
---
|
||||
|
||||
class: in-person, chat-room
|
||||
|
||||
## Chat room
|
||||
|
||||
- We've set up a chat room that we will monitor during the workshop
|
||||
|
||||
- Don't hesitate to use it to ask questions, or get help, or share feedback
|
||||
|
||||
- The chat room will also be available after the workshop
|
||||
|
||||
- Join the chat room: @@CHAT@@
|
||||
|
||||
- Say hi in the chat room!
|
||||
|
||||
@@ -58,6 +58,28 @@ Misattributed to Benjamin Franklin
|
||||
|
||||
---
|
||||
|
||||
## Navigating slides
|
||||
|
||||
- Use arrows to move to next/previous slide
|
||||
|
||||
(up, down, left, right, page up, page down)
|
||||
|
||||
- Type a slide number + ENTER to go to that slide
|
||||
|
||||
- The slide number is also visible in the URL bar
|
||||
|
||||
(e.g. .../#123 for slide 123)
|
||||
|
||||
- Slides will remain online so you can review them later if needed
|
||||
|
||||
- You can download the slides using that URL:
|
||||
|
||||
@@ZIP@@
|
||||
|
||||
(then open the file `@@HTML@@`)
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Where are we going to run our containers?
|
||||
|
||||
@@ -11,10 +11,5 @@ class: title, in-person
|
||||
@@TITLE@@<br/></br>
|
||||
|
||||
.footnote[
|
||||
WiFi: **Outreach**<br/>
|
||||
Password: **Winter9378**
|
||||
|
||||
**Slides[:](
|
||||
https://www.youtube.com/watch?v=h16zyxiwDLY
|
||||
) @@SLIDES@@**
|
||||
**Slides: @@SLIDES@@**
|
||||
]
|
||||
|
||||
Reference in New Issue
Block a user