Mirror of https://github.com/jpetazzo/container.training.git
(synced 2026-03-02 09:20:19 +00:00)

Compare commits: 2026-01-en...2025-10-en (1 commit, SHA1 514c6d8362)
.gitignore (vendored, 1 line changed)
@@ -18,7 +18,6 @@ slides/index.html
slides/past.html
slides/slides.zip
slides/_academy_*
slides/fragments
node_modules

### macOS ###
@@ -1,24 +1,26 @@
services:
version: "2"

services:
  rng:
    build: rng
    ports:
    - "8001:80"
    - "8001:80"

  hasher:
    build: hasher
    ports:
    - "8002:80"
    - "8002:80"

  webui:
    build: webui
    ports:
    - "8000:80"
    - "8000:80"
    volumes:
    - "./webui/files/:/files/"
    - "./webui/files/:/files/"

  redis:
    image: redis

  worker:
    build: worker
@@ -1,8 +1,7 @@
FROM ruby:alpine
WORKDIR /app
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN gem install thin
COPY hasher.rb .
CMD ["ruby", "hasher.rb", "-o", "::"]
RUN gem install thin --version '~> 1'
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80
@@ -2,6 +2,7 @@ require 'digest'
require 'sinatra'
require 'socket'

set :bind, '0.0.0.0'
set :port, 80

post '/' do
@@ -1,7 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run", "--without-threads"]
COPY rng.py /
CMD ["python", "rng.py"]
EXPOSE 80
@@ -28,5 +28,5 @@ def rng(how_many_bytes):


if __name__ == "__main__":
    app.run(port=80)
    app.run(host="0.0.0.0", port=80, threaded=False)
@@ -1,8 +1,7 @@
FROM node:23-alpine
WORKDIR /app
RUN npm install express
RUN npm install morgan
RUN npm install redis@5
COPY . .
FROM node:4-slim
RUN npm install express@4
RUN npm install redis@3
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]
EXPOSE 80
@@ -1,34 +1,26 @@
import express from 'express';
import morgan from 'morgan';
import { createClient } from 'redis';

var client = await createClient({
  url: "redis://redis",
  socket: {
    family: 0
  }
})
  .on("error", function (err) {
    console.error("Redis error", err);
  })
  .connect();

var express = require('express');
var app = express();
var redis = require('redis');

app.use(morgan('common'));
var client = redis.createClient(6379, 'redis');
client.on("error", function (err) {
  console.error("Redis error", err);
});

app.get('/', function (req, res) {
  res.redirect('/index.html');
});

app.get('/json', async(req, res) => {
  var coins = await client.hLen('wallet');
  var hashes = await client.get('hashes');
  var now = Date.now() / 1000;
  res.json({
    coins: coins,
    hashes: hashes,
    now: now
app.get('/json', function (req, res) {
  client.hlen('wallet', function (err, coins) {
    client.get('hashes', function (err, hashes) {
      var now = Date.now() / 1000;
      res.json( {
        coins: coins,
        hashes: hashes,
        now: now
      });
    });
  });
});
@@ -1,6 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install redis
RUN pip install requests
COPY worker.py .
COPY worker.py /
CMD ["python", "worker.py"]
@@ -12,5 +12,5 @@ listen very-basic-load-balancer
  server blue color.blue.svc:80
  server green color.green.svc:80

### Note: the services above must exist,
### otherwise HAproxy won't start.
# Note: the services above must exist,
# otherwise HAproxy won't start.
k8s/traefik-v1.yaml (new file, 87 lines)
@@ -0,0 +1,87 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:1.7
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
k8s/traefik-v2.yaml (new file, 114 lines)
@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: traefik
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik
  namespace: traefik
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik
  namespace: traefik
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      labels:
        app: traefik
      name: traefik
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      # If, for some reason, our CNI plugin doesn't support hostPort,
      # we can enable hostNetwork instead. That should work everywhere
      # but it doesn't provide the same isolation.
      #hostNetwork: true
      serviceAccountName: traefik
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:v2.10
        name: traefik
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --accesslog
        - --api
        - --api.insecure
        - --log.level=INFO
        - --metrics.prometheus
        - --providers.kubernetesingress
        - --entrypoints.http.Address=:80
        - --entrypoints.https.Address=:443
        - --entrypoints.https.http.tls.certResolver=default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
      - ingressclasses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik
subjects:
- kind: ServiceAccount
  name: traefik
  namespace: traefik
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
  name: traefik
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: traefik.io/ingress-controller
k8s/traefik.yaml (deleted, 123 lines)
@@ -1,123 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: traefik
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik
  namespace: traefik
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik
  namespace: traefik
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      labels:
        app: traefik
      name: traefik
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      # If, for some reason, our CNI plugin doesn't support hostPort,
      # we can enable hostNetwork instead. That should work everywhere
      # but it doesn't provide the same isolation.
      #hostNetwork: true
      serviceAccountName: traefik
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:v3.5
        name: traefik
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --accesslog
        - --api
        - --api.insecure
        - --entrypoints.http.Address=:80
        - --entrypoints.https.Address=:443
        - --global.sendAnonymousUsage=true
        - --log.level=INFO
        - --metrics.prometheus
        - --providers.kubernetesingress
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik
subjects:
- kind: ServiceAccount
  name: traefik
  namespace: traefik
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
  name: traefik
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: traefik.io/ingress-controller
k8s/traefik.yaml (new symbolic link, 1 line)

@@ -0,0 +1 @@
traefik-v2.yaml
@@ -36,12 +36,8 @@ _populate_zone() {
  ZONE_ID=$(_get_zone_id $1)
  shift
  for IPADDR in $*; do
    case "$IPADDR" in
    *.*) TYPE=A;;
    *:*) TYPE=AAAA;;
    esac
    cloudflare zones/$ZONE_ID/dns_records "name=*" "type=$TYPE" "content=$IPADDR"
    cloudflare zones/$ZONE_ID/dns_records "name=\@" "type=$TYPE" "content=$IPADDR"
    cloudflare zones/$ZONE_ID/dns_records "name=*" "type=A" "content=$IPADDR"
    cloudflare zones/$ZONE_ID/dns_records "name=\@" "type=A" "content=$IPADDR"
  done
}
@@ -5,9 +5,6 @@
# 10% CPU
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb
# Note that we also need 2 volumes per vcluster (one for vcluster itself, one for shpod),
# so we might hit the maximum number of volumes per node!
# (TODO: check what that limit is on Scaleway and Linode)
#
# With vspod:
# 800 MB RAM

@@ -18,7 +15,7 @@ set -e

KONKTAG=konk
PROVIDER=linode
STUDENTS=2
STUDENTS=5

case "$PROVIDER" in
linode)
@@ -56,7 +56,7 @@ _cmd_codeserver() {

  ARCH=${ARCHITECTURE-amd64}
  CODESERVER_VERSION=4.96.4
  CODESERVER_URL=\$GITHUB/coder/code-server/releases/download/v${CODESERVER_VERSION}/code-server-${CODESERVER_VERSION}-linux-${ARCH}.tar.gz
  CODESERVER_URL=https://github.com/coder/code-server/releases/download/v${CODESERVER_VERSION}/code-server-${CODESERVER_VERSION}-linux-${ARCH}.tar.gz
  pssh "
    set -e
    i_am_first_node || exit 0

@@ -230,7 +230,7 @@ _cmd_create() {
    ;;
  *) die "Invalid mode: $MODE (supported modes: mk8s, pssh)." ;;
  esac

  if ! [ -f "$SETTINGS" ]; then
    die "Settings file ($SETTINGS) not found."
  fi

@@ -375,8 +375,8 @@ _cmd_clusterize() {
  pssh -I < tags/$TAG/clusters.tsv "
    grep -w \$PSSH_HOST | tr '\t' '\n' > /tmp/cluster"
  pssh "
    echo \$PSSH_HOST > /tmp/ip_address
    head -n 1 /tmp/cluster | sudo tee /etc/ip_address_of_first_node
    echo \$PSSH_HOST > /tmp/ipv4
    head -n 1 /tmp/cluster | sudo tee /etc/ipv4_of_first_node
    echo ${CLUSTERPREFIX}1 | sudo tee /etc/name_of_first_node
    echo HOSTIP=\$PSSH_HOST | sudo tee -a /etc/environment
    NODEINDEX=\$((\$PSSH_NODENUM%$CLUSTERSIZE+1))
@@ -459,7 +459,7 @@ _cmd_docker() {
    set -e
    ### Install docker-compose.
    sudo curl -fsSL -o /usr/local/bin/docker-compose \
      \$GITHUB/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-$COMPOSE_PLATFORM
      https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-$COMPOSE_PLATFORM
    sudo chmod +x /usr/local/bin/docker-compose
    docker-compose version

@@ -467,7 +467,7 @@ _cmd_docker() {
    ##VERSION## https://github.com/docker/machine/releases
    MACHINE_VERSION=v0.16.2
    sudo curl -fsSL -o /usr/local/bin/docker-machine \
      \$GITHUB/docker/machine/releases/download/\$MACHINE_VERSION/docker-machine-\$(uname -s)-\$(uname -m)
      https://github.com/docker/machine/releases/download/\$MACHINE_VERSION/docker-machine-\$(uname -s)-\$(uname -m)
    sudo chmod +x /usr/local/bin/docker-machine
    docker-machine version
    "

@@ -500,7 +500,7 @@ _cmd_kubebins() {
    set -e
    cd /usr/local/bin
    if ! [ -x etcd ]; then
      curl -L \$GITHUB/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$ARCH.tar.gz \
      curl -L https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$ARCH.tar.gz \
        | sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
    fi
    if ! [ -x kube-apiserver ]; then

@@ -512,7 +512,7 @@ _cmd_kubebins() {
    sudo mkdir -p /opt/cni/bin
    cd /opt/cni/bin
    if ! [ -x bridge ]; then
      curl -L \$GITHUB/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$ARCH-$CNI_VERSION.tgz \
      curl -L https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$ARCH-$CNI_VERSION.tgz \
        | sudo tar -zx
    fi
    "
@@ -562,18 +562,6 @@ EOF"
    kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
    echo 'alias k=kubecolor' | sudo tee /etc/bash_completion.d/k &&
    echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"

  # Install helm early
  # (so that we can use it to install e.g. Cilium etc.)
  ARCH=${ARCHITECTURE-amd64}
  HELM_VERSION=3.19.1
  pssh "
    if [ ! -x /usr/local/bin/helm ]; then
      curl -fsSL https://get.helm.sh/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz |
        sudo tar --strip-components=1 --wildcards -zx -C /usr/local/bin '*/helm'
      helm completion bash | sudo tee /etc/bash_completion.d/helm
      helm version
    fi"
}

_cmd kubeadm "Setup kubernetes clusters with kubeadm"
@@ -597,18 +585,6 @@ _cmd_kubeadm() {

  # Initialize kube control plane
  pssh --timeout 200 "
    IPV6=\$(ip -json a | jq -r '.[].addr_info[] | select(.scope==\"global\" and .family==\"inet6\") | .local' | head -n1)
    if [ \"\$IPV6\" ]; then
      ADVERTISE=\"advertiseAddress: \$IPV6\"
      SERVICE_SUBNET=\"serviceSubnet: fdff::/112\"
      touch /tmp/install-cilium-ipv6-only
      touch /tmp/ipv6-only
    else
      ADVERTISE=
      SERVICE_SUBNET=
      touch /tmp/install-weave
    fi
    echo IPV6=\$IPV6 ADVERTISE=\$ADVERTISE
    if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
      kubeadm token generate > /tmp/token &&
      cat >/tmp/kubeadm-config.yaml <<EOF

@@ -616,12 +592,9 @@ kind: InitConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- token: \$(cat /tmp/token)
localAPIEndpoint:
  \$ADVERTISE
nodeRegistration:
  ignorePreflightErrors:
  - NumCPU
  - FileContent--proc-sys-net-ipv6-conf-default-forwarding
  $IGNORE_SYSTEMVERIFICATION
  $IGNORE_SWAP
  $IGNORE_IPTABLES

@@ -648,9 +621,7 @@ kind: ClusterConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
apiServer:
  certSANs:
  - \$(cat /tmp/ip_address)
networking:
  \$SERVICE_SUBNET
  - \$(cat /tmp/ipv4)
$CLUSTER_CONFIGURATION_KUBERNETESVERSION
EOF
sudo kubeadm init --config=/tmp/kubeadm-config.yaml
@@ -669,20 +640,9 @@ EOF
  # Install weave as the pod network
  pssh "
    if i_am_first_node; then
      if [ -f /tmp/install-weave ]; then
        curl -fsSL \$GITHUB/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml |
          sed s,weaveworks/weave,quay.io/rackspace/weave, |
          kubectl apply -f-
      fi
      if [ -f /tmp/install-cilium-ipv6-only ]; then
        helm upgrade -i cilium cilium --repo https://helm.cilium.io/ \
          --namespace kube-system \
          --set cni.chainingMode=portmap \
          --set ipv6.enabled=true \
          --set ipv4.enabled=false \
          --set underlayProtocol=ipv6 \
          --version 1.18.3
      fi
      curl -fsSL https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml |
        sed s,weaveworks/weave,quay.io/rackspace/weave, |
        kubectl apply -f-
    fi"

  # FIXME this is a gross hack to add the deployment key to our SSH agent,
@@ -705,16 +665,13 @@ EOF
  fi

  # Install metrics server
  pssh -I <../k8s/metrics-server.yaml "
  pssh "
    if i_am_first_node; then
      kubectl apply -f-
    fi"
      # It would be nice to be able to use that helm chart for metrics-server.
      # Unfortunately, the charts themselves are on github.com and we want to
      # avoid that due to their lack of IPv6 support.
      kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
      #helm upgrade --install metrics-server \
      #  --repo https://kubernetes-sigs.github.io/metrics-server/ metrics-server \
      #  --namespace kube-system --set args={--kubelet-insecure-tls}
    fi"
}
_cmd kubetools "Install a bunch of CLI tools for Kubernetes"

@@ -741,7 +698,7 @@ _cmd_kubetools() {

  # Install ArgoCD CLI
  ##VERSION## https://github.com/argoproj/argo-cd/releases/latest
  URL=\$GITHUB/argoproj/argo-cd/releases/latest/download/argocd-linux-${ARCH}
  URL=https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-${ARCH}
  pssh "
    if [ ! -x /usr/local/bin/argocd ]; then
      sudo curl -o /usr/local/bin/argocd -fsSL $URL

@@ -754,7 +711,7 @@ _cmd_kubetools() {
  ##VERSION## https://github.com/fluxcd/flux2/releases
  FLUX_VERSION=2.3.0
  FILENAME=flux_${FLUX_VERSION}_linux_${ARCH}
  URL=\$GITHUB/fluxcd/flux2/releases/download/v$FLUX_VERSION/$FILENAME.tar.gz
  URL=https://github.com/fluxcd/flux2/releases/download/v$FLUX_VERSION/$FILENAME.tar.gz
  pssh "
    if [ ! -x /usr/local/bin/flux ]; then
      curl -fsSL $URL |

@@ -769,7 +726,7 @@ _cmd_kubetools() {
    set -e
    if ! [ -x /usr/local/bin/kctx ]; then
      cd /tmp
      git clone \$GITHUB/ahmetb/kubectx
      git clone https://github.com/ahmetb/kubectx
      sudo cp kubectx/kubectx /usr/local/bin/kctx
      sudo cp kubectx/kubens /usr/local/bin/kns
      sudo cp kubectx/completion/*.bash /etc/bash_completion.d

@@ -780,7 +737,7 @@ _cmd_kubetools() {
    set -e
    if ! [ -d /opt/kube-ps1 ]; then
      cd /tmp
      git clone \$GITHUB/jonmosco/kube-ps1
      git clone https://github.com/jonmosco/kube-ps1
      sudo mv kube-ps1 /opt/kube-ps1
      sudo -u $USER_LOGIN sed -i s/docker-prompt/kube_ps1/ /home/$USER_LOGIN/.bashrc &&
      sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc <<EOF

@@ -797,7 +754,7 @@ EOF
  ##VERSION## https://github.com/stern/stern/releases
  STERN_VERSION=1.29.0
  FILENAME=stern_${STERN_VERSION}_linux_${ARCH}
  URL=\$GITHUB/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
  URL=https://github.com/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
  pssh "
    if [ ! -x /usr/local/bin/stern ]; then
      curl -fsSL $URL |
@@ -808,11 +765,9 @@ EOF
    fi"

  # Install helm
  HELM_VERSION=3.19.1
  pssh "
    if [ ! -x /usr/local/bin/helm ]; then
      curl -fsSL https://get.helm.sh/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz |
        sudo tar --strip-components=1 --wildcards -zx -C /usr/local/bin '*/helm'
      curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 | sudo bash &&
      helm completion bash | sudo tee /etc/bash_completion.d/helm
      helm version
    fi"

@@ -820,7 +775,7 @@ EOF
  # Install kustomize
  ##VERSION## https://github.com/kubernetes-sigs/kustomize/releases
  KUSTOMIZE_VERSION=v5.4.1
  URL=\$GITHUB/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
  URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
  pssh "
    if [ ! -x /usr/local/bin/kustomize ]; then
      curl -fsSL $URL |
@@ -837,17 +792,15 @@ EOF
  pssh "
    if [ ! -x /usr/local/bin/ship ]; then
      ##VERSION##
      curl -fsSL \$GITHUB/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_$ARCH.tar.gz |
      curl -fsSL https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_$ARCH.tar.gz |
        sudo tar -C /usr/local/bin -zx ship
    fi"

  # Install the AWS IAM authenticator
  AWSIAMAUTH_VERSION=0.7.8
  URL=\$GITHUB/kubernetes-sigs/aws-iam-authenticator/releases/download/v${AWSIAMAUTH_VERSION}/aws-iam-authenticator_${AWSIAMAUTH_VERSION}_linux_${ARCH}
  pssh "
    if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
      ##VERSION##
      sudo curl -fsSLo /usr/local/bin/aws-iam-authenticator $URL
      sudo curl -fsSLo /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/$ARCH/aws-iam-authenticator
      sudo chmod +x /usr/local/bin/aws-iam-authenticator
      aws-iam-authenticator version
    fi"
@@ -857,17 +810,17 @@ EOF
    if [ ! -x /usr/local/bin/jless ]; then
      ##VERSION##
      sudo apt-get install -y libxcb-render0 libxcb-shape0 libxcb-xfixes0
      wget \$GITHUB/PaulJuliusMartinez/jless/releases/download/v0.9.0/jless-v0.9.0-x86_64-unknown-linux-gnu.zip
      wget https://github.com/PaulJuliusMartinez/jless/releases/download/v0.9.0/jless-v0.9.0-x86_64-unknown-linux-gnu.zip
      unzip jless-v0.9.0-x86_64-unknown-linux-gnu
      sudo mv jless /usr/local/bin
    fi"

  # Install the krew package manager
  pssh "
    if [ ! -d /home/$USER_LOGIN/.krew ] && [ ! -f /tmp/ipv6-only ]; then
    if [ ! -d /home/$USER_LOGIN/.krew ]; then
      cd /tmp &&
      KREW=krew-linux_$ARCH
      curl -fsSL \$GITHUB/kubernetes-sigs/krew/releases/latest/download/\$KREW.tar.gz |
      curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/\$KREW.tar.gz |
        tar -zxf- &&
      sudo -u $USER_LOGIN -H ./\$KREW install krew &&
      echo export PATH=/home/$USER_LOGIN/.krew/bin:\\\$PATH | sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc
@@ -875,7 +828,7 @@ EOF

  # Install kubecolor
  KUBECOLOR_VERSION=0.4.0
  URL=\$GITHUB/kubecolor/kubecolor/releases/download/v${KUBECOLOR_VERSION}/kubecolor_${KUBECOLOR_VERSION}_linux_${ARCH}.tar.gz
  URL=https://github.com/kubecolor/kubecolor/releases/download/v${KUBECOLOR_VERSION}/kubecolor_${KUBECOLOR_VERSION}_linux_${ARCH}.tar.gz
  pssh "
    if [ ! -x /usr/local/bin/kubecolor ]; then
      ##VERSION##

@@ -887,7 +840,7 @@ EOF
  pssh "
    if [ ! -x /usr/local/bin/k9s ]; then
      FILENAME=k9s_Linux_$ARCH.tar.gz &&
      curl -fsSL \$GITHUB/derailed/k9s/releases/latest/download/\$FILENAME |
      curl -fsSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
        sudo tar -C /usr/local/bin -zx k9s
      k9s version
    fi"

@@ -896,7 +849,7 @@ EOF
  pssh "
    if [ ! -x /usr/local/bin/popeye ]; then
      FILENAME=popeye_Linux_$ARCH.tar.gz &&
      curl -fsSL \$GITHUB/derailed/popeye/releases/latest/download/\$FILENAME |
      curl -fsSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
        sudo tar -C /usr/local/bin -zx popeye
      popeye version
    fi"

@@ -909,7 +862,7 @@ EOF
    if [ ! -x /usr/local/bin/tilt ]; then
      TILT_VERSION=0.33.13
      FILENAME=tilt.\$TILT_VERSION.linux.$TILT_ARCH.tar.gz
      curl -fsSL \$GITHUB/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
      curl -fsSL https://github.com/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
        sudo tar -C /usr/local/bin -zx tilt
      tilt completion bash | sudo tee /etc/bash_completion.d/tilt
      tilt version
@@ -927,7 +880,7 @@ EOF
  # Install Kompose
  pssh "
    if [ ! -x /usr/local/bin/kompose ]; then
      curl -fsSLo kompose \$GITHUB/kubernetes/kompose/releases/latest/download/kompose-linux-$ARCH &&
      curl -fsSLo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-$ARCH &&
      sudo install kompose /usr/local/bin
      kompose completion bash | sudo tee /etc/bash_completion.d/kompose
      kompose version

@@ -936,7 +889,7 @@ EOF
  # Install KinD
  pssh "
    if [ ! -x /usr/local/bin/kind ]; then
      curl -fsSLo kind \$GITHUB/kubernetes-sigs/kind/releases/latest/download/kind-linux-$ARCH &&
      curl -fsSLo kind https://github.com/kubernetes-sigs/kind/releases/latest/download/kind-linux-$ARCH &&
      sudo install kind /usr/local/bin
      kind completion bash | sudo tee /etc/bash_completion.d/kind
      kind version

@@ -945,7 +898,7 @@ EOF
  # Install YTT
  pssh "
    if [ ! -x /usr/local/bin/ytt ]; then
      curl -fsSLo ytt \$GITHUB/vmware-tanzu/carvel-ytt/releases/latest/download/ytt-linux-$ARCH &&
      curl -fsSLo ytt https://github.com/vmware-tanzu/carvel-ytt/releases/latest/download/ytt-linux-$ARCH &&
      sudo install ytt /usr/local/bin
      ytt completion bash | sudo tee /etc/bash_completion.d/ytt
      ytt version

@@ -953,7 +906,7 @@ EOF

  ##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
  KUBESEAL_VERSION=0.26.2
  URL=\$GITHUB/bitnami-labs/sealed-secrets/releases/download/v${KUBESEAL_VERSION}/kubeseal-${KUBESEAL_VERSION}-linux-${ARCH}.tar.gz
  URL=https://github.com/bitnami-labs/sealed-secrets/releases/download/v${KUBESEAL_VERSION}/kubeseal-${KUBESEAL_VERSION}-linux-${ARCH}.tar.gz
  #case $ARCH in
  #amd64) FILENAME=kubeseal-linux-amd64;;
  #arm64) FILENAME=kubeseal-arm64;;
@@ -970,7 +923,7 @@ EOF
  VELERO_VERSION=1.13.2
  pssh "
    if [ ! -x /usr/local/bin/velero ]; then
      curl -fsSL \$GITHUB/vmware-tanzu/velero/releases/download/v$VELERO_VERSION/velero-v$VELERO_VERSION-linux-$ARCH.tar.gz |
      curl -fsSL https://github.com/vmware-tanzu/velero/releases/download/v$VELERO_VERSION/velero-v$VELERO_VERSION-linux-$ARCH.tar.gz |
        sudo tar --strip-components=1 --wildcards -zx -C /usr/local/bin '*/velero'
      velero completion bash | sudo tee /etc/bash_completion.d/velero
      velero version --client-only

@@ -980,7 +933,7 @@ EOF
  KUBENT_VERSION=0.7.2
  pssh "
    if [ ! -x /usr/local/bin/kubent ]; then
      curl -fsSL \$GITHUB/doitintl/kube-no-trouble/releases/download/${KUBENT_VERSION}/kubent-${KUBENT_VERSION}-linux-$ARCH.tar.gz |
      curl -fsSL https://github.com/doitintl/kube-no-trouble/releases/download/${KUBENT_VERSION}/kubent-${KUBENT_VERSION}-linux-$ARCH.tar.gz |
        sudo tar -zxvf- -C /usr/local/bin kubent
      kubent --version
    fi"

@@ -988,7 +941,7 @@ EOF
  # Ngrok. Note that unfortunately, this is the x86_64 binary.
  # We might have to rethink how to handle this for multi-arch environments.
  pssh "
    if [ ! -x /usr/local/bin/ngrok ] && [ ! -f /tmp/ipv6-only ]; then
    if [ ! -x /usr/local/bin/ngrok ]; then
      curl -fsSL https://bin.equinox.io/c/bNyj1mQVY4c/ngrok-v3-stable-linux-amd64.tgz |
        sudo tar -zxvf- -C /usr/local/bin ngrok
    fi"
@@ -1087,9 +1040,7 @@ _cmd_ping() {
  TAG=$1
  need_tag

  # If we connect to our VMs over IPv6, the IP address is between brackets.
  # Unfortunately, fping doesn't support that; so let's strip brackets here.
  tr -d [] < tags/$TAG/ips.txt | fping
  fping < tags/$TAG/ips.txt
}

_cmd stage2 "Finalize the setup of managed Kubernetes clusters"
@@ -1161,7 +1112,7 @@ _cmd_standardize() {
    sudo netfilter-persistent start
    fi"

  # oracle-cloud-agent upgrades packages in the background.
  # oracle-cloud-agent upgrades pacakges in the background.
  # This breaks our deployment scripts, because when we invoke apt-get, it complains
  # that the lock already exists (symptom: random "Exited with error code 100").
  # Workaround: if we detect oracle-cloud-agent, remove it.

@@ -1173,15 +1124,6 @@ _cmd_standardize() {
    sudo snap remove oracle-cloud-agent
    sudo dpkg --remove --force-remove-reinstreq unified-monitoring-agent
    fi"

  # Check if a cachttps instance is available.
  # (This is used to access GitHub on IPv6-only hosts.)
  pssh "
    if curl -fsSLI http://cachttps.internal:3131/https://github.com/ >/dev/null; then
      echo GITHUB=http://cachttps.internal:3131/https://github.com
    else
      echo GITHUB=https://github.com
    fi | sudo tee -a /etc/environment"
}

_cmd tailhist "Install history viewer on port 1088"
@@ -1197,7 +1139,7 @@ _cmd_tailhist () {
  pssh "
    set -e
    sudo apt-get install unzip -y
    wget -c \$GITHUB/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
    wget -c https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
    unzip -o websocketd-0.3.0-linux_$ARCH.zip websocketd
    sudo mv websocketd /usr/local/bin/websocketd
    sudo mkdir -p /opt/tailhist
@@ -1463,7 +1405,7 @@ _cmd_webssh() {
    sudo apt-get install python3-tornado python3-paramiko -y"
  pssh "
    cd /opt
    [ -d webssh ] || sudo git clone \$GITHUB/jpetazzo/webssh"
    [ -d webssh ] || sudo git clone https://github.com/jpetazzo/webssh"
  pssh "
    for KEYFILE in /etc/ssh/*.pub; do
      read a b c < \$KEYFILE; echo localhost \$a \$b
@@ -1548,7 +1490,7 @@ test_vm() {
    "whoami" \
    "hostname -i" \
    "ls -l /usr/local/bin/i_am_first_node" \
    "grep . /etc/name_of_first_node /etc/ip_addres_of_first_node" \
    "grep . /etc/name_of_first_node /etc/ipv4_of_first_node" \
    "cat /etc/hosts" \
    "hostnamectl status" \
    "docker version | grep Version -B1" \
@@ -4,7 +4,7 @@ resource "helm_release" "_" {
  create_namespace = true
  repository       = "https://charts.loft.sh"
  chart            = "vcluster"
  version          = "0.30.4"
  version          = "0.27.1"
  values = [
    yamlencode({
      controlPlane = {

@@ -63,8 +63,7 @@ locals {

resource "local_file" "ip_addresses" {
  content = join("", formatlist("%s\n", [
    for key, value in local.ip_addresses :
    strcontains(value, ".") ? value : "[${value}]"
    for key, value in local.ip_addresses : value
  ]))
  filename        = "ips.txt"
  file_permission = "0600"
@@ -1,37 +0,0 @@
# If we deploy in IPv6-only environments, and the students don't have IPv6
# connectivity, we want to offer a way to connect anyway. Our solution is
# to generate an HAProxy configuration snippet, that can be copied to a
# DualStack machine which will act as a proxy to our IPv6 machines.
# Note that the snippet still has to be copied, so this is not a 100%
# streamlined solution!

locals {
  portmaps = {
    for key, value in local.nodes :
    (10000 + proxmox_virtual_environment_vm._[key].vm_id) => local.ip_addresses[key]
  }
}

resource "local_file" "haproxy" {
  filename        = "./${var.tag}.cfg"
  file_permission = "0644"
  content = join("\n", [for port, address in local.portmaps : <<-EOT
    frontend f${port}
      bind *:${port}
      default_backend b${port}
    backend b${port}
      mode tcp
      server s${port} [${address}]:22 maxconn 16
  EOT
  ])
}

resource "local_file" "sshproxy" {
  filename        = "sshproxy.txt"
  file_permission = "0644"
  content = join("", [
    for cid in range(1, 1 + var.how_many_clusters) :
    format("ssh -l k8s -p %d\n", proxmox_virtual_environment_vm._[format("c%03dn%03d", cid, 1)].vm_id + 10000)
  ])
}
@@ -1,34 +1,12 @@
data "proxmox_virtual_environment_nodes" "_" {}

data "proxmox_virtual_environment_vms" "_" {
  filter {
    name   = "template"
    values = [true]
  }
}

data "proxmox_virtual_environment_vms" "templates" {
  for_each = toset(data.proxmox_virtual_environment_nodes._.names)
  tags     = ["ubuntu"]
  filter {
    name   = "node_name"
    values = [each.value]
  }
  filter {
    name   = "template"
    values = [true]
  }
}

locals {
  pve_nodes       = data.proxmox_virtual_environment_nodes._.names
  pve_node        = { for k, v in local.nodes : k => local.pve_nodes[v.node_index % length(local.pve_nodes)] }
  pve_template_id = { for k, v in local.nodes : k => data.proxmox_virtual_environment_vms.templates[local.pve_node[k]].vms[0].vm_id }
  pve_nodes = data.proxmox_virtual_environment_nodes._.names
}

resource "proxmox_virtual_environment_vm" "_" {
  node_name = local.pve_nodes[each.value.node_index % length(local.pve_nodes)]
  for_each  = local.nodes
  node_name = local.pve_node[each.key]
  name      = each.value.node_name
  tags      = ["container.training", var.tag]
  stop_on_destroy = true

@@ -46,17 +24,9 @@ resource "proxmox_virtual_environment_vm" "_" {
  #  size    = 30
  #  discard = "on"
  #}
  ### Strategy 1: clone from shared storage
  #clone {
  #  vm_id     = var.proxmox_template_vm_id
  #  node_name = var.proxmox_template_node_name
  #  full      = false
  #}
  ### Strategy 2: clone from local storage
  ### (requires that the template exists on each node)
  clone {
    vm_id     = local.pve_template_id[each.key]
    node_name = local.pve_node[each.key]
    vm_id     = var.proxmox_template_vm_id
    node_name = var.proxmox_template_node_name
    full      = false
  }
  agent {

@@ -71,9 +41,7 @@ resource "proxmox_virtual_environment_vm" "_" {
    ip_config {
      ipv4 {
        address = "dhcp"
      }
      ipv6 {
        address = "dhcp"
        #gateway =
      }
    }
  }

@@ -104,10 +72,8 @@ resource "proxmox_virtual_environment_vm" "_" {
locals {
  ip_addresses = {
    for key, value in local.nodes :
    key => [for addr in flatten(concat(
      proxmox_virtual_environment_vm._[key].ipv6_addresses,
      proxmox_virtual_environment_vm._[key].ipv4_addresses,
      ["ERROR"])) :
      addr if addr != "127.0.0.1" && addr != "::1"][0]
    key => [for addr in flatten(concat(proxmox_virtual_environment_vm._[key].ipv4_addresses, ["ERROR"])) :
      addr if addr != "127.0.0.1"][0]
  }
}
@@ -2,7 +2,7 @@ terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "~> 0.86.0"
      version = "~> 0.70.1"
    }
  }
}

@@ -10,11 +10,8 @@ proxmox_password = "CHANGEME"

# Which storage to use for VM disks. Defaults to "local".
#proxmox_storage = "ceph"
#proxmox_storage = "local-zfs"

# We recently rewrote the Proxmox configurations to automatically
# detect which template to use; so these variables aren't used anymore.
#proxmox_template_node_name = "CHANGEME"
#proxmox_template_vm_id = CHANGEME
proxmox_template_node_name = "CHANGEME"
proxmox_template_vm_id = CHANGEME
slides/1.yml (10 lines changed)
@@ -5,7 +5,7 @@ chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2026-01-enix.container.training/
slides: https://2025-10-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

@@ -15,16 +15,16 @@ exclude:
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
- shared/handson.md
- containers/labs-live.md
- shared/connecting.md
- containers/labs-async.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
  #- containers/Docker_Overview.md
  #- containers/Docker_History.md
  - containers/Training_Environment.md
  #- containers/Installing_Docker.md
  - containers/First_Containers.md
  - containers/Background_Containers.md
slides/2.yml (10 lines changed)
@@ -5,7 +5,7 @@ chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2026-01-enix.container.training/
slides: https://2025-10-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

@@ -15,14 +15,15 @@ exclude:
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
- k8s/prereqs-basic.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/prereqs.md
- shared/handson.md
- k8s/labs-live.md
#- shared/webssh.md
- shared/connecting.md
- k8s/labs-async.md
- exercises/k8sfundamentals-brief.md
- exercises/yaml-dockercoins-brief.md
- exercises/localcluster-brief.md

@@ -81,7 +82,6 @@ content:
- k8s/ingress.md
#- k8s/ingress-tls.md
#- k8s/ingress-advanced.md
- k8s/gateway-api.md
- k8s/volumes.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
slides/3.yml (13 lines changed)
@@ -1,12 +1,12 @@
title: |
  Packaging d’applications
  Packaging d'applications
  pour Kubernetes

chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2026-01-enix.container.training/
slides: https://2025-10-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

@@ -16,18 +16,17 @@ exclude:
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
- k8s/prereqs-advanced.md
- shared/handson.md
#- shared/webssh.md
- k8s/labs-live.md
- shared/webssh.md
- shared/connecting.md
- k8s/labs-async.md
#- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/toc.md
-
  - k8s/demo-apps.md
  - k8s/templating.md
  - k8s/kustomize.md
  - k8s/helm-intro.md
  - k8s/helm-chart-format.md
slides/4.yml (11 lines changed)
@@ -5,7 +5,7 @@ chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2026-01-enix.container.training/
slides: https://2025-10-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

@@ -15,20 +15,20 @@ exclude:
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- k8s/prereqs-advanced.md
- shared/handson.md
#- shared/webssh.md
- k8s/labs-live.md
- shared/webssh.md
- shared/connecting.md
- k8s/labs-async.md
- shared/toc.md
- exercises/netpol-brief.md
- exercises/sealed-secrets-brief.md
- exercises/rbac-brief.md
- exercises/kyverno-ingress-domain-name-brief.md
- exercises/reqlim-brief.md
- shared/toc.md
- #1
  - k8s/demo-apps.md
  - k8s/netpol.md

@@ -47,7 +47,6 @@ content:
- k8s/admission.md
- k8s/cainjector.md
- k8s/kyverno.md
- k8s/kyverno-ingress.md
- exercises/kyverno-ingress-domain-name-details.md
- #3
- k8s/resource-limits.md
slides/5.yml (49 lines changed)
@@ -5,7 +5,7 @@ chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2026-01-enix.container.training/
slides: https://2025-10-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

@@ -14,17 +14,13 @@ exclude:

content:
- shared/title.md
- logistics.md
- shared/chat-room-im.md
- k8s/prereqs-advanced.md
- logistics-m5.md
- k8s/intro.md
- shared/about-slides.md
- shared/handson.md
#- shared/webssh.md
- k8s/labs-live.md
- shared/connecting.md
- k8s/labs-async.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
# DAY 1
-
  - k8s/prereqs-advanced.md
  - shared/handson.md

@@ -52,23 +48,16 @@ content:
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
#- k8s/cloud-controller-manager.md
#-
# - k8s/M6-START-a-company-scenario.md
# - k8s/M6-T02-flux-install.md
# - k8s/M6-T03-installing-tenants.md
# - k8s/M6-R01-flux_configure-ROCKY-deployment.md
# - k8s/M6-T05-ingress-config.md
# - k8s/M6-M01-adding-MOVY-tenant.md
# - k8s/M6-K01-METAL-install.md
# - k8s/M6-K03-openebs-install.md
# - k8s/M6-monitoring-stack-install.md
# - k8s/M6-kyverno-install.md
# - shared/thankyou.md
#-
# |
# # (Extra content)
# - k8s/apiserver-deepdive.md
# - k8s/setup-overview.md
# - k8s/setup-devel.md
# - k8s/setup-managed.md
# - k8s/setup-selfhosted.md
-
  - flux/scenario.md
  - flux/bootstrap.md
  - flux/tenants.md
  - flux/app1-rocky-test.md
  - flux/ingress.md
  - flux/app2-movy-test.md
  - k8s/k0s.md
  - flux/add-cluster.md
  - flux/openebs.md
  - flux/observability.md
  - flux/kyverno.md
  - shared/thankyou.md
@@ -2,7 +2,6 @@
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /highfive.html 200!

# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack

@@ -25,9 +24,4 @@
# Survey form
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform

# Serve individual lessons with special URLs:
# - access http://container.training/k8s/ingress
# - ...redirects to http://container.training/view?k8s/ingress
# - ...proxies to http://container.training/workshop.html?k8s/ingress
/view /workshop.html 200
/* /view?:splat
/ /highfive.html 200!
slides/academy-build.py (new executable file, 31 lines)
@@ -0,0 +1,31 @@
#!/usr/bin/env python

import os
import re
import sys

html_file = sys.argv[1]
output_file_template = "_academy_{}.html"
title_regex = "name: toc-(.*)"
redirects = open("_redirects", "w")

sections = re.split(title_regex, open(html_file).read())[1:]

while sections:
    link, markdown = sections[0], sections[1]
    sections = sections[2:]
    output_file_name = output_file_template.format(link)
    with open(output_file_name, "w") as f:
        html = open("workshop.html").read()
        html = html.replace("@@MARKDOWN@@", markdown)
        titles = re.findall("# (.*)", markdown) + [""]
        html = html.replace("@@TITLE@@", "{} — Kubernetes Academy".format(titles[0]))
        html = html.replace("@@SLIDENUMBERPREFIX@@", "")
        html = html.replace("@@EXCLUDE@@", "")
        html = html.replace(".nav[", ".hide[")
        f.write(html)
    redirects.write("/{} /{} 200!\n".format(link, output_file_name))

html = open(html_file).read()
html = re.sub("#toc-([^)]*)", "_academy_\\1.html", html)
sys.stdout.write(html)
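The key trick in this new script is `re.split()` with a capturing group: because the pattern captures the section name, the captured text is kept in the result list, and dropping the prologue with `[1:]` leaves alternating (link, section-body) pairs, exactly as consumed by the `while sections:` loop. A minimal, self-contained sketch of that behavior (the sample input is made up for illustration):

```python
import re

# Splitting on a pattern that contains a capture group keeps the
# captured text in the result list; [1:] drops the prologue, leaving
# alternating (link, section-body) pairs.
sample = "prologue name: toc-intro\nIntro slides name: toc-volumes\nVolume slides"
sections = re.split("name: toc-(.*)", sample)[1:]
while sections:
    link, body = sections[0], sections[1]
    sections = sections[2:]
    print(link, "->", body.strip())
# intro -> Intro slides
# volumes -> Volume slides
```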
slides/autopilot/package-lock.json (generated, 34 lines changed)
@@ -8,7 +8,7 @@
"name": "container-training-pub-sub-server",
"version": "0.0.1",
"dependencies": {
  "express": "^4.21.2",
  "express": "^4.21.1",
  "socket.io": "^4.8.0",
  "socket.io-client": "^4.7.5"
}

@@ -334,9 +334,9 @@
  }
},
"node_modules/express": {
  "version": "4.21.2",
  "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
  "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
  "version": "4.21.1",
  "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz",
  "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==",
  "dependencies": {
    "accepts": "~1.3.8",
    "array-flatten": "1.1.1",

@@ -357,7 +357,7 @@
    "methods": "~1.1.2",
    "on-finished": "2.4.1",
    "parseurl": "~1.3.3",
    "path-to-regexp": "0.1.12",
    "path-to-regexp": "0.1.10",
    "proxy-addr": "~2.0.7",
    "qs": "6.13.0",
    "range-parser": "~1.2.1",

@@ -372,10 +372,6 @@
  },
  "engines": {
    "node": ">= 0.10.0"
  },
  "funding": {
    "type": "opencollective",
    "url": "https://opencollective.com/express"
  }
},
"node_modules/finalhandler": {

@@ -637,9 +633,9 @@
  }
},
"node_modules/path-to-regexp": {
  "version": "0.1.12",
  "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
  "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ=="
  "version": "0.1.10",
  "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz",
  "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w=="
},
"node_modules/proxy-addr": {
  "version": "2.0.7",

@@ -1268,9 +1264,9 @@
  "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="
},
"express": {
  "version": "4.21.2",
  "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
  "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
  "version": "4.21.1",
  "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz",
  "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==",
  "requires": {
    "accepts": "~1.3.8",
    "array-flatten": "1.1.1",

@@ -1291,7 +1287,7 @@
    "methods": "~1.1.2",
    "on-finished": "2.4.1",
    "parseurl": "~1.3.3",
    "path-to-regexp": "0.1.12",
    "path-to-regexp": "0.1.10",
    "proxy-addr": "~2.0.7",
    "qs": "6.13.0",
    "range-parser": "~1.2.1",

@@ -1477,9 +1473,9 @@
  "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
},
"path-to-regexp": {
  "version": "0.1.12",
  "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
  "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ=="
  "version": "0.1.10",
  "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz",
  "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w=="
},
"proxy-addr": {
  "version": "2.0.7",
@@ -2,7 +2,7 @@
"name": "container-training-pub-sub-server",
"version": "0.0.1",
"dependencies": {
  "express": "^4.21.2",
  "express": "^4.21.1",
  "socket.io": "^4.8.0",
  "socket.io-client": "^4.7.5"
}
@@ -29,20 +29,6 @@ At the end of this lesson, you will be able to:

---

## `Dockerfile` example

```
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run"]
EXPOSE 80
```

---

## Writing our first `Dockerfile`

Our Dockerfile must be in a **new, empty directory**.
slides/containers/Training_Environment.md (new file, 153 lines)
@@ -0,0 +1,153 @@

class: title

# Our training environment

![](images/title-our-training-environment.jpg)

---

## Our training environment

- If you are attending a tutorial or workshop:

  - a VM has been provisioned for each student

- If you are doing or re-doing this course on your own, you can:

  - install Docker locally (as explained in the chapter "Installing Docker")

  - install Docker on e.g. a cloud VM

  - use https://www.play-with-docker.com/ to instantly get a training environment

---

## Our Docker VM

*This section assumes that you are following this course as part of
a tutorial, training or workshop, where each student is given an
individual Docker VM.*

- The VM is created just before the training.

- It will stay up during the whole training.

- It will be destroyed shortly after the training.

- It comes pre-loaded with Docker and some other useful tools.

---

## What *is* Docker?

- "Installing Docker" really means "Installing the Docker Engine and CLI".

- The Docker Engine is a daemon (a service running in the background).

- This daemon manages containers, the same way that a hypervisor manages VMs.

- We interact with the Docker Engine by using the Docker CLI.

- The Docker CLI and the Docker Engine communicate through an API.

- There are many other programs and client libraries which use that API.
---

## Why don't we run Docker locally?

- We are going to download container images and distribution packages.

- This could put a bit of stress on the local WiFi and slow us down.

- Instead, we use a remote VM that has a good connectivity

- In some rare cases, installing Docker locally is challenging:

  - no administrator/root access (computer managed by strict corp IT)

  - 32-bit CPU or OS

  - old OS version (e.g. CentOS 6, OSX pre-Yosemite, Windows 7)

- It's better to spend time learning containers than fiddling with the installer!

---

## Connecting to your Virtual Machine

You need an SSH client.

* On OS X, Linux, and other UNIX systems, just use `ssh`:

  ```bash
  $ ssh <login>@<ip-address>
  ```

* On Windows, if you don't have an SSH client, you can download:

  * Putty (www.putty.org)

  * Git BASH (https://git-for-windows.github.io/)

  * MobaXterm (https://mobaxterm.mobatek.net/)

---

class: in-person

## `tailhist`

The shell history of the instructor is available online in real time.

Note the IP address of the instructor's virtual machine (A.B.C.D).

Open http://A.B.C.D:1088 in your browser and you should see the history.

The history is updated in real time (using a WebSocket connection).

It should be green when the WebSocket is connected.

If it turns red, reloading the page should fix it.

---

## Checking your Virtual Machine

Once logged in, make sure that you can run a basic Docker command:

.small[
```bash
$ docker version
Client:
 Version:      18.03.0-ce
 API version:  1.37
 Go version:   go1.9.4
 Git commit:   0520e24
 Built:        Wed Mar 21 23:10:06 2018
 OS/Arch:      linux/amd64
 Experimental: false
 Orchestrator: swarm

Server:
 Engine:
  Version:      18.03.0-ce
  API version:  1.37 (minimum version 1.12)
  Go version:   go1.9.4
  Git commit:   0520e24
  Built:        Wed Mar 21 23:08:35 2018
  OS/Arch:      linux/amd64
  Experimental: false
```
]

If this doesn't work, raise your hand so that an instructor can assist you!

???

:EN:Container concepts
:FR:Premier contact avec les conteneurs

:EN:- What's a container engine?
:FR:- Qu'est-ce qu'un *container engine* ?
39 slides/containers/intro.md Normal file
@@ -0,0 +1,39 @@
## A brief introduction

- This was initially written to support in-person, instructor-led workshops and tutorials

- These materials are maintained by [Jérôme Petazzoni](https://twitter.com/jpetazzo) and [multiple contributors](https://@@GITREPO@@/graphs/contributors)

- You can also follow along on your own, at your own pace

- We included as much information as possible in these slides

- We recommend having a mentor to help you ...

- ... Or be comfortable spending some time reading the Docker
  [documentation](https://docs.docker.com/) ...

- ... And looking for answers in the [Docker forums](https://forums.docker.com),
  [StackOverflow](http://stackoverflow.com/questions/tagged/docker),
  and other outlets

---

class: self-paced

## Hands on, you shall practice

- Nobody ever became a Jedi by spending their lives reading Wookieepedia

- Likewise, it will take more than merely *reading* these slides
  to make you an expert

- These slides include *tons* of demos, exercises, and examples

- They assume that you have access to a machine running Docker

- If you are attending a workshop or tutorial:
  <br/>you will be given specific instructions to access a cloud VM

- If you are doing this on your own:
  <br/>we will tell you how to install Docker or access a Docker environment
@@ -1,19 +0,0 @@
## Running your own lab environments

- If you are doing or re-doing this course on your own, you can:

  - install [Docker Desktop][docker-desktop] or [Podman Desktop][podman-desktop]
    <br/>(available for Linux, Mac, Windows; provides a nice GUI)

  - install [Docker CE][docker-ce] or [Podman][podman]
    <br/>(for intermediate/advanced users who prefer the CLI)

  - try platforms like [Play With Docker][pwd] or [KodeKloud]
    <br/>(if you can't/won't install anything locally)

[docker-desktop]: https://docs.docker.com/desktop/
[podman-desktop]: https://podman-desktop.io/downloads
[docker-ce]: https://docs.docker.com/engine/install/
[podman]: https://podman.io/docs/installation#installing-on-linux
[pwd]: https://labs.play-with-docker.com/
[KodeKloud]: https://kodekloud.com/free-labs/docker/
@@ -1,35 +0,0 @@
## Our training environment

- If you are attending a live class, a VM has been provisioned for you!

- Each student gets an individual VM.

- The VM is created just before the training.

- It will stay up during the whole training.

- It will be destroyed shortly after the training.

- It comes pre-loaded with Docker and some other useful tools.

---

## Can we run Docker locally?

- If you already have Docker (or Podman) installed, you can use it!

- The VMs can be convenient if:

  - you can't/won't install Docker or Podman on your machine,

  - your local internet connection is slow.

- We're going to download many container images and distribution packages.

- If the class takes place in a venue with slow WiFi, this can slow us down.

- The remote VMs have good connectivity and downloads will be fast there.

(Initially, we provided VMs to make sure that nobody would waste time
with installers, or because they didn't have the right permissions
on their machine, etc.)
@@ -39,7 +39,7 @@ the **_⚙️OPS_** team exclusively operates its clusters by updating a code ba
_GitOps_ and `Flux` enable the **_⚙️OPS_** team to rely on the _first-class citizen pattern_ in Kubernetes' world through these steps:

- describe the **desired target state**
- and let the **automated convergence** happen

---

@@ -12,119 +12,119 @@
<table>

<tr>
<td>Tuesday, January 20, 2026</td>
<td>Tuesday, September 23, 2025</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Wednesday, January 21, 2026</td>
<td>Wednesday, September 24, 2025</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Thursday, January 22, 2026</td>
<td>Thursday, September 25, 2025</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Friday, January 23, 2026</td>
<td>Friday, September 26, 2025</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>

<tr>
<td>Monday, January 26, 2026</td>
<td>Tuesday, September 30, 2025</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Tuesday, January 27, 2026</td>
<td>Wednesday, October 1, 2025</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Wednesday, January 28, 2026</td>
<td>Thursday, October 2, 2025</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Thursday, January 29, 2026</td>
<td>Friday, October 3, 2025</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>

<tr>
<td>Thursday, February 5, 2026</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Friday, February 6, 2026</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Monday, February 9, 2026</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Tuesday, February 10, 2026</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>

<tr>
<td>Wednesday, February 11, 2026</td>
<td>Tuesday, October 7, 2025</td>
<td>
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
</td>
</tr>
<tr>
<td>Thursday, February 12, 2026</td>
<td>Wednesday, October 8, 2025</td>
<td>
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
</td>
</tr>
<tr>
<td>Friday, February 13, 2026</td>
<td>Thursday, October 9, 2025</td>
<td>
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
</td>
</tr>

<tr>
<td>Monday, February 16, 2026</td>
<td>Tuesday, October 14, 2025</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Wednesday, October 15, 2025</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Thursday, October 16, 2025</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Friday, October 17, 2025</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>

<tr>
<td>Tuesday, November 4, 2025</td>
<td>
<a href="5.yml.html">Opérer Kubernetes</a>
</td>
</tr>
<tr>
<td>Tuesday, February 17, 2026</td>
<td>Wednesday, November 5, 2025</td>
<td>
<a href="5.yml.html">Opérer Kubernetes</a>
</td>
</tr>
<tr>
<td>Wednesday, February 18, 2026</td>
<td>Thursday, November 6, 2025</td>
<td>
<a href="5.yml.html">Opérer Kubernetes</a>
</td>
</tr>
<tr>
<td>Thursday, February 19, 2026</td>
<td>Friday, November 7, 2025</td>
<td>
<a href="5.yml.html">Opérer Kubernetes</a>
</td>
@@ -17,19 +17,17 @@ exclude:
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- shared/handson.md
- containers/labs-live.md
- shared/connecting.md
- containers/labs-async.md
- shared/toc.md
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
@@ -16,19 +16,17 @@ exclude:

content:
- shared/title.md
#- logistics.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- shared/handson.md
#- containers/labs-live.md
#- shared/connecting.md
- containers/labs-async.md
- shared/toc.md
- containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
@@ -17,19 +17,17 @@ exclude:
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- shared/handson.md
- containers/labs-live.md
- shared/connecting.md
- containers/labs-async.md
- shared/toc.md
# DAY 1
- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Initial_Images.md
@@ -180,7 +180,7 @@ An Ingress Controller! 😅

- Install an Ingress Controller:
  ```bash
  kubectl apply -f ~/container.training/k8s/traefik.yaml
  kubectl apply -f ~/container.training/k8s/traefik-v2.yaml
  ```

- Wait a little bit, and check that we now have a `kubernetes.io/tls` Secret:
@@ -1,216 +0,0 @@
# CloudNativePG

- CloudNativePG (CNPG) is an operator to run PostgreSQL on Kubernetes

- Makes it easy to run production Postgres on K8S

- Supports streaming replication, backups, PITR, TLS, monitoring...

- Open source

- Accepted to CNCF on January 21, 2025 at the Sandbox maturity level

  (https://www.cncf.io/projects/cloudnativepg/)

---

## A few examples

- [EphemeraSearch](https://www.ephemerasearch.com/)

  *personal project, ~200 GB database, tiny budget*

- [Sellsy](https://enix.io/en/clients/)

  *40,000 databases across 50 clusters, Talos, Proxmox VE*

- MistralAI

  *30 production clusters, each from a few GB to a few TB in size*

→ CNPG works for environments with small, big, and many clusters!

---

## Typical operation

- Decide what kind of storage we want to use

  (cloud, local, distributed, hyperconverged...)

- Decide on a backup strategy

  (typically an object store, e.g. S3-compatible)

- Set up a `StorageClass` if needed

- Install CNPG

- Deploy Postgres cluster(s) with YAML manifests

- Profit!

---

## Local vs remote storage

- Local storage can feel less safe

  (compared to a SAN, cloud block device, distributed volume...)

- However, it can be much faster

  (much lower latency, much higher throughput)

- If we're using replication, losing a local volume is no problem

- Distributed storage can also fail

  (or be unavailable for a while)

---

## CNPG installation

Example with Helm:
```bash
helm upgrade --install --namespace cnpg-system --create-namespace \
     --repo https://cloudnative-pg.io/charts/ \
     cloudnative-pg cloudnative-pg \
     --version 1.25.1
```

Interesting options to add, to integrate with Prometheus Operator:
```bash
--set monitoring.podMonitorEnabled=true
--set monitoring.grafanaDashboard.create=true
--set monitoring.grafanaDashboard.namespace=prom-system
```

---

## Minimal Postgres cluster

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: minimal
spec:
  instances: 2
  storage:
    size: 10G
```

Note: this is missing (notably) resource requests and backups!
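
To try it out (a sketch; assumes the manifest above was saved as `minimal.yaml`, and that the `cnpg.io/cluster` label is set on the pods, which is CNPG's convention):

```bash
kubectl apply -f minimal.yaml

# The CRD is clusters.postgresql.cnpg.io; watch the instances come up
kubectl get clusters.postgresql.cnpg.io
kubectl get pods -l cnpg.io/cluster=minimal -w
```
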
---

## `kubectl` plugin

- There is a `kubectl-cnpg` plugin

- Install it (e.g. with `krew`)

- Check commands like:

  `k cnpg status`

  `k cnpg psql`

  `k cnpg backup`

  `k cnpg promote`
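
For instance, with krew already set up (`k` is assumed to be an alias for `kubectl`; `minimal` is the cluster from the previous slide):

```bash
kubectl krew install cnpg

# Show the status of our cluster (primary, replicas, replication state...)
kubectl cnpg status minimal
```
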
---

## Production clusters

Check the following YAML manifest:

https://github.com/jpetazzo/pozok/blob/main/cluster-production.yaml

If you want to test this, you need an S3-compatible object store.

- Set the required variables:

  `$CLUSTER_NAME`, `$AWS_ACCESS_KEY_ID`, `$AWS_SECRET_ACCESS_KEY`, `$AWS_DEFAULT_REGION`, `$BUCKET_NAME`, `$AWS_ENDPOINT_URL`

- Then `envsubst < cluster-production.yaml | kubectl apply -f-`

- The cluster comes up; backups and WAL segments land in the S3 bucket!
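
As a rough idea of what's inside, here is a sketch of the backup part of such a manifest (field names follow CNPG's `barmanObjectStore` API; the `s3-creds` Secret name is a placeholder):

```bash
kubectl apply -f- <<EOF
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: production
spec:
  instances: 3
  storage:
    size: 10G
  backup:
    barmanObjectStore:
      destinationPath: s3://$BUCKET_NAME/
      endpointURL: $AWS_ENDPOINT_URL
      s3Credentials:
        accessKeyId:
          name: s3-creds        # placeholder Secret holding the S3 credentials
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: s3-creds
          key: SECRET_ACCESS_KEY
EOF
```
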
---

## Automated switchover

- CNPG detects when we `kubectl cordon` a node

- It assumes "cordon = maintenance"

- If the node hosts a primary server, it initiates a switchover

- It also uses Pod Disruption Budgets (PDB) to collaborate with evictions

  (the PDB prevents the eviction of the primary until it gets demoted)
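
To see it in action (a sketch; `node1` is a placeholder for the node currently hosting the primary):

```bash
kubectl cordon node1          # CNPG treats this as maintenance and switches over
kubectl cnpg status minimal   # check which instance is the new primary
kubectl uncordon node1
```
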
---

## Benchmarking

- Postgres has `pgbench`

- Step 1: execute e.g. `pgbench -i -s 10` to prepare the database

  (`-s` is an optional "scaling factor" for a bigger dataset)

- Step 2: execute `pgbench -P1 -T10` to run the benchmark

  (`-P1` = report progress every second, `-T10` = run for 10 seconds)

- These commands can be executed in the pod running the primary, e.g.:

  `kubectl exec minimal-1 -- pgbench app -i -s 10`

  `kubectl exec minimal-1 -- pgbench app -P1 -T60`

---

## CNPG lab 1

- Install CNPG on a managed cluster with a default `StorageClass`

- Provision a CNPG cluster (primary+replica)

- Run a `pgbench` (e.g. 60 seconds)

- Note the number of transactions / second

- Install another `StorageClass` (e.g. `rancher/local-path-provisioner`)

- Provision another CNPG cluster with that storage class

- Run a benchmark and compare the numbers

- Discuss!

---

## CNPG lab 2

- This one requires access to an S3-compatible object store

- Deploy a cluster sending backups to the object store

- Run a benchmark (to populate the database)

- Trigger a backup (e.g. with `k cnpg backup`)

- Create a new cluster from the backup

- Confirm that the number of rows (e.g. in `pgbench_history`) is the same

???

:EN:- Deploying Postgres clusters with CloudNativePG
:FR:- Déployer des clusters Postgres avec CloudNativePG
@@ -729,8 +729,8 @@ class: extra-details

- Relevant documentation:

  [Service spec](https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec),
  [LabelSelector spec](https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/label-selector/),
  [Service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#servicespec-v1-core),
  [LabelSelector spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#labelselector-v1-meta),
  [label selector doc](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors)

---

@@ -8,8 +8,6 @@

## The `color` app

- Source code: https://github.com/jpetazzo/color

- Image name: `jpetazzo/color`, `ghcr.io/jpetazzo/color`

- Available for linux/amd64, linux/arm64, linux/arm/v7 platforms
@@ -1,265 +0,0 @@
# ExternalDNS

- https://github.com/kubernetes-sigs/external-dns

- Open source controller

- “Configure external DNS servers dynamically from Kubernetes resources”

- ExternalDNS will automatically create DNS records from Kubernetes resources

- Example:

  - we own the domain `example.com`

  - we create an Ingress resource for `dev.example.com`

  - ExternalDNS automatically creates a DNS record for `dev.example.com`
    <br/>(with the IP address used by our Ingress Controller)

---

## Supported Kubernetes resources

- Services

- Ingresses

- HTTPRoutes (Gateway API)

- Nodes

- [And much more!][externaldns-sources]

  (ExternalDNS calls these "sources".)

[externaldns-sources]: https://kubernetes-sigs.github.io/external-dns/latest/docs/sources/about/

---

## Supported DNS providers

- [More than 25 providers are supported "in-tree"][externaldns-intree]

- [At least as many are supported out of tree, through a webhook system][externaldns-webhooks]

- These providers include:

  - cloud providers like Route53, CloudFlare, Exoscale, Linode, OVHcloud, Scaleway...

  - self-hosted DNS like PowerDNS, CoreDNS...

  - DNS included in routers and similar appliances like MikroTik, Pi-hole...

  - generic DNS update protocols like the one defined in [RFC2136]

[externaldns-intree]: https://kubernetes-sigs.github.io/external-dns/latest/docs/providers/
[externaldns-webhooks]: https://kubernetes-sigs.github.io/external-dns/latest/#new-providers
[RFC2136]: https://datatracker.ietf.org/doc/html/rfc2136

---

## Order of operations

1. Have a domain name

2. Set up the domain name with a DNS provider

3. Install and configure ExternalDNS

4. Create Kubernetes resources; for instance:

   - a Service with the annotation `external-dns.alpha.kubernetes.io/hostname`

   - an Ingress mentioning one or multiple hosts

---

## What are we going to use?

- If you need a domain name, you can get a cheap one from one of these providers:

  [Porkbun] / [Infomaniak] / [BookMyName]

  (we're not affiliated with them, but we're happy customers!)

- For the DNS provider, we're going to use Linode DNS

  (but anything else will work just as well)

[Porkbun]: https://porkbun.com
[Infomaniak]: https://infomaniak.com/en/domains
[BookMyName]: https://bookmyname.com

---

## Prep work

- Make sure that the domain name is set up to use the DNS provider

  (technically, the "NS records" should be set up properly)

- Make sure that you have an API token for the DNS provider

  (or whatever equivalent is necessary to update DNS records there)

- Pro-tip: change the default TTL for the domain to a relatively low value

  (e.g. 300 seconds / 5 minutes)

- This will be useful to reduce the impact of *negative caching* when testing

  (i.e. accessing an entry that doesn't exist yet)
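
A quick way to check both points from the command line (assuming our domain is `example.com`):

```bash
# The NS records should list the DNS provider's name servers
dig +short NS example.com

# The TTL is the second field of each answer line
dig +noall +answer example.com NS
```
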
---

## Deploying ExternalDNS

- Option 1: use the container image
  <br/>(`registry.k8s.io/external-dns/external-dns`)

  - create a Deployment using the image
  - ideally, set up RBAC resources (ServiceAccount, ClusterRole, ClusterRoleBinding)
  - configure through command-line flags or environment variables

- Option 2: use the upstream Helm chart
  <br/>(https://artifacthub.io/packages/helm/external-dns/external-dns)

  - set value `provider.name`
  - set value `env` to pass configuration options (e.g. provider credentials)

- Option 3: use the Bitnami Helm chart

  ⚠️ NOT RECOMMENDED DUE TO BROADCOM'S LICENSING CHANGES

---

## Using the official Helm chart

- We're going to install ExternalDNS with the official Helm chart

- We'll put the Linode API token in a separate Secret

- We'll reference that Secret in the chart configuration values

- This means that we could manage that secret with a separate process

  (e.g. [External Secrets Operator][eso], [Sealed Secrets][sealed-secrets]...)

[eso]: https://external-secrets.io/latest/
[sealed-secrets]: https://github.com/bitnami-labs/sealed-secrets

---

## Installing the chart

- We're doing this first, because it will create the `external-dns` Namespace

.lab[

- Create the `external-dns` Namespace and deploy ExternalDNS there:
  ```bash
  helm upgrade --install external-dns external-dns \
      --repo https://kubernetes-sigs.github.io/external-dns/ \
      --namespace external-dns --create-namespace \
      --set provider.name=linode \
      --set env[0].name=LINODE_TOKEN \
      --set env[0].valueFrom.secretKeyRef.name=external-dns \
      --set env[0].valueFrom.secretKeyRef.key=LINODE_TOKEN \
      #
  ```

]

---

## Creating the Secret

- First, create an API token on Linode

  (it should be on [that page](https://cloud.linode.com/profile/tokens), then click `Create A Personal Access Token`)

.lab[

- Create a Secret with our new API token:
  ```bash
  kubectl create secret generic external-dns --namespace external-dns \
      --from-literal=LINODE_TOKEN=`...`
  ```

]

---

## Checking that ExternalDNS is up and running

- Note that it might take a minute for ExternalDNS to start successfully

  (because the Secret didn't exist yet when we deployed the chart)

.lab[

- Check the status of the pods:
  ```bash
  kubectl get pods --namespace=external-dns
  ```

]

- If the Pod is in status `CreateContainerConfigError`, give it a minute

  (and/or check what's going on with `kubectl describe`)

---

## Testing ExternalDNS

- Assuming that our domain is `example.com`...

- We can annotate a `LoadBalancer` Service to add a record for its `ExternalIP`:
  ```bash
  kubectl annotate service web \
      external-dns.alpha.kubernetes.io/hostname=demo-public.`example.com`
  ```

- We can also annotate a `ClusterIP` Service to add a record for its `ClusterIP`:
  ```bash
  kubectl annotate service web \
      external-dns.alpha.kubernetes.io/internal-hostname=demo-private.`example.com`
  ```

---

## Troubleshooting

- Check ExternalDNS logs:
  ```bash
  kubectl logs -n external-dns -l app.kubernetes.io/name=external-dns
  ```

- The DNS records should also show up in the Linode DNS web interface

---

class: extra-details

## Ingress

- When using ExternalDNS with Ingress resources:

  *make sure that the ADDRESS field in the Ingress isn't blank!*

- ExternalDNS uses that field to know the IP address to use in DNS records

- This field should be automatically filled by the Ingress Controller

- Some Ingress Controllers don't do it automatically

  (and might require additional configuration)

- Example: for Traefik, look for the `publishedService` option
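
A quick way to check (the Ingress name `web` is a placeholder):

```bash
# The ADDRESS column should show the Ingress Controller's IP or hostname
kubectl get ingress web
```
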

???

:EN:- Deploying ExternalDNS
:FR:- Déployer ExternalDNS
@@ -1,253 +0,0 @@
# Running a Harbor registry

- There are many open source registries available out there

- We're going to show an end-to-end example using a very popular one:

  [Harbor](https://goharbor.io) (https://goharbor.io)

- We will:

  - install Harbor

  - create a private registry on Harbor

  - set up an automated build pipeline pushing images to Harbor

  - configure an app to use images from the private registry

---

## Requirements

- Virtually all registry clients **require** TLS when communicating with registries

  (one exception: when the registry is on `localhost`)

- This means that we'll need a valid TLS certificate for our registry

- We can easily get one with cert-manager and e.g. Let's Encrypt

  (as long as we can associate a domain with our ingress controller)

- To run the demos in this chapter, **we need a domain name!**

---

## Alternatives

- We could configure our build pipeline to ignore certificates

  (so that it can push images without complaining)

- We could hack something so that the registry is available over `localhost`

- Or we could add the registry's certificate everywhere

  (in our build pipeline, on our container engines...)

- These extra steps are out of scope for this chapter

- **We need a domain name!**

---

## Automating TLS certificates

- Let's install Traefik:
  ```bash
  kubectl apply -f ~/container.training/k8s/traefik.yml
  ```

- And cert-manager:
  ```bash
  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.1/cert-manager.yaml
  ```

- Edit the `ClusterIssuer` manifest and apply it:
  ```bash
  vim ~/container.training/k8s/cm-clusterissuer.yaml
  kubectl apply -f ~/container.training/k8s/cm-clusterissuer.yaml
  ```

⚠️ Make sure to update the cluster issuer name to `letsencrypt-production`!

---

## Checking that it works

- Deploy a simple application and expose it with TLS:
  ```bash
  kubectl create deployment blue --image jpetazzo/color --replicas 2 --port 80
  kubectl expose deployment blue
  kubectl create ingress blue --rule=blue.`$DOMAIN`/*=blue:80,tls \
      --annotation cert-manager.io/cluster-issuer=letsencrypt-production
  ```

- Check that the certificate was correctly issued:
  ```bash
  kubectl get cert
  curl https://blue.`$DOMAIN`/
  ```

---

## Deploying Harbor

- There is a Helm chart (https://artifacthub.io/packages/helm/harbor/harbor)

- Let's install it:
  ```bash
  helm upgrade --install --repo https://helm.goharbor.io \
      --namespace harbor --create-namespace harbor harbor \
      --set persistence.enabled=false \
      --set expose.ingress.hosts.core=harbor.`$DOMAIN` \
      --set expose.ingress.annotations."cert-manager\.io/cluster-issuer"=letsencrypt-production \
      --version 1.18.0
  ```

- Wait until all pods are `Running` in the `harbor` namespace

---

## Logging into Harbor

- Go to https://harbor.$DOMAIN/

- The default login is `admin`

- The default password is `Harbor12345`

  (yes, it would be a good idea to change that in production😁)

---

## Creating a new repository

- In Harbor, repositories are associated with "projects"

- Create a new project named `dockercoins`

---

## Creating Harbor users

- We will create two "robot accounts":

  - one with `push` permission (for the build pipeline)

  - one with `pull` permission (for our Kubernetes workloads)

- Create a first robot account, `dockercoins-push`

  - don't give it any system permissions

  - for project permissions, check `dockercoins`

  - then select permissions, check `push` and `pull`

- Write down the user and password!

---

## Setting up the build pipeline

- This part requires a GitHub account

- On GitHub, fork https://github.com/jpetazzo/dockercoins

  (it has a GitHub Actions workflow that is almost ready to use!)

- In your fork, go to settings / secrets and variables / actions

- Create the following secrets:

  `REGISTRY_ADDRESS` = `harbor.$DOMAIN` (make sure to enter the real domain of course!)

  `REGISTRY_USERNAME` = the user name generated by Harbor

  `REGISTRY_PASSWORD` = the password generated by Harbor

---

## Setting up the build pipeline

- Edit `.github/workflows/automated-build.yaml`

- Comment out the steps related to GitHub Container Registry and Docker Hub

- Uncomment the steps related to the custom external registry

- Commit

- In your fork, click on the "Actions" button on top

- You should see the workflow running

- After a couple of minutes, it should (hopefully) report success

---

## Creating the `pull` robot account

- In Harbor, create another robot account

- Let's name it `dockercoins-pull`

- Again, don't give it any system permissions

- Give it `pull` permissions for the `dockercoins` project

- Write down the user and password

---

## Create a Secret for the `pull` account

- Let's create a Kubernetes Secret holding the registry credentials:
  ```bash
  kubectl create secret docker-registry dockercoins-pull \
      --docker-username '`robot$dockercoins-pull`' \
      --docker-password `abcdefghijKLMNOPQRST` \
      --docker-server harbor.`$DOMAIN`
  ```

- Make sure to quote the username (the `$` will cause problems otherwise)

---

## Use the Secret

- We have two possibilities here:

  - add `imagePullSecrets` to every Pod template that needs them

  - add `imagePullSecrets` to the ServiceAccount used by the Pods

- Let's patch the default ServiceAccount:
  ```bash
  kubectl patch serviceaccount default \
      --patch 'imagePullSecrets: [ name: dockercoins-pull ]'
  ```

---

## Use the private registry

- Make a copy of `~/container.training/k8s/dockercoins.yml`

- In that copy, replace every `dockercoins/*` image with `harbor.$DOMAIN/dockercoins/*`

  (put the actual domain, not `$DOMAIN`; one way to script this is shown below)

- Apply that YAML

- Check that the application is up and running

- Check that the number of pulls has increased in the Harbor web UI

- Congratulations, you've deployed an image from a self-hosted private registry! 🎉
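
One way to script the copy-and-replace steps above (a sketch; assumes `$DOMAIN` is set, and that images appear as `image: dockercoins/...` in the manifest; review the result before applying!):

```bash
cp ~/container.training/k8s/dockercoins.yml dockercoins-harbor.yml
sed -i "s,image: dockercoins/,image: harbor.$DOMAIN/dockercoins/," dockercoins-harbor.yml
kubectl apply -f dockercoins-harbor.yml
```
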

???

:EN:- Hosting private images with Harbor
:FR:- Héberger des images privées avec Harbor
@@ -1,512 +0,0 @@
# Setting up an Ingress Controller

- We're going to install Traefik as our Ingress Controller

  - arbitrary choice; but it has a nice dashboard, which is helpful when troubleshooting

  - also, Traefik releases are named after tasty cheeses :)

- We're going to install it using the official Helm chart

  - also an arbitrary choice; but a fairly straightforward one

  - Helm charts can easily fit in other tools (like Flux, ArgoCD, Terraform/OpenTofu...)

- There can be some differences depending on how we want to handle inbound traffic

- Let's review the different possibilities!

---

## Scenario 1: `LoadBalancer` Service

- This is the default option for most Ingress Controllers

  (i.e. what you get if you install their Helm charts without further configuration)

- It requires a cluster where `LoadBalancer` Services are available

  - most cloud-based, managed clusters support `LoadBalancer` Services

  - on-premises clusters can also support them with e.g. [MetalLB] or [kube-vip]

- The Ingress Controller runs with a Deployment

  (typically scaled to at least two replicas, to ensure high availability)

- It is exposed with a `LoadBalancer` Service

---

## Scenario 2: `hostPort`

- This is a good fallback option when `LoadBalancer` Services aren't available

- It typically requires extra configuration steps or options when installing the controller

- It requires a cluster where at least some Nodes have public IP addresses

- The Ingress Controller runs with a DaemonSet

  (potentially with a `nodeSelector` to restrict it to a specific set of nodes)

- The Ingress Controller Pods are exposed by using one (or multiple) `hostPort`

- `hostPort` creates a direct port mapping on the Node, for instance:

  *port 80 on the Node → port 8080 in the Pod*

- It can also create a shorter, faster path to the application Pods

---

## Scenario 3: `hostNetwork`

- This is similar to `hostPort`

  (but a bit less secure)

- Ingress controller Pods run with `hostNetwork: true`

- This lets the Pods use the network stack of the Node that they're running on

- When the Ingress Controller binds to port 80, it means "port 80 on the Node"

- The Ingress Controller must be given permissions to bind to ports below 1024

  (it must either run as root, or leverage the `NET_BIND_SERVICE` capability accordingly)

- The Ingress Controller can potentially bind to any port on the Node

  (this might not be desirable!)

---

## Scenario 4: `externalIPs`

- Heads up, this is a rather exotic scenario, but here we go!

- It's possible to [manually assign `externalIPs` to a Service][externalips]

  (including `ClusterIP` services)

- When TCP connections (or UDP packets) destined to an `externalIP` reach a Node...

  ...the Node will forward these connections or packets to the relevant Pods

- This requires manual management of a pool of available `externalIPs`

- It also requires some network engineering so that the traffic reaches the Nodes

  (in other words: just setting `externalIPs` on Services won't be enough!)

- This is how some controllers like [MetalLB] or [kube-vip] operate

[externalips]: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
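
For illustration, a minimal sketch of such a Service (the address and selector are placeholders; routing traffic for that address to the Nodes is up to us):

```bash
kubectl apply -f- <<EOF
apiVersion: v1
kind: Service
metadata:
  name: ingress-controller
spec:
  selector:
    app: ingress-controller    # placeholder selector matching the controller Pods
  ports:
  - port: 80
  externalIPs:
  - 203.0.113.10               # placeholder address; must be routed to the Nodes
EOF
```
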
---

## Scenario 5: local dev clusters

- Local dev clusters are typically not reachable from the outside world

- They rarely have `LoadBalancer` Services

  (when they do, they use local addresses, and are sometimes limited to a single one)

- Their Nodes might not be directly reachable

  (making the `hostPort` and `hostNetwork` strategies impractical)

- In some cases, it's possible to map a port on our machine to a port on the dev cluster

  (e.g. [KinD has an `extraPortMappings` option][kind-extraportmappings])

- We can also use `kubectl port-forward` to test our local Ingress Controller

[kind-extraportmappings]: https://kind.sigs.k8s.io/docs/user/configuration/#extra-port-mappings

---

class: extra-details

## Local clusters details

- When using Docker-based clusters on Linux:

  *connect directly to the node's IP address (172.X.Y.Z)*

- When using Docker-based clusters with Docker Desktop:

  *set up port mapping (then connect to localhost:XYZ)*

- Generic scenario:

  *run `kubectl port-forward 8888:80` to the Ingress Controller*
  <br/>
  *(and then connect to `http://localhost:8888`)*

---

class: extra-details

## Why not a `NodePort` Service?

- Node ports are typically in the 30000-32767 range

- Web site users don't want to specify port numbers

  (e.g. "connect to https://blahblah.whatever:31550")

- Our Ingress Controller needs to actually be exposed on port 80

  (and 443 if we want to handle HTTPS)

---

## Installing Traefik with a `LoadBalancer`

- We're going to use the official Helm chart

  (https://artifacthub.io/packages/helm/traefik/traefik)

- Its default configuration values should work out of the box

  (as long as our cluster supports `LoadBalancer` Services!)

.lab[

- Install the Traefik Helm chart:
  ```bash
  helm upgrade --install --repo https://traefik.github.io/charts \
      traefik traefik --namespace traefik --create-namespace \
      --version 37.4.0
  ```

]

- That's it; now let's send it a test request!

---

## Retrieving the Ingress Controller address

- Our Ingress Controller uses a `LoadBalancer` Service

- We want to obtain that Service's `EXTERNAL-IP`

.lab[

- Retrieve the `EXTERNAL-IP` that has been allocated to the Service:
  ```bash
  kubectl get services --namespace=traefik
  ```

- Send a test request; it should show `404 not found`:
  ```bash
  curl http://`<EXTERNAL-IP>`
  ```

]

- Note: that `EXTERNAL-IP` might be `<Pending>` for a little while before showing up

  (in that case, just try again a few seconds later)

---

class: extra-details

## Scripting it

- If we want to include these steps in a script, here's what we can do!

.lab[

- Use `kubectl wait` to wait until a specific field exists in the resource:
  ```bash
  kubectl wait service traefik --namespace=traefik \
      --for=jsonpath=.status.loadBalancer.ingress
  ```

- Then extract the IP address:
  ```bash
  kubectl get service traefik --namespace=traefik \
      -o jsonpath={.status.loadBalancer.ingress[0].ip}
  ```

]

- Note: on some providers like AWS, you might have to use `.hostname` instead of `.ip`

- Note: there might be multiple addresses; the command above returns only the first one
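
Putting those two commands together in a script could look like this (assuming the address is an IP, not a hostname):

```bash
# Wait for the load balancer address, store it, then send a test request
kubectl wait service traefik --namespace=traefik \
        --for=jsonpath=.status.loadBalancer.ingress
LB_IP=$(kubectl get service traefik --namespace=traefik \
        -o jsonpath={.status.loadBalancer.ingress[0].ip})
curl http://$LB_IP
```
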
---

class: extra-details

## Make it production-ready

- To improve the availability of our Ingress Controller:

  - configure at least 2 replicas (in case of Node outage)

  - add a `podAntiAffinity` (to make sure Pods are not all in the same place)

  - add a PodDisruptionBudget (to handle planned maintenance, e.g. cluster upgrades)

  - set resource requests and limits for CPU and RAM

- To monitor our Ingress Controller:

  - collect the metrics exposed by Traefik (e.g. with Prometheus)

  - set up some alerting (e.g. with [stakater/IngressMonitorController])

[stakater/IngressMonitorController]: https://github.com/stakater/IngressMonitorController

---

## Installing Traefik with a `DaemonSet` + `hostPort`

- The plan is to run one Traefik Pod on each Node of the cluster

- For that, we need a `DaemonSet` instead of a `Deployment`

- Instead of a `LoadBalancer` Service, we'll use a `hostPort`

  (actually, multiple `hostPort`; at least one for HTTP and one for HTTPS)

- Let's see how to do that with the Traefik Helm chart!

- We'll be looking at the chart's [default values] and [values schema]

[default values]: https://artifacthub.io/packages/helm/traefik/traefik?modal=values
[values schema]: https://artifacthub.io/packages/helm/traefik/traefik?modal=values-schema

---

## Switching to a `DaemonSet`

- In the chart's [default values], looking for the string `DaemonSet` gives us this:
  ```yaml
  deployment:
    # -- Enable deployment
    enabled: true
    # -- Deployment or `DaemonSet`
    kind: Deployment
  ```

- This means we need to set `deployment.kind=DaemonSet`!

---

## Using `hostPort`

- In the chart's [default values], we find 3 references mentioning `hostPort`:
.small[
  ```yaml
  ports:
    traefik:
      port: 8080
      # -- Use hostPort if set.
      `hostPort`: # @schema type:[integer, null]; minimum:0
    ...
    web:
      ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint ...
      # asDefault: true
      port: 8000
      # `hostPort`: 8000
    ...
    websecure:
      ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint ...
      # asDefault: true
      port: 8443
      `hostPort`: # @schema type:[integer, null]; minimum:0
  ```
]

- This deserves a small explanation about the Traefik concept of "entrypoint"!

---

## Traefik "entrypoints"

- An entrypoint in Traefik is basically an open port

- Common Traefik configurations will have 3 entrypoints (3 open ports):

  - `web` for HTTP traffic

  - `websecure` for HTTPS traffic

  - `traefik` for the Traefik dashboard and API

- We'll set `ports.web.hostPort=80` and `ports.websecure.hostPort=443`

⚠️ Traefik entrypoints are totally unrelated to `ENTRYPOINT` in Dockerfiles!

---

## Traefik Service

- By default, the Helm chart creates a `LoadBalancer` Service

- We don't need that anymore, so we can either:

  - disable it altogether (`service.enabled=false`)

  - switch it to a `ClusterIP` service (`service.type=ClusterIP`)

- Either option is fine!

---

## Putting it all together

- We're going to use a bunch of `--set` flags with all the options that we gathered

- We could also put them in a YAML file and use `--values`

.lab[

- Install Traefik with all our options:
  ```bash
  helm upgrade --install --repo https://traefik.github.io/charts \
      traefik traefik --namespace traefik --create-namespace \
      --set deployment.kind=DaemonSet \
      --set ports.web.hostPort=80 \
      --set ports.websecure.hostPort=443 \
      --set service.type=ClusterIP \
      --version 37.4.0
  ```

]

---

## Testing our Ingress Controller

- We should be able to connect to *any* Node of the cluster, on port 80

.lab[

- Send a test request:
  ```bash
  curl http://`<node-address>`
  ```

]

- We should see `404 not found`

---

class: extra-details

## Control plane nodes

- When running Kubernetes on-premises, it's typical to have "control plane nodes"

- These nodes are dedicated to the control plane Pods, and won't run normal workloads

- If you have such a cluster (e.g. deployed with `kubeadm` on multiple nodes):

  - get the list of nodes (`kubectl get nodes`)

  - check where Traefik Pods are running (`kubectl get pods --namespace=traefik`)

- You should see that Traefik is not running on control plane nodes!

---

class: extra-details

## Running Traefik on the control plane

- If we want to do that, we need to provide a *toleration*

- That toleration needs to match the *taint* on the control plane nodes

- To review the taints on our nodes, we can use one of these commands:
  ```bash
  kubectl get nodes -o custom-columns=NAME:metadata.name,TAINTS:spec.taints
  kubectl get nodes -o json | jq '.items[] | [.metadata.name, .spec.taints]'
  ```

- Then, to place the proper toleration on Traefik pods:
  ```bash
  --set tolerations[0].key=node-role.kubernetes.io/control-plane
  --set tolerations[0].effect=NoSchedule
  ```

- Note: as we keep adding options, writing a values YAML file becomes more convenient!
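
For instance, the options accumulated so far could be written as a values file (a sketch combining the flags from the previous slides):

```bash
cat > traefik-values.yaml <<EOF
deployment:
  kind: DaemonSet
ports:
  web:
    hostPort: 80
  websecure:
    hostPort: 443
service:
  type: ClusterIP
tolerations:
- key: node-role.kubernetes.io/control-plane
  effect: NoSchedule
EOF

helm upgrade --install --repo https://traefik.github.io/charts \
     traefik traefik --namespace traefik --create-namespace \
     --values traefik-values.yaml --version 37.4.0
```
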
---

## What about local dev clusters?

- Follow the instructions for "normal" clusters (with a `LoadBalancer` service)

- Once Traefik is up and running, set up a port-forward:
  ```bash
  kubectl port-forward --namespace=traefik service/traefik 8888:80
  ```

- Connect to http://localhost:8888

- You should see a `404 not found` served by Traefik

- Whenever you need the "IP address of the Ingress Controller", use `localhost:8888`

  (you'll need to specify that port number)

- With some clusters (e.g. KinD) it's also possible to set up local port mappings
  to avoid specifying the port number; but the port-forward method should work everywhere

---

## The Traefik dashboard

- Accessing the Traefik dashboard requires multiple steps

- First, the dashboard feature needs to be enabled in Traefik

  *the Helm chart does this automatically by default*

- Next, there needs to be a "route" inside Traefik to expose the dashboard

  *this can be done by setting `ingressRoute.dashboard.enabled=true`*

- Finally, we need to connect to the correct entrypoint

  *by default, that will be the internal entrypoint on port 8080, on `/dashboard`*

---

## Accessing the Traefik dashboard

- Redeploy Traefik, adding `--set ingressRoute.dashboard.enabled=true`

- Then use port-forward to access the internal `traefik` entrypoint:
  ```bash
  kubectl port-forward --namespace=traefik deployment/traefik 1234:8080
  kubectl port-forward --namespace=traefik daemonset/traefik 1234:8080
  ```
  (use the appropriate command depending on how you're running Traefik)

- Connect to http://localhost:1234/dashboard/ (with the trailing slash!)

- You should see the Traefik dashboard!

- Note: it's only available on the internal port, but there is no authentication by default!

  (you might want to add authentication or e.g. set up a NetworkPolicy to secure it)

???

[MetalLB]: https://metallb.org/
[kube-vip]: https://kube-vip.io/

:EN:- Setting up an Ingress Controller
:FR:- Mise en place d'un Ingress Controller
@@ -46,7 +46,7 @@ A few use-cases:

- Ingress

  - requires an Ingress Controller
  - can implement TLS transparently for the app
  - only supports HTTP
  - can do content-based routing (e.g. per URI)
@@ -61,40 +61,11 @@ A few use-cases:

- Designed to expose HTTP services

- Requires an *Ingress Controller*

  (otherwise, resources can be created, but nothing happens)

- Some Kubernetes distributions automatically install an Ingress Controller

  (or they give that option as an easy "on/off" switch at install time)

- It's relatively rare, though, because Ingress Controllers aren't "one size fits all"

---

## Checking if we have an Ingress Controller

- A modern Ingress Controller will create an IngressClass resource

- We can check simply by running `kubectl get ingressclasses`

- Example:
  ```shell
  $ kubectl get ingressclasses
  NAME      CONTROLLER                      PARAMETERS   AGE
  traefik   traefik.io/ingress-controller   <none>       139m
  ```

- It's also possible to have an IngressClass without a working Ingress Controller

  (e.g. if the controller is broken, or has been partially uninstalled...)

---

## A taxonomy of Ingress Controllers

- Some Ingress Controllers are based on existing load balancers

  (HAProxy, NGINX...)

@@ -102,56 +73,7 @@ A few use-cases:

  (Contour, Traefik...)

- Some are proprietary to a specific hardware or cloud vendor

  (GKE Ingress, AWS ALB Ingress)

- Note: there is no "default" or "official" Ingress Controller!

---

class: extra-details

## Details about these proprietary controllers

- GKE has "[GKE Ingress]", a custom Ingress Controller

  (enabled by default but [does not use IngressClass][gke-ingressclass])

- EKS has "AWS ALB Ingress Controller" as well

  (not enabled by default, requires extra setup)

- They leverage cloud-specific HTTP load balancers

  (GCP HTTP LB, AWS ALB)

- They typically carry a cost *per ingress resource*

[GKE Ingress]: https://cloud.google.com/kubernetes-engine/docs/concepts/ingress
[gke-ingressclass]: https://docs.cloud.google.com/kubernetes-engine/docs/concepts/ingress#deprecated_annotation

---

class: extra-details

## Single or multiple LoadBalancer

- Most Ingress Controllers will create a LoadBalancer Service

  (and will receive all HTTP/HTTPS traffic through it)

- We need to point our DNS entries to the IP address of that LB

- Some rare Ingress Controllers will allocate one LB per ingress resource

  (example: the GKE Ingress and ALB Ingress mentioned previously)

- This leads to increased costs

- Note that it's possible to have multiple "rules" per ingress resource

  (this will reduce costs but may be less convenient to manage)
---

@@ -181,14 +103,11 @@ class: extra-details

- etc.

*Supporting these features in a standard, vendor-independent way, is
one of the goals of the Gateway API. (More on that at the end of this section!)*

---

## Principle of operation

- Step 1: deploy an *Ingress Controller*

  (one-time setup; typically done by cluster admin)
@@ -196,78 +115,146 @@ one of the goals of the Gateway API. (More on that at the end of this section!)*

  - maps a domain and/or path to a Kubernetes Service

  - the controller watches Ingress resources and sets up a LB

- Step 3: set up DNS (optional)

  - associate DNS entries with the load balancer address

  - this can be automated with [ExternalDNS]

[ExternalDNS]: https://github.com/kubernetes-sigs/external-dns

---

## Ingress in action

- We're going to deploy an Ingress Controller

  (unless our cluster already has one that we can use)

- Then, we will create ingress resources for various HTTP services

- We'll demonstrate DNS integration as well

- If you don't have a domain name for this part, you can use [nip.io]

  (`*.1.2.3.4.nip.io` resolves to `1.2.3.4`)

---

class: extra-details

## Special cases

- GKE has "[GKE Ingress]", a custom ingress controller

  (enabled by default)

- EKS has "AWS ALB Ingress Controller" as well

  (not enabled by default, requires extra setup)

- They leverage cloud-specific HTTP load balancers

  (GCP HTTP LB, AWS ALB)

- They typically carry a cost *per ingress resource*

---

## Deploying the Ingress Controller

- Many variations are possible, depending on:

  - which Ingress Controller we pick

  - whether `LoadBalancer` Services are available or not

  - the deployment tool we want to use (Helm, plain YAML...)

- If you're attending a live class, we're going to take a shortcut

  (with a ready-to-use manifest optimized for the clusters we use in class)

- Otherwise, check the section dedicated to Ingress Controller setup first

---

## If you're attending a live class...
|
||||
|
||||
- Each student is assigned a pre-configured cluster
|
||||
|
||||
(sometimes, multiple clusters, to demonstrate different scenarios)
|
||||
|
||||
- We have prepared a YAML manifest that will take care of setting up Traefik for you
|
||||
|
||||
.lab[
|
||||
|
||||
- Install Traefik on your cluster:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/traefik.yml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- Note: this YAML manifest is only suitable for live class clusters!
|
||||
[GKE Ingress]: https://cloud.google.com/kubernetes-engine/docs/concepts/ingress
|
||||
|
||||
---
|
||||
|
||||
class: extra-details

## What's in this manifest?

- It runs Traefik with a DaemonSet

  (with a `hostPort` and a *toleration* so that Traefik runs on every node, including the control plane)
---
## Classic ingress controller setup

- Ingress controller runs with a Deployment

  (with at least 2 replicas for redundancy)

- It is exposed with a `LoadBalancer` Service

- Typical for cloud-based clusters

- Also common when running on-premises with [MetalLB] or [kube-vip]

[MetalLB]: https://metallb.org/
[kube-vip]: https://kube-vip.io/

---
## Alternate ingress controller setup

- Ingress controller runs with a DaemonSet

  (on bigger clusters, this can be coupled with a `nodeSelector`)

- It is exposed with `externalIPs`, `hostPort`, or `hostNetwork`

- Typical for on-premises clusters

  (where at least a set of nodes have a stable IP and high availability)
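
As an illustration, here is a minimal sketch of how such a DaemonSet could expose port 80 directly on each node (names and image are illustrative; the manifest we use in the labs is more complete):

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: ingress-controller
spec:
  selector:
    matchLabels:
      app: ingress-controller
  template:
    metadata:
      labels:
        app: ingress-controller
    spec:
      containers:
      - name: controller
        image: traefik
        ports:
        - containerPort: 80
          hostPort: 80    # bind port 80 on the node itself
```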

---

class: extra-details

## Why not a `NodePort` Service?

- Node ports are typically in the 30000-32767 range

- Web site users don't want to specify port numbers

  (e.g. "connect to https://blahblah.whatever:31550")

- Our ingress controller needs to actually be exposed on port 80

  (and 443 if we want to handle HTTPS)

---

class: extra-details

## Local clusters

- When running a local cluster, some extra steps might be necessary

- When using Docker-based clusters on Linux:

  *connect directly to the node's IP address (172.X.Y.Z)*

- When using Docker-based clusters with Docker Desktop:

  *set up port mapping (then connect to localhost:XYZ)*

- Generic scenario:

  *run `kubectl port-forward 8888:80` to the ingress controller*
  <br/>
  *(and then connect to `http://localhost:8888`)*
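
For instance, assuming the ingress controller runs in a Deployment called `traefik` in Namespace `traefik` (adjust the names to your setup), it could look like this:

```bash
# Forward local port 8888 to port 80 of the ingress controller
kubectl port-forward --namespace=traefik deployment/traefik 8888:80
# Then, in another terminal:
curl http://localhost:8888
```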

---

## Trying it out with Traefik

- We are going to run Traefik with a DaemonSet

  (there will be one instance of Traefik on every node of the cluster)

- The DaemonSet will expose Traefik on port 80 of each node (with a `hostPort`)

- This means that we will be able to connect to any node of the cluster on port 80

- It also includes a *toleration* to make sure Traefik runs on every Node

  (including the control plane Node)

---

## Running Traefik

- The [Traefik documentation][traefikdoc] recommends using a Helm chart

- For simplicity, we're going to use a custom YAML manifest

- Our manifest will:

  - use a DaemonSet so that each node can accept connections

  - enable `hostPort: 80`

  - add a *toleration* so that Traefik also runs on all nodes

- We could do the same with the official [Helm chart][traefikchart]

[traefikdoc]: https://doc.traefik.io/traefik/getting-started/install-traefik/#use-the-helm-chart
[traefikchart]: https://artifacthub.io/packages/helm/traefik/traefik

---
class: extra-details
## Taints and tolerations

- A *taint* is an attribute added to a node

- It prevents pods from running on the node

- ... Unless they have a matching *toleration*

- When deploying with `kubeadm`:

  - a taint is placed on the node dedicated to the control plane

  - the pods running the control plane have a matching toleration

---

class: extra-details

## Checking taints on our nodes

.lab[

- Check our nodes' specs:
  ```bash
  kubectl get node node1 -o json | jq .spec
  kubectl get node node2 -o json | jq .spec
  ```

]

We should see a result only for `node1` (the one with the control plane):

```json
  "taints": [
    {
      "effect": "NoSchedule",
      "key": "node-role.kubernetes.io/master"
    }
  ]
```

---

class: extra-details

## Understanding a taint

- The `key` can be interpreted as:

  - a reservation for a special set of pods
    <br/>
    (here, this means "this node is reserved for the control plane")

  - an error condition on the node
    <br/>
    (for instance: "disk full," do not start new pods here!)

- The `effect` can be:

  - `NoSchedule` (don't run new pods here)

  - `PreferNoSchedule` (try not to run new pods here)

  - `NoExecute` (don't run new pods and evict running pods)

---

class: extra-details

## Checking tolerations on the control plane

.lab[

- Check tolerations for CoreDNS:
  ```bash
  kubectl -n kube-system get deployments coredns -o json |
          jq .spec.template.spec.tolerations
  ```

]

The result should include:

```json
  {
    "effect": "NoSchedule",
    "key": "node-role.kubernetes.io/master"
  }
```

It means: "bypass the exact taint that we saw earlier on `node1`."
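
For reference, here is a minimal sketch of what such a toleration looks like in a pod manifest (mirroring the taint shown earlier; the pod name and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: control-plane-capable
spec:
  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule     # matches the taint that we saw on node1
  containers:
  - name: main
    image: nginx
```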

---

class: extra-details

## Special tolerations

.lab[

- Check tolerations on `kube-proxy`:
  ```bash
  kubectl -n kube-system get ds kube-proxy -o json |
          jq .spec.template.spec.tolerations
  ```

]

The result should include:

```json
  {
    "operator": "Exists"
  }
```

This one is a special case that means "ignore all taints and run anyway."

---

## Running Traefik on our cluster

- We provide a YAML file (@@LINK[k8s/traefik.yaml]) which contains:

  - a `traefik` Namespace

  - a `traefik` DaemonSet in that Namespace

  - RBAC rules allowing Traefik to watch the necessary API objects

.lab[

- Apply the YAML:
  ```bash
  kubectl apply -f ~/container.training/k8s/traefik.yaml
  ```

]

---

## Checking that Traefik runs correctly

- If Traefik started correctly, we now have a web server listening on each node

.lab[

- Check that Traefik is serving 80/tcp:
  ```bash
  curl localhost
  ```

]

We should get a `404 page not found` error.

This is normal: we haven't provided any ingress rule yet.

---

## Traefik web UI

- Traefik provides a web dashboard

- With the current install method, it's listening on port 8080

.lab[

- Go to `http://node1:8080` (replacing `node1` with its IP address)

<!-- ```open http://node1:8080``` -->

]

---

## Setting up routing ingress rules

- We are going to use the `jpetazzo/color` image

- This image contains a simple static HTTP server on port 80

- We will run 3 Deployments (`red`, `green`, `blue`)

- We will expose these Deployments with Services

- Then we will create Ingress resources to route requests to these Services

  (starting with a single rule, then routing `/red`, `/green`, `/blue`)

---

## Running colorful web servers

- Nothing special here; we're just creating Deployments and Services

.lab[

- Run all three deployments:
  ```bash
  kubectl create deployment red --image=jpetazzo/color
  kubectl create deployment green --image=jpetazzo/color
  kubectl create deployment blue --image=jpetazzo/color
  ```

- Create a service for each of them:
  ```bash
  kubectl expose deployment red --port=80
  kubectl expose deployment green --port=80
  kubectl expose deployment blue --port=80
  ```

]

---

## Creating Ingress resources

- There is a convenient helper command, `kubectl create ingress`

  (available since Kubernetes 1.19; before that, the only way was to use YAML manifests)

- An Ingress resource can contain one or multiple "rules"

.lab[

- Create an Ingress with a single rule:
  ```bash
  kubectl create ingress blue --rule=/blue=blue:80
  ```

]

`/blue` = HTTP path that the Ingress should use

`blue:80` = Service name + port where requests should be sent

---

## Testing our new Ingress

- We need to know to which IP address to connect

- If you're attending a live class, or if you installed your Ingress Controller with a DaemonSet and `hostPort` or `hostNetwork`:

  *use the IP address of any of the nodes of your cluster*

- If you installed your Ingress Controller with a `LoadBalancer` Service:

  *use the EXTERNAL-IP of the Service*

- If you're using a local dev cluster:

  *it depends; we suggest `kubectl port-forward` and then use `localhost`*

---

## Testing our new Ingress

- Connect to `http://<IP address>/blue`

- We should see a reply from the `blue` Deployment

---

## Using domain names

- With Ingress, we can use what is often called "name-based virtual hosting"

- This lets us host multiple web apps on a single IP address

- All we need is to use a different domain name for each web app

  (e.g.: `blue.example.com`, `green.example.com`, `red.example.com`...)

- We could use a real domain name, or, for simplicity, [nip.io]

- In the next steps, we'll assume that our Ingress controller uses IP address `A.B.C.D`

  (make sure to substitute accordingly!)

---

## Before creating the Ingress

- We will make the `blue` Deployment available at the URL http://blue.A.B.C.D.nip.io

.lab[

- Let's check what happens if we connect to that address right now:
  ```bash
  curl http://blue.A.B.C.D.nip.io
  ```

]

- If we're using Traefik, it will give us a very terse `404 not found` error

  (that's expected!)

---

## Creating the Ingress

- This will be very similar to the Ingress that we created earlier

- But we're going to add a domain name in the rule

.lab[

- Create the Ingress:
  ```bash
  kubectl create ingress blue-with-domain --rule=blue.A.B.C.D.nip.io/=blue:80
  ```

- Test it:
  ```bash
  curl http://blue.A.B.C.D.nip.io
  ```

]

- We should see a response from the `blue` Deployment

---

## Accessing other URIs

- What happens if we try to access e.g. `/blue/hello`?

.lab[

- Retrieve the `ClusterIP` of Service `blue`:
  ```bash
  BLUE=$(kubectl get svc blue -o jsonpath={.spec.clusterIP})
  ```

- Check that the `blue` app serves `/hello`:
  ```bash
  curl $BLUE/hello
  ```

- See what happens if we try to access it through the Ingress:
  ```bash
  curl http://localhost/blue/hello
  ```

]

---

## Exact or prefix matches

- By default, Ingress rules are *exact* matches

  (the request is routed only if the URI matches the specified path exactly)

.lab[

- Confirm that only `/` routes to the `blue` app:
  ```bash
  curl http://blue.A.B.C.D.nip.io        # works
  curl http://blue.A.B.C.D.nip.io/hello  # does not work
  ```

]

- How do we change that?

---

## Specifying a prefix match

- If a rule ends with `*`, it will be interpreted as a prefix match

.lab[

- Create a prefix match rule for the `blue` service:
  ```bash
  kubectl create ingress blue-with-prefix --rule=blue.A.B.C.D.nip.io/*=blue:80
  ```

- Check that it works:
  ```bash
  curl http://blue.A.B.C.D.nip.io/hello
  ```

]

---

## What do Ingress manifests look like?

- Let's have a look at the manifests generated by `kubectl create ingress`!

- We'll use `-o yaml` to display the YAML generated by `kubectl`

- And `--dry-run=client` to instruct `kubectl` to skip resource creation

.lab[

- Generate and display a few manifests:
  ```bash
  kubectl create ingress -o yaml --dry-run=client \
          exact-route --rule=/blue=blue:80

  kubectl create ingress -o yaml --dry-run=client \
          with-a-domain --rule=blue.test/=blue:80

  kubectl create ingress -o yaml --dry-run=client \
          now-with-a-prefix --rule=blue.test/*=blue:80
  ```

]
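
For reference, the last command should produce something roughly like this (a sketch; exact output depends on your kubectl version):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: now-with-a-prefix
spec:
  rules:
  - host: blue.test
    http:
      paths:
      - path: /
        pathType: Prefix    # "Exact" when the rule doesn't end with *
        backend:
          service:
            name: blue
            port:
              number: 80
```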

---

## Multiple rules per Ingress resource

- It is also possible to have multiple rules in a single Ingress resource

- Let's see what that looks like, too!

.lab[

- Show the manifest for an Ingress resource with multiple rules:
  ```bash
  kubectl create ingress -o yaml --dry-run=client rgb \
          --rule=/red*=red:80 \
          --rule=/green*=green:80 \
          --rule=/blue*=blue:80
  ```

- Create that Ingress resource:
  ```bash
  kubectl create ingress rgb \
          --rule=/red*=red:80 \
          --rule=/green*=green:80 \
          --rule=/blue*=blue:80
  ```

- Check that everything still works after deleting individual rules

]

---

## Using domain-based routing

- In the previous examples, we didn't use domain names

  (we routed solely based on the URI of the request)

- We are now going to show how to use domain-based routing

- We are going to assume that we have a domain name

  (for instance: `cloudnative.tld`)

- That domain name should be set up so that a few subdomains point to the ingress

  (for instance, `blue.cloudnative.tld`, `green.cloudnative.tld`...)

- For simplicity or flexibility, we can also use a wildcard record

---
## Setting up DNS

- To make our lives easier, we will use [nip.io](http://nip.io)

- Check out `http://red.A.B.C.D.nip.io`

  (replacing A.B.C.D with the IP address of `node1`)

- We should get the same `404 page not found` error

  (meaning that our DNS is "set up properly", so to speak!)

---

## Setting up name-based Ingress

.lab[

- Set the `$IPADDR` variable to our ingress controller address:
  ```bash
  IPADDR=A.B.C.D
  ```

- Create our Ingress resource:
  ```bash
  kubectl create ingress rgb-with-domain \
          --rule=red.$IPADDR.nip.io/*=red:80 \
          --rule=green.$IPADDR.nip.io/*=green:80 \
          --rule=blue.$IPADDR.nip.io/*=blue:80
  ```

- Test it out:
  ```bash
  curl http://red.$IPADDR.nip.io/hello
  ```

]

---

class: extra-details

## Using multiple Ingress Controllers

- You can have multiple Ingress Controllers active simultaneously

  (e.g. Traefik and NGINX)

- You can also use different controllers for different sets of Ingress resources

  (e.g. one for internal, another for external traffic)

- To indicate which Ingress Controller should be used by a given Ingress resource:

  - before Kubernetes 1.18, use the `kubernetes.io/ingress.class` annotation

  - since Kubernetes 1.18, use the `ingressClassName` field of the Ingress resource
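
A minimal sketch of how that field appears in a manifest (the class name `traefik` is just an example; class names depend on how the controllers were installed):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: blue
spec:
  ingressClassName: traefik   # selects which Ingress Controller handles this resource
  defaultBackend:
    service:
      name: blue
      port:
        number: 80
```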

---

class: extra-details

- A lot of things have been left out of the Ingress v1 spec

  (e.g.: routing requests according to weight, cookies, across namespaces...)

- Most Ingress Controllers have vendor-specific ways to address these shortcomings

- But since they're vendor-specific, migrations become more complex

- Example: stripping path prefixes
---

## A word about Ingress NGINX

- There are two Ingress Controllers based on NGINX (both open source)

- [F5 NGINX Ingress Controller][f5-nginx] aka "NGINX Ingress" ([GitHub repo][f5-nginx-repo], [docs][f5-nginx-docs])

  - developed and maintained by F5 (the company that acquired NGINX in 2019)

  - supports vendor-specific CRDs like [VirtualServer and VirtualServerRoute][f5-nginx-crds]

- Ingress NGINX Controller aka "Ingress NGINX" ([GitHub repo][k8s-nginx-repo], [docs][k8s-nginx-docs])

  - one of the earliest Kubernetes Ingress Controllers

  - developed by the community

  - **no longer under active development; maintenance will stop in March 2026**

    (check the [announcement][k8s-nginx-announcement])

[f5-nginx]: https://docs.nginx.com/nginx-ingress-controller/
[f5-nginx-docs]: https://docs.nginx.com/nginx-ingress-controller
[f5-nginx-repo]: https://github.com/nginx/kubernetes-ingress
[f5-nginx-crds]: https://docs.nginx.com/nginx-ingress-controller/configuration/virtualserver-and-virtualserverroute-resources/
[k8s-nginx-docs]: https://kubernetes.github.io/ingress-nginx/
[k8s-nginx-repo]: https://github.com/kubernetes/ingress-nginx
[k8s-nginx-announcement]: https://kubernetes.io/blog/2025/11/11/ingress-nginx-retirement/

---

## A word about software sustainability

- From the Ingress NGINX retirement announcement:

  *Despite the project’s popularity among users, Ingress NGINX has always struggled with insufficient or barely-sufficient maintainership. For years, the project has had only one or two people doing development work, on their own time, after work hours and on weekends.*

--

- If your production, mission-critical workloads depend on open source software:

  *what happens if the maintainers throw in the towel?*

--

- If your production, mission-critical workloads depend on commercial software:

  *what happens if the company behind it goes out of business?*

  *what happens if they drastically change their business model or [pricing][vmware1] [structure][vmware2]?*

[vmware1]: https://www.theregister.com/2025/05/22/euro_cloud_body_ecco_says_broadcom_licensing_unfair/
[vmware2]: https://www.ciodive.com/news/att-broadcom-vmware-price-hikes-court-battle/728603/

---

## Ingress in the future

- The [Gateway API SIG](https://gateway-api.sigs.k8s.io/) is probably the future of Ingress

- It proposes new resources:

  GatewayClass, Gateway, HTTPRoute, TCPRoute...

- It now has feature parity with Ingress

  (=it can be used instead of Ingress resources; or in addition to them)

- It is, however, more complex to set up and operate

  (at least for now!)
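
For a taste of what this looks like, here is a minimal sketch of an HTTPRoute (the resource names are illustrative; a GatewayClass and Gateway must exist as well):

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: blue
spec:
  parentRefs:
  - name: my-gateway        # the Gateway accepting the traffic
  rules:
  - matches:
    - path:
        type: PathPrefix
        value: /blue
    backendRefs:
    - name: blue            # the Service receiving the traffic
      port: 80
```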

???

[nip.io]: http://nip.io

:EN:- The Ingress resource
:FR:- La ressource *ingress*

---
## A brief introduction

- This was initially written by [Jérôme Petazzoni](https://twitter.com/jpetazzo) to support in-person,
  instructor-led workshops and tutorials

- Credit is also due to [multiple contributors](https://@@GITREPO@@/graphs/contributors) — thank you!

- You can also follow along on your own, at your own pace

- We included as much information as possible in these slides

- We recommend having a mentor to help you ...

- ... Or be comfortable spending some time reading the Kubernetes [documentation](https://kubernetes.io/docs/) ...

- ... And looking for answers on [StackOverflow](http://stackoverflow.com/questions/tagged/kubernetes) and other outlets

---

class: self-paced

## Hands on, you shall practice

- Nobody ever became a Jedi by spending their lives reading Wookiepedia

- Likewise, it will take more than merely *reading* these slides
  to make you an expert

- These slides include *tons* of demos, exercises, and examples

- They assume that you have access to a Kubernetes cluster

- If you are attending a workshop or tutorial:
  <br/>you will be given specific instructions to access your cluster

- If you are doing this on your own:
  <br/>the first chapter will give you various options to get your own cluster
---

## ⚠️ Heads up!

- We're going to connect directly to pods and services, using internal addresses

- This will only work:

  - if you're attending a live class with our special lab environment

  - or if you're using our dev containers within codespaces

- If you're using a "normal" Kubernetes cluster (including minikube, KinD, etc.):

  *you will not be able to access these internal addresses directly!*

- In that case, we suggest that you run an interactive container, e.g.:
  ```bash
  kubectl run --rm -ti --image=archlinux myshell
  ```

- ...And each time you see a `curl` or `ping` command, run it in that container instead
---

class: extra-details

## But, why?

- Internal addresses are only reachable from within the cluster

  (=from a pod, or when logged directly inside a node)

- Our special lab environments and our dev containers let us do it anyway

  (because it's nice and convenient when learning Kubernetes)

- But that doesn't work on "normal" Kubernetes clusters

- Instead, we can use [`kubectl port-forward`][kubectl-port-forward] on these clusters

[kubectl-port-forward]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_port-forward/
---

## Running containers with open ports

- Let's run a small web server in a container

- We are going to use `jpetazzo/color`, a tiny HTTP server written in Go

- Send an HTTP request to the Pod:
  ```bash
  curl http://`IP-ADDRESS`
  ```

]

You should see a response from the Pod.
---

class: extra-details

## Running with a local cluster

If you're running with a local cluster (Docker Desktop, KinD, minikube...),
you might get a connection timeout (or a message like "no route to host")
because the Pod isn't reachable directly from your local machine.

In that case, you can test the connection to the Pod by running a shell
*inside* the cluster:

```bash
kubectl run -it --rm my-test-pod --image=fedora
```

Then run `curl` in that Pod.

---

## The Pod doesn't have a "stable identity"

- The IP address that we used above isn't "stable"

---

  (i.e. a service is not just an IP address; it's an IP address + protocol + port)

- As a result: you *have to* indicate the port number for your service

  (with some exceptions, like `ExternalName` or headless services, covered later)
---

- Keep sending requests to the Service address:
  ```bash
  while sleep 0.3; do curl -m1 http://$CLUSTER_IP; done
  ```

- Meanwhile, delete the Pod:

- ...But requests will keep flowing after that (without requiring a manual intervention)

- The `-m1` option is here to specify a 1-second timeout
---

## Load balancing

- Get a shell in a Pod:
  ```bash
  kubectl run --rm -it --image=archlinux test-dns-integration
  ```

- Try to resolve the `blue` Service from the Pod:

---

## Under the hood...

- Let's check the content of `/etc/resolv.conf` inside a Pod

- It should look approximately like this:
  ```
  search default.svc.cluster.local svc.cluster.local cluster.local ...
  nameserver 10.96.0.10
  options ndots:5
  ```

- Let's break down what these lines mean...

---

class: extra-details

## `nameserver 10.96.0.10`

- This is the address of the DNS server used by programs running in the Pod

- The exact address might be different

  (this one is the default one when setting up a cluster with `kubeadm`)

- This address will correspond to a Service on our cluster

- Check what we have in `kube-system`:
  ```bash
  kubectl get services --namespace=kube-system
  ```

- There will typically be a service named `kube-dns` with that exact address

  (that's Kubernetes' internal DNS service!)
---

class: extra-details

## `search default.svc.cluster.local ...`

- This is the "search list"

- When a program tries to resolve `foo`, the resolver will try to resolve:

  `foo.default.svc.cluster.local` (if the Pod is in the `default` Namespace)

  `foo.svc.cluster.local`

  `foo.cluster.local`

  ...(the other entries in the search list)...

  `foo`

- As a result, if there is a Service named `foo` in the Pod's Namespace, we obtain its address!
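
A quick way to see this in action from a shell inside a Pod (a sketch, assuming the `blue` Service from earlier exists in the `default` Namespace):

```bash
getent hosts blue                               # short name, resolved via the search list
getent hosts blue.default.svc.cluster.local     # fully qualified name, same result
```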

---

class: extra-details

## Do You Want To Know More?

- If you want even more details about DNS resolution on Kubernetes and Linux...

  check [this blog post][dnsblog]!

[dnsblog]: https://jpetazzo.github.io/2024/05/12/understanding-kubernetes-dns-hostnetwork-dnspolicy-dnsconfigforming/
---

- In detail:

  - all nodes can reach each other directly (without NAT)

  - all pods can reach each other directly (without NAT)

  - pods and nodes can reach each other directly (without NAT)

  - each pod is aware of its IP address (again: no NAT)

- Most Kubernetes clusters rely on the CNI to configure Pod networking

  (allocate IP addresses, create and configure network interfaces, routing...)
---

- No new protocol

- IP addresses are allocated by the network stack, not by the users

  (this avoids complex constraints associated with address portability)

- CNI is very flexible and lends itself to many different models

  (switching, routing, tunneling... virtually anything is possible!)

- Example: we could have one subnet per node and use a simple routed topology
---

- Everything can reach everything

  - if we want network isolation, we need to add network policies

  - some clusters (like AWS EKS) don't include a network policy controller out of the box

- There are literally dozens of Kubernetes network implementations out there

  (https://github.com/containernetworking/cni/ lists more than 25 plugins)
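
For reference, here is a minimal sketch of a NetworkPolicy (names and labels are illustrative; this one would only allow pods labeled `role=frontend` to connect to the `blue` pods):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: blue-from-frontend
spec:
  podSelector:
    matchLabels:
      app: blue           # the pods being protected
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: frontend  # the pods allowed to connect
```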

  (Services map to a single UDP or TCP port; no port ranges or arbitrary IP packets)

- The default Kubernetes service proxy, `kube-proxy`, doesn't scale very well

  (although this is improved considerably in [recent versions of kube-proxy][tables-have-turned])

[tables-have-turned]: https://www.youtube.com/watch?v=yOGHb2HjslY
---

## Kubernetes network model: in practice

- We don't need to worry about networking in local development clusters

  (it's set up automatically for us and we almost never need to change anything)

- We also don't need to worry about it in managed clusters

  (except if we want to reconfigure or replace whatever was installed automatically)

- We *do* need to pick a network stack in all other scenarios:

  - installing Kubernetes on bare metal or on "raw" virtual machines

  - when we manage the control plane ourselves
---

## Which network stack should we use?

*It depends!*

- [Weave] = super easy to install, no config needed, low footprint...

  *but it's not maintained anymore, alas!*

- [Cilium] = very powerful and flexible, some consider it "best in class"...

  *but it's based on eBPF, which might make troubleshooting challenging!*

- Other solid choices include [Calico], [Flannel], [kube-router]

- And of course, some cloud providers / network vendors have their own solutions

  (which may or may not be appropriate for your use-case!)

- Do you want speed? Reliability? Security? Observability?

[Weave]: https://github.com/weaveworks/weave
[Cilium]: https://cilium.io/
[Calico]: https://docs.tigera.io/calico/latest/about/
[Flannel]: https://github.com/flannel-io/flannel
[kube-router]: https://www.kube-router.io/

---

class: extra-details

## The Container Network Interface (CNI)

- Most Kubernetes clusters use CNI "plugins" to implement networking

- When a pod is created, Kubernetes delegates the network setup to these plugins

  (it can be a single plugin, or a combination of plugins, each doing one task)

- Typically, CNI plugins will:

  - allocate an IP address (by calling an IPAM plugin)

  - add a network interface into the pod's network namespace

  - configure the interface as well as required routes etc.
---

class: extra-details

## Multiple moving parts

- The "pod-to-pod network" or "pod network" or "CNI":

  - provides communication between pods and nodes

  - is generally implemented with CNI plugins

- The "pod-to-service network" or "Kubernetes service proxy":

  - provides internal communication and load balancing

  - implemented with kube-proxy by default

- Network policies:
---
## Painting pods

- As an example, we'll implement a policy regarding "Pod color"

- The color of a Pod is the value of the label `color`

- Example: `kubectl label pod hello color=yellow` to paint a Pod in yellow

- We want to implement the following policies:

  - color is optional (i.e. the label is not required)

  - if color is set, it *must* be `red`, `green`, or `blue`

  - once the color has been set, it cannot be changed

  - once the color has been set, it cannot be removed

---

## Immutable primary colors, take 1

- First, we will add a policy to block forbidden colors

  (i.e. only allow `red`, `green`, or `blue`)

- One possible approach:

  - *match* all pods that have a `color` label that is not `red`, `green`, or `blue`

  - *deny* these pods

- We could also *match* all pods, then *deny* with a condition

---

.small[
```yaml
@@INCLUDE[k8s/kyverno-pod-color-1.yaml]
```
]

---

## Testing without the policy

- First, let's create a pod with an "invalid" label

  (while we still can!)

- We will use this later

.lab[

- Create a pod:
  ```bash
  kubectl run test-color-0 --image=nginx
  ```

- Apply a color label:
  ```bash
  kubectl label pod test-color-0 color=purple
  ```

]

---

## Load and try the policy

.lab[

- Load the policy:
  ```bash
  kubectl apply -f ~/container.training/k8s/kyverno-pod-color-1.yaml
  ```

- Create a pod:
  ```bash
  kubectl run test-color-1 --image=nginx
  ```

- Try to apply a few color labels:
  ```bash
  kubectl label pod test-color-1 color=purple
  kubectl label pod test-color-1 color=red
  kubectl label pod test-color-1 color-
  ```

]

---

## Immutable primary colors, take 2

- Next rule: once a `color` label has been added, it cannot be changed

  (i.e. if `color=red`, we can't change it to `color=blue`)

- Our approach:

  - *match* all pods

  - add a *precondition* matching pods that have a `color` label
    <br/>
    (both in their "before" and "after" states)

  - *deny* these pods if their `color` label has changed

- Again, other approaches are possible!

---

.small[
```yaml
@@INCLUDE[k8s/kyverno-pod-color-2.yaml]
```
]

---

## Comparing "old" and "new"

- The fields of the webhook payload are available through `{{ request }}`

- For UPDATE requests, we can access:

  `{{ request.oldObject }}` → the object as it is right now (before the request)

  `{{ request.object }}` → the object with the changes made by the request
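
As a sketch of how these two can be compared in a rule (illustrative only; the actual policy files included in these slides are the reference):

```yaml
validate:
  message: "The color label cannot be changed."
  deny:
    conditions:
      all:
        # Deny if the color label differs between old and new object.
        # (The real policy adds preconditions so that setting the
        # label for the first time is still allowed.)
        - key: "{{ request.object.metadata.labels.color || '' }}"
          operator: NotEquals
          value: "{{ request.oldObject.metadata.labels.color || '' }}"
```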

---

## Missing labels

- We can access the `color` label through `{{ request.object.metadata.labels.color }}`

- If we reference a label (or any field) that doesn't exist, the policy fails

  (with an error similar to `JMESPath query failed: Unknown key ... in path`)

- If a precondition fails, the policy will be skipped altogether (and ignored!)

- To work around that, [use an OR expression][non-existence-checks]:

  `{{ request.object.metadata.labels.color || '' }}`

- Note that in older versions of Kyverno, this wasn't always necessary

  (e.g. in *preconditions*, a missing label would evaluate to an empty string)

[non-existence-checks]: https://kyverno.io/docs/policy-types/cluster-policy/jmespath/#non-existence-checks

---

## Load and try the policy

.lab[

- Load the policy:
  ```bash
  kubectl apply -f ~/container.training/k8s/kyverno-pod-color-2.yaml
  ```

- Create a pod:
  ```bash
  kubectl run test-color-2 --image=nginx
  ```

- Try to apply a few color labels:
  ```bash
  kubectl label pod test-color-2 color=purple
  kubectl label pod test-color-2 color=red
  kubectl label pod test-color-2 color=blue --overwrite
  ```

]

---

## Immutable primary colors, take 3

- Last rule: once a `color` label has been added, it cannot be removed

- Our approach is to match all pods that:

  - *had* a `color` label (in `request.oldObject`)

  - *don't have* a `color` label (in `request.object`)

- And *deny* these pods

- Again, other approaches are possible!

---

.small[
```yaml
@@INCLUDE[k8s/kyverno-pod-color-3.yaml]
```
]

---

## Load and try the policy

.lab[

- Load the policy:
  ```bash
  kubectl apply -f ~/container.training/k8s/kyverno-pod-color-3.yaml
  ```

- Create a pod:
  ```bash
  kubectl run test-color-3 --image=nginx
  ```

- Try to apply a few color labels:
  ```bash
  kubectl label pod test-color-3 color=purple
  kubectl label pod test-color-3 color=red
  kubectl label pod test-color-3 color-
  ```

]

---

## Background checks

- What about the `test-color-0` pod that we created initially?

  (remember: we did set `color=purple`)

- We can see the infringing Pod in a PolicyReport

.lab[

- Check that the pod still has an "invalid" color:
  ```bash
  kubectl get pods -L color
  ```

- List PolicyReports:
  ```bash
  kubectl get policyreports
  kubectl get polr
  ```

]

(Sometimes it takes a little while for the infringement to show up, though.)
---
## Detecting duplicate Ingress routes

- What happens when two Ingress resources have the same host+path?

--

- Undefined behavior!

--

- Possibilities:

  - one of the Ingress rules is ignored (newer, older, lexicographic, random...)

  - both Ingress rules are ignored

  - traffic is randomly processed by both rules (sort of load balancing)

  - creation of the second resource is blocked by an admission policy

--

- Can we implement that last option with Kyverno? 🤔

---

## General idea

- When a new Ingress resource is created:

  *check if there is already an identical Ingress resource*

- We'll want to use the `apiCall` feature

  (to retrieve all existing Ingress resources across all Namespaces)

- Problem: we don't care about strict equality

  (there could be different labels, annotations, TLS configuration)

- Problem: an Ingress resource is a collection of *rules*

  (we want to check if any rule of the new Ingress...
  <br/>...conflicts with any rule of an existing Ingress)

---

## Good news, everyone

- There is an example in the Kyverno documentation!

  [Unique Ingress Host and Path][kyverno-unique-ingress]

--

- Unfortunately, the example doesn't really work

  (at least as of [Kyverno 1.16 / January 2026][kyverno-unique-ingress-github])

- Can you see problems with it?

--

- Suggestion: load the policy and make some experiments!

  (remember to switch the `validationFailureAction` to `Enforce` for easier testing)

[kyverno-unique-ingress]: https://kyverno.io/policies/other/unique-ingress-host-and-path/unique-ingress-host-and-path/
[kyverno-unique-ingress-github]: https://github.com/kyverno/policies/blob/release-1.16/other/unique-ingress-host-and-path/unique-ingress-host-and-path.yaml

---

## Problem - no `host`

- If we try to create an Ingress without specifying the `host`:
  ```
  JMESPath query failed: Unknown key "host" in path
  ```

- In some cases, this could be a feature

  (maybe we don't want to allow Ingress rules without a `host`!)

---

## Problem - no UPDATE

- If we try to modify an existing Ingress, the modification will be blocked

- This is because the "new" Ingress rules are checked against "existing" rules

- When we CREATE a new Ingress, its rules don't exist yet (no conflict)

- When we UPDATE an existing Ingress, its rules will show up in the existing rules

- By definition, a rule will always conflict with itself

- So UPDATE requests will always be blocked

- If we exclude UPDATE operations, then it will be possible to introduce conflicts

  (by modifying existing Ingress resources to add/edit rules in them)

- This problem makes the policy useless as it is (unless we completely block updates)

---

## Problem - poor UX

- When the policy detects a conflict, it doesn't say which other resource is involved

- Sometimes, it's possible to find it manually

  (with a bunch of clever `kubectl get ingresses --all-namespaces` commands)

- Sometimes, we don't have read permissions on the conflicting resource

  (e.g. if it's in a different Namespace that we cannot access)

- It would be nice if the policy could report the exact Ingress and Namespace involved

---

## Problem - useless block

- There is a `preconditions` block to ignore `DELETE` operations

- This is useless, as the default is to match only `CREATE` and `UPDATE` requests

  (See the [documentation about match statements][kyverno-match])

- This block can be safely removed

[kyverno-match]: https://kyverno.io/docs/policy-types/cluster-policy/match-exclude/#match-statements

---

## Solution - no `host`

- In Kyverno, when doing a lookup, the way to handle non-existent keys is with a `||`

- For instance, replace `{{element.host}}` with `{{element.host||''}}`

  (or a placeholder value like `{{element.host||'NOHOST'}}`)

---

## Solution - no UPDATE

- When retrieving existing Ingress resources, we need to exclude the current one

- This can look like this:
  ```yaml
  context:
  - name: ingresses
    apiCall:
      urlPath: "/apis/networking.k8s.io/v1/ingresses"
      jmesPath: |
        items[?
          metadata.namespace!='{{request.object.metadata.namespace}}'
          ||
          metadata.name!='{{request.object.metadata.name}}'
        ]
  ```

---

## Solution - poor UX

- Ideally, when there is a conflict, we'd like to display a message like this one:
  ```
  Ingress host+path combinations must be unique across the cluster.
  This Ingress contains a rule for host 'www.example.com' and path '/',
  which conflicts with Ingress 'example' in Namespace 'default'.
  ```

- This requires a significant refactor of the policy logic

- Instead of:

  *loop on rules; filter by rule's host; find if there is any common path*

- We need e.g.:

  *loop on rules; nested loop on paths; filter ingresses with conflicts*

- This requires nested loops, and a way to access the `element` of each nested loop

---

## Nested loops

- As of January 2026, this isn't very well documented

  (author's note: I had to [dive into Kyverno's code][kyverno-nested-element] to figure it out...)

- The trick is that the outer loop's element is `element0`, the next one is `element1`, etc.

- Additionally, there is a bug in Kyverno's context handling when defining a variable in a loop

  (the variable needs to be defined at the top-level, with e.g. a dummy value)

TODO: propose a PR to Kyverno's documentation! 🤓💡

[kyverno-nested-element]: https://github.com/kyverno/kyverno/blob/5d5345ec3347f4f5c281652461d42231ea3703e5/pkg/engine/context/context.go#L284

---

## Putting it all together

- Try to write a Kyverno policy to detect conflicting Ingress resources

- Make sure to test the following edge cases:

  - rules that don't define a host (e.g. `kubectl create ingress test --rule=/=test:80`)

  - ingresses with multiple rules

  - no-op edits (e.g. adding a label or annotation)

  - conflicting edits (e.g. adding/editing a rule that adds a conflict)

  - rules for `host1/path1` and `host2/path2` shouldn't conflict with `host1/path2`
---
- It has many use cases, including:

  - enforcing or giving warnings about best practices or misconfigurations
    <br/>(e.g. `:latest` images, healthchecks, requests and limits...)

  - tightening security
    <br/>(possibly for multitenant clusters)

  - preventing some modifications
    <br/>(e.g. restricting modifications to some fields, labels...)

  - modifying, generating, cleaning up resources automatically
---

## Kyverno in action

- We're going to install Kyverno on our cluster

- Then, we will use it to implement a few policies

---

## Installing Kyverno

The recommended [installation method][install-kyverno] is to use Helm charts.

---

- Which resources does it *select?*

  - *match* and/or *exclude* resources

  - match by *kind*, *selector*, *namespace selector*, user/roles doing the action...

- Which operation should be done?

---

## Validating objects

Example: [require resource requests and limits][kyverno-requests-limits].

```yaml
validate:
  message: "CPU and memory resource requests and memory limits are required."
  pattern:
    spec:
      containers:
      - resources:
          requests:
            memory: "?*"
            cpu: "?*"
          limits:
            memory: "?*"
```

(The full policy also has sections for `initContainers` and `ephemeralContainers`.)

[kyverno-requests-limits]: https://kyverno.io/policies/best-practices/require-pod-requests-limits/require-pod-requests-limits/

---

## Optional fields

Example: [disallow `NodePort` Services][kyverno-disallow-nodeports].

```yaml
validate:
  message: "Services of type NodePort are not allowed."
  pattern:
    spec:
      =(type): "!NodePort"
```

`=(...):` means that the field is optional.

`type: "!NodePort"` would *require* the field to exist, but be different from `NodePort`.

[kyverno-disallow-nodeports]: https://kyverno.io/policies/best-practices/restrict-node-port/restrict-node-port/
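
A quick way to exercise that policy (a hypothetical test, assuming the policy is loaded and set to `Enforce`):

```bash
kubectl create deployment testweb --image=nginx
kubectl expose deployment testweb --port=80           # ClusterIP: should be allowed
kubectl expose deployment testweb --name=testweb-np \
        --port=80 --type=NodePort                     # should be denied by the policy
```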
|
||||
- First, let's create a pod with an "invalid" label
|
||||
|
||||
(while we still can!)
|
||||
|
||||
- We will use this later
|
||||
|
||||
.lab[
|
||||
|
||||
- Create a pod:
|
||||
```bash
|
||||
kubectl run test-color-0 --image=nginx
|
||||
```
|
||||
|
||||
- Apply a color label:
|
||||
```bash
|
||||
kubectl label pod test-color-0 color=purple
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Load and try the policy
|
||||
|
||||
.lab[
|
||||
|
||||
- Load the policy:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/kyverno-pod-color-1.yaml
|
||||
```
|
||||
|
||||
- Create a pod:
|
||||
```bash
|
||||
kubectl run test-color-1 --image=nginx
|
||||
```
|
||||
|
||||
- Try to apply a few color labels:
|
||||
```bash
|
||||
kubectl label pod test-color-1 color=purple
|
||||
kubectl label pod test-color-1 color=red
|
||||
kubectl label pod test-color-1 color-
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Immutable primary colors, take 2
|
||||
|
||||
- Next rule: once a `color` label has been added, it cannot be changed
|
||||
|
||||
(i.e. if `color=red`, we can't change it to `color=blue`)
|
||||
|
||||
- Our approach:
|
||||
|
||||
- *match* all pods
|
||||
|
||||
- add a *precondition* matching pods that have a `color` label
|
||||
<br/>
|
||||
(both in their "before" and "after" states)
|
||||
|
||||
- *deny* these pods if their `color` label has changed
|
||||
|
||||
- Again, other approaches are possible!
|
||||
|
||||
---
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
@@INCLUDE[k8s/kyverno-pod-color-2.yaml]
|
||||
```
|
||||
]
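
Again, the included file isn't rendered here. A rough sketch of what it could contain, assuming the deny-with-conditions approach described above (the actual `k8s/kyverno-pod-color-2.yaml` may be written differently):

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: pod-color-2
spec:
  rules:
  - name: no-color-change
    match:
      any:
      - resources:
          kinds:
          - Pod
    preconditions:
      all:
      # Only consider UPDATE requests where the color label
      # exists both before and after the change
      - key: "{{ request.operation }}"
        operator: Equals
        value: UPDATE
      - key: "{{ request.oldObject.metadata.labels.color || '' }}"
        operator: NotEquals
        value: ""
      - key: "{{ request.object.metadata.labels.color || '' }}"
        operator: NotEquals
        value: ""
    validate:
      failureAction: Enforce
      message: "Once set, the color label cannot be changed."
      deny:
        conditions:
          any:
          # Deny if the new value differs from the old one
          - key: "{{ request.object.metadata.labels.color }}"
            operator: NotEquals
            value: "{{ request.oldObject.metadata.labels.color }}"
```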
---

## Comparing "old" and "new"

- The fields of the webhook payload are available through `{{ request }}`

- For UPDATE requests, we can access:

  `{{ request.oldObject }}` → the object as it is right now (before the request)

  `{{ request.object }}` → the object with the changes made by the request

---

## Missing labels

- We can access the `color` label through `{{ request.object.metadata.labels.color }}`

- If we reference a label (or any field) that doesn't exist, the policy fails

  (with an error similar to `JMESPath query failed: Unknown key ... in path`)

- If a precondition fails, the policy will be skipped altogether (and ignored!)

- To work around that, [use an OR expression][non-existence-checks]:

  `{{ request.object.metadata.labels.color || '' }}`

- Note that in older versions of Kyverno, this wasn't always necessary

  (e.g. in *preconditions*, a missing label would evaluate to an empty string)

[non-existence-checks]: https://kyverno.io/docs/policy-types/cluster-policy/jmespath/#non-existence-checks

---
## Load and try the policy

.lab[

- Load the policy:
  ```bash
  kubectl apply -f ~/container.training/k8s/kyverno-pod-color-2.yaml
  ```

- Create a pod:
  ```bash
  kubectl run test-color-2 --image=nginx
  ```

- Try to apply a few color labels:
  ```bash
  kubectl label pod test-color-2 color=purple
  kubectl label pod test-color-2 color=red
  kubectl label pod test-color-2 color=blue --overwrite
  ```

]

---
@@ -209,7 +354,7 @@ validate:

  (more on that later)

- We (very often) need to change the `failureAction` to `Enforce`
- We need to change the `failureAction` to `Enforce`

---

@@ -237,7 +382,7 @@ validate:

- Existing objects are not affected

  (e.g. if we create "invalid" objects *before* installing the policy)
  (e.g. if we have a pod with `color=pink` *before* installing our policy)

- Kyverno can also run checks in the background, and report violations

@@ -245,80 +390,128 @@ validate:

- `background: true/false` controls that

- When would we want to disable it? 🤔

---
## Loops
## Accessing `AdmissionRequest` context

Example: [require image tags][kyverno-disallow-latest].
- In some of our policies, we want to prevent an *update*

This uses `request`, which gives access to the `AdmissionRequest` payload.
  (as opposed to a mere *create* operation)

`request` has an `object` field containing the object that we're validating.
- We want to compare the *old* and *new* version

  (to check if a specific label was removed)

- The `AdmissionRequest` object has `object` and `oldObject` fields

  (the `AdmissionRequest` object is the thing that gets submitted to the webhook)

- We access the `AdmissionRequest` object through `{{ request }}`

---

## `{{ request }}`

- The `{{ request }}` context is only available when there is an `AdmissionRequest`

- When a resource is "at rest", there is no `{{ request }}` (and no old/new)

- Therefore, a policy that uses `{{ request }}` cannot validate existing objects

  (it can only be used when an object is actually created/updated/deleted)

--

- *Well, actually...*

--

- Kyverno exposes `{{ request.object }}` and `{{ request.namespace }}`

  (see [the documentation](https://kyverno.io/docs/policy-reports/background/) for details!)

---
## Immutable primary colors, take 3

- Last rule: once a `color` label has been added, it cannot be removed

- Our approach is to match all pods that:

  - *had* a `color` label (in `request.oldObject`)

  - *don't have* a `color` label (in `request.object`)

- And *deny* these pods

- Again, other approaches are possible!

---

.small[
```yaml
validate:
  message: "An image tag is required."
  foreach:
  - list: "request.object.spec.containers"
    pattern:
      image: "*:*"
@@INCLUDE[k8s/kyverno-pod-color-3.yaml]
```

Note: again, there should also be an entry for `initContainers` and `ephemeralContainers`.

[kyverno-disallow-latest]: https://kyverno.io/policies/best-practices/disallow-latest-tag/disallow-latest-tag/
]
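
And a possible shape for the third policy (hypothetical; the real `k8s/kyverno-pod-color-3.yaml` may differ), denying updates that drop the label:

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: pod-color-3
spec:
  rules:
  - name: no-color-removal
    match:
      any:
      - resources:
          kinds:
          - Pod
    preconditions:
      all:
      # Only consider updates of pods that had a color label
      - key: "{{ request.operation }}"
        operator: Equals
        value: UPDATE
      - key: "{{ request.oldObject.metadata.labels.color || '' }}"
        operator: NotEquals
        value: ""
    validate:
      failureAction: Enforce
      message: "Once set, the color label cannot be removed."
      deny:
        conditions:
          any:
          # Deny if the label is now absent (empty after the OR fallback)
          - key: "{{ request.object.metadata.labels.color || '' }}"
            operator: Equals
            value: ""
```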
---

class: extra-details

## Load and try the policy
## ...Or not to loop

.lab[

Requiring image tags can also be achieved like this:
- Load the policy:
  ```bash
  kubectl apply -f ~/container.training/k8s/kyverno-pod-color-3.yaml
  ```

```yaml
validate:
  message: "An image tag is required."
  pattern:
    spec:
      containers:
      - image: "*:*"
      =(initContainers):
      - image: "*:*"
      =(ephemeralContainers):
      - image: "*:*"
```

- Create a pod:
  ```bash
  kubectl run test-color-3 --image=nginx
  ```

- Try to apply a few color labels:
  ```bash
  kubectl label pod test-color-3 color=purple
  kubectl label pod test-color-3 color=red
  kubectl label pod test-color-3 color-
  ```

]

---
## `request` and other variables
## Background checks

- `request` gives us access to the `AdmissionRequest` payload
- What about the `test-color-0` pod that we created initially?

- This gives us access to a bunch of interesting fields:
  (remember: we did set `color=purple`)

  `request.operation`: CREATE, UPDATE, DELETE, or CONNECT
- We can see the infringing Pod in a PolicyReport

  `request.object`: the object being created or modified
.lab[

  `request.oldObject`: the object being modified (only for UPDATE)
- Check that the pod still has an "invalid" color:
  ```bash
  kubectl get pods -L color
  ```

  `request.userInfo`: information about the user making the API request
- List PolicyReports:
  ```bash
  kubectl get policyreports
  kubectl get polr
  ```

- `object` and `oldObject` are very convenient to block specific *modifications*
]

  (e.g. making some labels or annotations immutable)

(See [here][kyverno-request] for details.)

[kyverno-request]: https://kyverno.io/docs/policy-types/cluster-policy/variables/#variables-from-admission-review-requests
(Sometimes it takes a little while for the infringement to show up, though.)

---
## Generating objects

- Let's review a fairly common use-case...

- When we create a Namespace, we also want to automatically create:

  - a LimitRange (to set default CPU and RAM requests and limits)

@@ -359,13 +552,13 @@ Note: the `apiVersion` field appears to be optional.

- Excerpt:
  ```yaml
  generate:
    kind: LimitRange
    name: default-limitrange
    namespace: "{{request.object.metadata.name}}"
    data:
      spec:
        limits:
  ```

- Note that we have to specify the `namespace`
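
To give an idea of the surrounding structure, here is a hedged sketch of a complete policy built around an excerpt like the one above (resource names and limit values are made up for the example):

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-default-limitrange
spec:
  rules:
  - name: add-default-limitrange
    match:
      any:
      - resources:
          kinds:
          # Trigger on Namespace creation
          - Namespace
    generate:
      apiVersion: v1
      kind: LimitRange
      name: default-limitrange
      # Create the LimitRange inside the new Namespace
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          limits:
          - type: Container
            defaultRequest:
              cpu: 100m
              memory: 128Mi
            default:
              cpu: 200m
              memory: 256Mi
```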

@@ -374,80 +567,11 @@ Note: the `apiVersion` field appears to be optional.

---

## Templates and JMESpath

- We can use `{{ }}` templates in Kyverno policies

  (when generating or validating resources; in conditions, pre-conditions...)

- This lets us access `request` as well as [a few other variables][kyverno-variables]

- We can also use JMESPath expressions, for instance:

  `{{request.object.spec.containers[?name=='worker'].image}}`

  `{{request.object.spec.[containers,initContainers][][].image}}`

- To experiment with JMESPath, use e.g. [jmespath.org] or [install the kyverno CLI][kyverno-cli]

  (then use `kubectl kyverno jp query < data.json ...expression...`)
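
For instance, assuming we saved an admission request payload to `request.json` (a hypothetical file name), a query along those lines could look like this:

```bash
# Extract the worker container's image from the saved payload
kubectl kyverno jp query "request.object.spec.containers[?name=='worker'].image" < request.json
```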

[jmespath.org]: https://jmespath.org/
[kyverno-cli]: https://kyverno.io/docs/kyverno-cli/install/
[kyverno-variables]: https://kyverno.io/docs/policy-types/cluster-policy/variables/#pre-defined-variables

---

## Data sources

- It's also possible to access data in Kubernetes ConfigMaps:
  ```yaml
  context:
  - name: ingressconfig
    configMap:
      name: ingressconfig
      namespace: "{{request.object.metadata.namespace}}"
  ```

- And then use it e.g. in a policy generating or modifying Ingress resources:
  ```yaml
  ...
  host: "{{request.object.metadata.name}}.{{ingressconfig.data.domainsuffix}}"
  ...
  ```

---

## Kubernetes API calls

- It's also possible to access arbitrary Kubernetes resources through API calls:
  ```yaml
  context:
  - name: dns
    apiCall:
      urlPath: "/api/v1/namespaces/kube-system/services/kube-dns"
      jmesPath: "spec.clusterIP"
  ```

- And then use that e.g. in a mutating policy:
  ```yaml
  mutate:
    patchStrategicMerge:
      spec:
        containers:
        - (name): "*"
          env:
          - name: DNS
            value: "{{dns}}"
  ```

---

## Lifecycle

- After generated objects have been created, we can change them

  (Kyverno won't automatically revert them)
  (Kyverno won't update them)

- Except if we use `clone` together with the `synchronize` flag

@@ -455,6 +579,8 @@ Note: the `apiVersion` field appears to be optional.

- This is convenient for e.g. ConfigMaps shared between Namespaces

- Objects are generated only at *creation* (not when updating an old object)

---

class: extra-details

@@ -473,14 +599,12 @@ class: extra-details

  (in the generated object `metadata`)

- See [Linking resources with ownerReferences][kyverno-ownerref] for an example
- See [Linking resources with ownerReferences][ownerref] for an example

[kyverno-ownerref]: https://kyverno.io/docs/policy-types/cluster-policy/generate/#linking-trigger-with-downstream
[ownerref]: https://kyverno.io/docs/writing-policies/generate/#linking-trigger-with-downstream

---

class: extra-details

## Asynchronous creation

- Kyverno creates resources asynchronously

@@ -497,30 +621,6 @@ class: extra-details

---

class: extra-details

## Autogen rules for Pod validating policies

- In Kubernetes, we rarely create Pods directly

  (instead, we create controllers like Deployments, DaemonSets, Jobs, etc)

- As a result, Pod validating policies can be tricky to debug

  (the policy blocks invalid Pods, but doesn't block their controller)

- Kyverno helps us with "autogen rules"

  (when we create a Pod policy, it will automatically create policies on Pod controllers)

- This can be customized if needed; [see documentation for details][kyverno-autogen]

  (it can be disabled, or extended to Custom Resources)

[kyverno-autogen]: https://kyverno.io/docs/policy-types/cluster-policy/autogen/

---

## Footprint (current versions)

- 14 CRDs

@@ -563,7 +663,45 @@ class: extra-details

- There is also a CLI tool (not discussed here)

- It continues to evolve and gain new features

---

## Caveats

- The `{{ request }}` context is powerful, but difficult to validate

  (Kyverno can't know ahead of time how it will be populated)

- Advanced policies (with conditionals) have unique, exotic syntax:
  ```yaml
  spec:
    =(volumes):
      =(hostPath):
        path: "!/var/run/docker.sock"
  ```

- Writing and validating policies can be difficult

---

class: extra-details

## Pods created by controllers

- When e.g. a ReplicaSet or DaemonSet creates a pod, it "owns" it

  (the ReplicaSet or DaemonSet is listed in the Pod's `.metadata.ownerReferences`)

- Kyverno treats these Pods differently

- If my understanding of the code is correct (big *if*):

  - it skips validation for "owned" Pods

  - instead, it validates their controllers

  - this way, Kyverno can report errors on the controller instead of the pod

- This can be a bit confusing when testing policies on such pods!

???

@@ -1,275 +0,0 @@
## Running your own lab environments

- To practice outside of live classes, you will need your own cluster

- We'll give you 4 possibilities, depending on your goals:

  - level 0 (no installation required)

  - level 1 (local development cluster)

  - level 2 (cluster with multiple nodes)

  - level 3 ("real" cluster with all the bells and whistles)

---

## Level 0

- Use a free, cloud-based, environment

- Pros: free, and nothing to install locally

- Cons: lots of limitations

  - requires online access

  - resources (CPU, RAM, disk) are limited

  - provides a single-node cluster

  - by default, only available through the online IDE
    <br/>(extra steps required to use local tools and IDE)

  - networking stack is different from a normal cluster

  - cluster might be automatically destroyed once in a while

---

## When is it a good match?

- Great for your first steps with Kubernetes

- Convenient for "bite-sized" learning

  (a few minutes at a time without spending time on installs and setup)

- Not-so-great beyond your first steps

- We recommend "leveling up" to a local cluster ASAP!
---

## How to obtain it

- We prepared a "Dev container configuration"

  (that you can use with GitHub Codespaces)

- This requires a GitHub account

  (no credit card or personal information is needed, though)

- Option 1: [follow that link][codespaces]

- Option 2: go to [this repo][repo], click on `<> Code v` and `Create codespace on main`

---

## Level 1

- Install a local Kubernetes dev cluster

- Pros: free, can work with local tools

- Cons:

  - usually, you get a one-node cluster
    <br/>(but some tools let you create multiple nodes)

  - resources can be limited
    <br/>(depends on how much CPU/RAM you have on your machine)

  - cluster is on a private network
    <br/>(not great for labs involving load balancers, ingress controllers...)

  - support for persistent volumes might be limited
    <br/>(or non-existent)

---

## When is it a good match?

- Ideal for most classes and labs

  (from basic to advanced)

- Notable exceptions:

  - when you need multiple "real" nodes
    <br/>(e.g. resource scheduling, cluster autoscaling...)

  - when you want to expose things to the outside world
    <br/>(e.g. ingress, gateway API, cert-manager...)

- Very easy to reset the environment to a clean slate

- Great way to prepare a lab or demo before executing it on a "real" cluster

---

## How to obtain it

- There are many options available to run local Kubernetes clusters!

- If you already have Docker up and running:

  *check [KinD] or [k3d]*

- Otherwise:

  *check [Docker Desktop][docker-desktop] or [Rancher Desktop][rancher-desktop]*

- There are also other options; this is just a shortlist!

[KinD]: https://kind.sigs.k8s.io/
[k3d]: https://k3d.io/
[docker-desktop]: https://docs.docker.com/desktop/use-desktop/kubernetes/
[rancher-desktop]: https://docs.rancherdesktop.io/ui/preferences/kubernetes/
---

## Level 2

- Install a Kubernetes cluster on a few machines

  (physical machines, virtual machines, cloud, on-premises...)

- Pros:

  - very flexible; works almost anywhere (cloud VMs, home lab...)

  - can even run "real" applications (serving real traffic)

- Cons:

  - typically costs some money (hardware investment or cloud costs)

  - still missing a few things compared to a "real" cluster
    <br/>(cloud controller manager, storage class, control plane high availability...)

---

## When is it a good match?

- If you already have a "home lab" or a lab at work

  (because the machines already exist)

- If you want more visibility and/or control:

  - enable alpha/experimental options and features

  - start, stop, view logs... of individual components

- If you want multiple nodes to experiment with scheduling, autoscaling...

- To host applications that remain available when your laptop is offline :)

---

## How to obtain it

- Option 1:

  *provision a few machines; [install `kubeadm`][kubeadm]; use `kubeadm` to install the cluster*

- Option 2:

  *use [`labctl`][labctl] to automate the previous steps*

  *(labctl supports [10+ public and private cloud platforms][labctl-vms])*

- Option 3:

  *use the Kubernetes distro of your choice!*

---

## Level 3

- Use a managed Kubernetes cluster

- Pros:

  - it's the real deal!

- Cons:

  - recurring cloud costs

---

## When is it a good match?

- If you want a highly-available cluster and control plane

- To have all the cloud features

  (`LoadBalancer` services, `StorageClass` for stateful apps, cluster autoscaling...)

- To host your first production stacks

---

## How to obtain it

- Option 1:

  *use the CLI / Web UI / Terraform... for your cloud provider*

- Option 2:

  *use [`labctl`][labctl] to provision a cluster with Terraform/OpenTofu*

---

## What's `labctl`?

- `labctl` is the tool that we use to provision virtual machines and clusters for live classes

- It can create and configure hundreds of VMs and clusters in a few minutes

- It supports 10+ cloud providers

- It's very useful if you need to provision many clusters

  (e.g. to run your own workshop with your team!)

- It can also be used to provision a single cluster quickly

  (for testing or educational purposes)

- Its Terraform configurations can also be useful on their own

  (e.g. as a base when building your own infra-as-code)

---

## Our Kubernetes toolbox

- We're going to use a lot of different tools

  (kubectl, stern, helm, k9s, krew, and many more)

- We suggest that you install them progressively

  (when we introduce them, if you think they'll be useful to you!)

- We have also prepared a container image: [jpetazzo/shpod]

- `shpod` contains 30+ Docker and Kubernetes tools

  (along with shell customizations like prompt, completion...)

- You can use it to work with your Kubernetes clusters

- It can also be used as an SSH server if needed

[codespaces]: https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=37004081&skip_quickstart=true
[repo]: https://github.com/jpetazzo/container.training
[kubeadm]: https://kubernetes.io/docs/reference/setup-tools/kubeadm/
[labctl]: https://github.com/jpetazzo/container.training/tree/main/prepare-labs
[labctl-vms]: https://github.com/jpetazzo/container.training/tree/main/prepare-labs/terraform/virtual-machines
[jpetazzo/shpod]: https://github.com/jpetazzo/shpod

@@ -1,42 +0,0 @@

## Where are we going to run our containers?

---

class: pic

![](images/about-self-paced-labs.jpg)

---

## If you're attending a live class

- Each person gets a private lab environment

  (depending on the scenario, this will be one VM, one cluster, multiple clusters...)

- The instructor will tell you how to connect to your environment

- Your lab environments will be available for the duration of the workshop

  (check with your instructor to know exactly when they'll be shut down)
---

## Why don't we run containers locally?

- Setting up a local Kubernetes cluster can take time

  (and the procedure can differ from one system to another)

- On some systems, it might be impossible

  (due to restrictive IT policies, lack of hardware support...)

- Some labs require a "real" cluster

  (e.g. multiple nodes to demonstrate failover, placement policies...)

- For on-site classes, it can stress the local network

  *"The whole team downloaded all these container images from the WiFi!
  <br/>... and it went great!"* (Literally no-one ever)

@@ -1,17 +1,13 @@

## Pre-requirements

- Familiarity with the UNIX command-line

  (navigating directories, editing files, using `kubectl`)

- Hands-on experience working with containers

  (building images, running them; doesn't matter how exactly)

- Kubernetes concepts

  (pods, deployments, services, labels, selectors)

- Ideally, you already have access to a Kubernetes cluster
- Hands-on experience working with containers

  (even if it's just a local one with KinD, minikube, etc.)
  (building images, running them; doesn't matter how exactly)

- Familiarity with the UNIX command-line

  (navigating directories, editing files, using `kubectl`)

@@ -38,7 +38,7 @@

  (without doing `helm repo add` first)

- Let's use the same name for the release, the namespace...:
- Otherwise, keep the same naming strategy:
  ```bash
  helm upgrade --install kube-prometheus-stack kube-prometheus-stack \
      --namespace kube-prometheus-stack --create-namespace \
  ```

@@ -56,39 +56,17 @@

## Exposing Grafana

- Let's do this only if we have an ingress controller and a domain name!

  (we can also skip this and come back to it later)

- Create an Ingress for Grafana:
- Let's create an Ingress for Grafana
  ```bash
  kubectl create ingress --namespace kube-prometheus-stack grafana \
      --rule=grafana.`cloudnative.party`/*=kube-prometheus-stack-grafana:80
  ```

  (make sure to use *your* domain name above)
  (as usual, make sure to use *your* domain name above)

- Connect to Grafana

---

## Exposing Grafana without Ingress

- What if we don't have an ingress controller?

- We can use a `NodePort` service instead

- Option 1: `kubectl edit` or `kubectl patch` the service

  (it's `kube-prometheus-stack-grafana`)

- Option 2: pass the correct value to `helm upgrade --install`

  (check the [chart values][kps-values] to find the right one!)

- We can also use `kubectl port-forward`, or a `LoadBalancer` service!

[kps-values]: https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack?modal=values
(remember that the DNS record might take a few minutes to come up)
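
For example, option 1 could be a one-liner like this (a sketch; the service name assumes the release name used earlier):

```bash
# Switch the Grafana service to NodePort with a strategic merge patch
kubectl patch service kube-prometheus-stack-grafana \
    --namespace kube-prometheus-stack \
    --patch '{"spec": {"type": "NodePort"}}'
```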

---

@@ -131,8 +109,6 @@

- Feel free to explore the other dashboards!

- There won't be much data right now, but there will be more later

???

:EN:- Installing Prometheus and Grafana

@@ -61,8 +61,6 @@ class: pic

- This is available only when the underlying infrastructure provides some kind of
  "load balancer as a service"

  (or in some special cases with add-ons like [MetalLB])

- Each service of that type will typically cost a little bit of money

  (e.g. a few cents per hour on AWS or GCE)

@@ -71,8 +69,6 @@ class: pic

- In practice, it will often flow through a `NodePort` first

[MetalLB]: https://metallb.io/

---

class: pic

@@ -167,13 +163,11 @@ class: pic

- Our code needs to be changed to connect to that new port number

- Under the hood: `kube-proxy` sets up a bunch of port forwarding rules on our nodes
- Under the hood: `kube-proxy` sets up a bunch of `iptables` rules on our nodes

  (using `iptables`, `ipvs`, `nftables`... multiple implementations are available)
- Sometimes, it's the only available option for external traffic

- Very useful option for external traffic when `LoadBalancer` Services aren't available

  (e.g. some clusters deployed on-premises and/or with kubeadm)
  (e.g. most clusters deployed with kubeadm or on-premises)

---

@@ -1,309 +0,0 @@

# Taints and tolerations

- Kubernetes gives us many mechanisms to influence where Pods should run:

  taints and tolerations; node selectors; affinity; resource requests...

- Taints and tolerations are used to:

  - reserve a Node for special workloads (e.g. control plane, GPU, security...)

  - temporarily block a node (e.g. for maintenance, troubleshooting...)

  - evacuate a node that exhibits an issue

- In fact, the default failover mechanism on Kubernetes relies on "taint-based evictions"

- Let's dive into all that!

---

## Taints and tolerations

- A *taint* is an attribute added to a node

- It prevents pods from running on the node

- ... Unless they have a matching *toleration*

- Example: when deploying with `kubeadm`...

  - a taint is placed on the node dedicated to the control plane

  - the pods running the control plane have a matching toleration

---

## Checking taints on our nodes

Here are a few ways to view taints on our nodes:

```bash
kubectl describe nodes | grep ^Taints

kubectl get nodes -o custom-columns=NAME:metadata.name,TAINTS:spec.taints

kubectl get nodes -o json | jq '.items[] | [.metadata.name, .spec.taints]'
```

It's possible that your nodes have no taints at all.

(It's not an error or a problem.)

---

## Taint structure

- As shown by `kubectl explain node.spec.taints`, a taint has:

  - an `effect` (mandatory)

  - a `key` (mandatory)

  - a `value` (optional)

- Let's see what they mean!

---

## Taint `key` (mandatory)

- The `key` is an arbitrary string to identify a particular taint

- It can be interpreted in multiple ways, for instance:

  - a reservation for a special set of pods
    <br/>
    (e.g. "this node is reserved for the control plane")

  - a (hopefully temporary) error condition on the node
    <br/>
    (e.g. "this node's disk is full, do not start new pods there!")

  - a temporary "hold" on the node
    <br/>
    (e.g. "we're going to do a maintenance operation on that node")

---

## Taint `effect` (mandatory)

- `NoSchedule` = do not place new Pods on that node

  - existing Pods are unaffected

- `PreferNoSchedule` = try to not place new Pods on that node

  - place Pods on other nodes if possible

  - use case: cluster autoscaler trying to deprovision a node

- `NoExecute` = stop execution of Pods on that node

  - existing Pods are terminated (technically: evicted)

  - use case: node is in a critical state and workloads should be relocated

---

## Taint `value` (optional)

- This is an optional field

- A taint can exist with just a `key`, or `key` + `value`

- Tolerations can match a taint's `key`, or `key` + `value`

  (we're going to explain tolerations in just a minute!)
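
For example, here is what a taint reserving a node for GPU workloads might look like in a node's spec (the key and value below are arbitrary examples):

```yaml
spec:
  taints:
  - key: dedicated
    value: gpu
    effect: NoSchedule
```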

---

## Checking tolerations on our Pods

Here are a few ways to see tolerations on the Pods in the current Namespace:

```bash
kubectl get pods -o custom-columns=NAME:metadata.name,TOLERATIONS:spec.tolerations

kubectl get pods -o json |
  jq '.items[] | {"pod_name": .metadata.name} + .spec.tolerations[]'
```

This output will likely be very verbose.

Suggestion: try this...

- in a namespace with a few "normal" Pods (created by a Deployment)

- in `kube-system`, with a selector to see only CoreDNS Pods

---

## Toleration structure

- As shown by `kubectl explain pod.spec.tolerations`, a toleration has:

  - an `effect`

  - a `key`

  - an `operator`

  - a `value`

  - a `tolerationSeconds`

- All fields are optional, but they can't all be empty

- Let's see what they mean!

---

## Toleration `effect`

- Same meaning as the `effect` for taints

- If it's omitted, it means "tolerate all kinds of taints"

---

## Toleration `key`

- Same meaning as the `key` for taints

  ("tolerate the taints that have that specific `key`")

- Special case: the `key` can be omitted to indicate "match all keys"

- In that case, the `operator` must be `Exists`

  (it's not possible to omit both `key` and `operator`)

---

## Toleration `operator`

- Can be either `Equal` (the default value) or `Exists`

- `Equal` means:

  *match taints with the exact same `key` and `value` as this toleration*

- `Exists` means:

  *match taints with the same `key` as this toleration, but ignore the taints' `value`*

- As seen earlier, it's possible to specify `Exists` with an empty `key`; that means:

  *match taints with any `key` or `value`*

---

## Toleration `value`

- Will match taints with the same `value`
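
For example, a toleration matching the hypothetical `dedicated=gpu` taint shown earlier could be written like this in a pod spec:

```yaml
tolerations:
- key: dedicated
  operator: Equal
  value: gpu
  effect: NoSchedule
```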

---

## Toleration `tolerationSeconds`

- Applies only to `NoExecute` taints and tolerations

  (the ones that provoke an *eviction*, i.e. termination, of Pods)

- Indicates that a taint will be ignored for the given amount of time

- After that time has passed, the taint will take effect

  (and the Pod will be evicted and terminated)

- This is used notably for automated failover using *taint-based evictions*

  (and more generally speaking, to tolerate transient problems)

---

## Taint-based evictions

- Pods¹ automatically receive these two tolerations:

  ```yaml
  - key: node.kubernetes.io/not-ready
    effect: NoExecute
    operator: Exists
    tolerationSeconds: 300
  - key: node.kubernetes.io/unreachable
    effect: NoExecute
    operator: Exists
    tolerationSeconds: 300
  ```

- So, what's the effect of these tolerations? 🤔

.footnote[¹Except Pods created by DaemonSets, or Pods already specifying similar tolerations]

---

## Node `not-ready` or `unreachable`

- Nodes are supposed to check in with the control plane at regular intervals

  (by default, every 20 seconds)

- When a Node fails to report to the control plane:

  *the control plane adds the `node.kubernetes.io/unreachable` taint to that node*

- The taint is tolerated for 300 seconds

  (i.e. for 5 minutes, nothing happens)

- After that delay expires, the taint applies fully

  *Pods are evicted*

  *replacement Pods should be scheduled on healthy Nodes*

---

## Use-cases

- By default, ~5 minutes after a Node becomes unresponsive, its Pods get rescheduled

- That delay can be changed

  (by adding tolerations with the same `key`+`effect`+`operator` combo; see the example below)

- This means that we can specify:

  - higher delays for Pods that are "expensive" to move
    <br/>
    (e.g. because they hold a lot of state)

  - lower delays for Pods that should failover as quickly as possible
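
For example, to fail over after about 30 seconds instead of 5 minutes, a pod could carry this toleration, overriding the default one (the value is illustrative):

```yaml
tolerations:
- key: node.kubernetes.io/unreachable
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 30
```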

---

## Manipulating taints

- We can add/remove taints with the usual Kubernetes modification commands

  (e.g. `kubectl edit`, `kubectl patch`, `kubectl apply`...)

- There are also a few `kubectl` commands specifically for taints:

  `kubectl taint node NodeName key=val:effect` (`val` is optional)

  `kubectl taint node NodeName key:effect-` (the trailing `-` removes the taint)

  `kubectl cordon NodeName` / `kubectl uncordon NodeName`
  <br/>
  (adds or removes the taint `node.kubernetes.io/unschedulable:NoSchedule`)

- The command `kubectl drain` will do a `cordon` and then evict Pods on the Node
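
For instance (using an arbitrary example key):

```bash
# Add a taint to node2
kubectl taint node node2 dedicated=gpu:NoSchedule

# Remove it (note the trailing dash)
kubectl taint node node2 dedicated=gpu:NoSchedule-
```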

???

:EN:- Taints and tolerations
:FR:- Les "taints" et "tolerations"

@@ -19,18 +19,16 @@ exclude:

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- k8s/prereqs-advanced.md
- shared/handson.md
- k8s/labs-live.md
- shared/connecting.md
- k8s/labs-async.md
- shared/toc.md
-
- k8s/prereqs-advanced.md
- shared/handson.md
- k8s/architecture.md
#- k8s/internal-apis.md
- k8s/deploymentslideshow.md

@@ -19,19 +19,17 @@ exclude:

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- k8s/prereqs-advanced.md
- shared/handson.md
- k8s/labs-live.md
- shared/connecting.md
- k8s/labs-async.md
- shared/toc.md
# DAY 1
- - k8s/architecture.md
- - k8s/prereqs-advanced.md
- shared/handson.md
- k8s/architecture.md
- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc-easy.md

@@ -17,18 +17,16 @@ exclude:

content:
- shared/title.md
- logistics.md
- shared/chat-room-im.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- k8s/prereqs-advanced.md
- shared/handson.md
- k8s/labs-live.md
- shared/connecting.md
- k8s/labs-async.md
- shared/toc.md
- #1
- k8s/prereqs-advanced.md
- shared/handson.md
- k8s/architecture.md
- k8s/internal-apis.md
- k8s/deploymentslideshow.md

@@ -67,7 +65,6 @@ content:

- k8s/crd.md
- #6
- k8s/ingress-tls.md
#- k8s/ingress-setup.md
#- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
- k8s/gateway-api.md

@@ -77,8 +74,6 @@ content:

- #7
- k8s/admission.md
- k8s/kyverno.md
- k8s/kyverno-colors.md
- k8s/kyverno-ingress.md
- #8
- k8s/aggregation-layer.md
- k8s/metrics-server.md
@@ -18,18 +18,19 @@ exclude:

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- k8s/prereqs-basic.md
- shared/handson.md
- k8s/labs-live.md
- shared/connecting.md
- k8s/labs-async.md
- shared/toc.md
-
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md

@@ -65,7 +66,6 @@ content:

- k8s/rollout.md
- k8s/healthchecks.md
- k8s/ingress.md
#- k8s/ingress-setup.md
#- k8s/gateway-api.md
#- k8s/volumes.md
- k8s/configuration.md

@@ -17,19 +17,22 @@ exclude:

content:
- shared/title.md
- logistics.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- k8s/prereqs-basic.md
- shared/handson.md
- k8s/labs-live.md
- shared/connecting.md
- k8s/labs-async.md
- shared/toc.md
- - shared/sampleapp.md
- - shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- shared/composescale.md
#- shared/hastyconclusions.md

@@ -18,18 +18,19 @@ exclude:

content:
- shared/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- k8s/prereqs-basic.md
- shared/handson.md
#- k8s/labs-live.md
#- shared/connecting.md
- k8s/labs-async.md
- shared/toc.md
-
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md

@@ -70,7 +71,6 @@ content:

- shared/hastyconclusions.md
- k8s/daemonset.md
#- k8s/exercise-yaml.md
- k8s/taints-and-tolerations.md
-
- k8s/rollout.md
- k8s/healthchecks.md

@@ -83,7 +83,6 @@ content:

- k8s/kubectlproxy.md
-
- k8s/ingress.md
- k8s/ingress-setup.md
- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
- k8s/ingress-tls.md

@@ -102,7 +101,6 @@ content:

#- k8s/exercise-helm.md
- k8s/gitlab.md
- k8s/ytt.md
- k8s/harbor.md
-
- k8s/netpol.md
- k8s/authn-authz.md

@@ -156,8 +154,6 @@ content:

- k8s/kuik.md
- k8s/sealed-secrets.md
- k8s/kyverno.md
- k8s/kyverno-colors.md
- k8s/kyverno-ingress.md
- k8s/eck.md
- k8s/finalizers.md
- k8s/owners-and-dependents.md

@@ -18,18 +18,19 @@ exclude:

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/about-slides.md
- k8s/prereqs-basic.md
- shared/handson.md
- k8s/labs-live.md
- shared/connecting.md
- k8s/labs-async.md
- shared/toc.md
-
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md

@@ -80,7 +81,6 @@ content:

-
- k8s/namespaces.md
- k8s/ingress.md
#- k8s/ingress-setup.md
#- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
#- k8s/ingress-tls.md
@@ -67,11 +67,11 @@

(except on the last day)

- Thursday: 15:00-16:00
- Tuesday: 15:00-16:00

- Friday: 14:00-14:30
- Wednesday: 15:30-16:30

- Monday: 16:00-17:00
- Thursday: 16:00-17:00

<!--

70 slides/logistics-m5.md Normal file

@@ -0,0 +1,70 @@
## Introductions (in 🇫🇷)

- Hello!

- On stage: Jérôme + Ludovic

- Backstage: Alexandre, Antoine, Aurélien (x2), Benjamin (x2), David, Kostas, Nicolas, Paul, Sébastien, Thibault...

- Schedule: every day from 9am to 1pm

- We'll take a break around (approximately) 11am

- Don't hesitate to ask as many questions as you can!

- Use @@CHAT@@ for questions, to ask for help, etc.

[@alexbuisine]: https://twitter.com/alexbuisine
[EphemeraSearch]: https://ephemerasearch.com/
[@jpetazzo]: https://twitter.com/jpetazzo
[@jpetazzo@hachyderm.io]: https://hachyderm.io/@jpetazzo
[@s0ulshake]: https://twitter.com/s0ulshake
[Quantgene]: https://www.quantgene.com/

---

## The morning 15 minutes

- Each day, we'll start at 9am with a 15-minute mini-presentation

  (on a topic chosen together, not necessarily related to the training!)

- A chance to warm up the neurons with 🥐/☕️/🍊

  (before tackling the serious stuff)

- Then at 9:15am we dive into the main topic

---

## Hands-on exercises

- At the end of each morning, there is a concrete hands-on exercise

  (to put into practice what we've seen)

- The exercises are part of the training!

- They are designed to take between 15 minutes and 2 hours

  (depending on each person's knowledge and comfort level)

- Each morning will start with a review of the previous day's exercise

- We're here to help if you get stuck on an exercise!

---

## The video call

- If you have questions, or need personalized help...

- Join us on the video call, several afternoons during the week!

  - Tuesday: 15:00-16:00

  - Wednesday: 15:30-16:30

  - Thursday: 15:00-16:00

- On Jitsi ("visioconf" link on the training portal)
@@ -87,6 +87,26 @@ def flatten(titles):

def generatefromyaml(manifest, filename):
    manifest = yaml.safe_load(manifest)

    for k in manifest:
        override = os.environ.get("OVERRIDE_"+k)
        if override:
            manifest[k] = override

    for k in ["chat", "gitrepo", "slides", "title"]:
        if k not in manifest:
            manifest[k] = ""

    if "zip" not in manifest:
        if manifest["slides"].endswith('/'):
            manifest["zip"] = manifest["slides"] + "slides.zip"
        else:
            manifest["zip"] = manifest["slides"] + "/slides.zip"

    if "html" not in manifest:
        manifest["html"] = filename + ".html"

    markdown, titles = processcontent(manifest["content"], filename)
    logging.debug("Found {} titles.".format(len(titles)))
    toc = gentoc(titles)

@@ -101,39 +121,39 @@ def generatefromyaml(manifest, filename):

    exclude = ",".join('"{}"'.format(c) for c in exclude)

    # Insert build info. This is super hackish.

    markdown = markdown.replace(
        ".debug[",
        ".debug[\n```\n{}\n```\n\nThese slides have been built from commit: {}\n\n".format(dirtyfiles, commit),
        1)

    markdown = markdown.replace("@@TITLE@@", manifest["title"].replace("\n", "<br/>"))

    html = open("workshop.html").read()
    html = html.replace("@@TITLE@@", manifest["title"].replace("\n", " "))
    html = html.replace("@@MARKDOWN@@", markdown)
    html = html.replace("@@EXCLUDE@@", exclude)
    html = html.replace("@@CHAT@@", manifest["chat"])
    html = html.replace("@@GITREPO@@", manifest["gitrepo"])
    html = html.replace("@@SLIDES@@", manifest["slides"])
    html = html.replace("@@ZIP@@", manifest["zip"])
    html = html.replace("@@HTML@@", manifest["html"])
    html = html.replace("@@TITLE@@", manifest["title"].replace("\n", " "))
    html = html.replace("@@SLIDENUMBERPREFIX@@", manifest.get("slidenumberprefix", ""))
    return html

def processAtAtStrings(text):
    text = text.replace("@@CHAT@@", manifest["chat"])
    text = text.replace("@@GITREPO@@", manifest["gitrepo"])
    text = text.replace("@@SLIDES@@", manifest["slides"])
    text = text.replace("@@ZIP@@", manifest["zip"])
    text = text.replace("@@HTML@@", manifest["html"])
    text = text.replace("@@TITLE@@", manifest["title"].replace("\n", "<br/>"))
    # Process @@LINK[file] and @@INCLUDE[file] directives
    local_anchor_path = ".."
    # FIXME use dynamic repo and branch?
    online_anchor_path = "https://github.com/jpetazzo/container.training/tree/main"
    for atatlink in re.findall(r"@@LINK\[[^]]*\]", text):
    online_anchor_path = "https://github.com/jpetazzo/container.training/tree/master"
    for atatlink in re.findall(r"@@LINK\[[^]]*\]", html):
        logging.debug("Processing {}".format(atatlink))
        file_name = atatlink[len("@@LINK["):-1]
        text = text.replace(atatlink, "[{}]({}/{})".format(file_name, online_anchor_path, file_name))
    for atatinclude in re.findall(r"@@INCLUDE\[[^]]*\]", text):
        html = html.replace(atatlink, "[{}]({}/{})".format(file_name, online_anchor_path, file_name))
    for atatinclude in re.findall(r"@@INCLUDE\[[^]]*\]", html):
        logging.debug("Processing {}".format(atatinclude))
        file_name = atatinclude[len("@@INCLUDE["):-1]
        file_path = os.path.join(local_anchor_path, file_name)
        text = text.replace(atatinclude, open(file_path).read())
    return text
        html = html.replace(atatinclude, open(file_path).read())
    return html

# Maps a title (the string just after "^# ") to its position in the TOC

@@ -193,14 +213,7 @@ def processcontent(content, filename):

        content += "\n" + slidefooter
        return (content, titles)
    if os.path.isfile(content):
        markdown = open(content).read()
        markdown = processAtAtStrings(markdown)
        fragmentfile = os.path.join("fragments", content)
        fragmentdir = os.path.dirname(fragmentfile)
        os.makedirs(fragmentdir, exist_ok=True)
        with open(fragmentfile, "w") as f:
            f.write(markdown)
        return processcontent(markdown, content)
        return processcontent(open(content).read(), content)
    logging.warning("Content spans only one line (it's probably a file name) but no file found: {}".format(content))
    if isinstance(content, list):
        subparts = [processcontent(c, filename) for c in content]

@@ -258,22 +271,5 @@ else:

    else:
        manifest = open(filename)
    logging.info("Processing {}...".format(filename))

    manifest = yaml.safe_load(manifest)
    for k in manifest:
        override = os.environ.get("OVERRIDE_"+k)
        if override:
            manifest[k] = override
    for k in ["chat", "gitrepo", "slides", "title"]:
        if k not in manifest:
            manifest[k] = ""
    if "zip" not in manifest:
        if manifest["slides"].endswith('/'):
            manifest["zip"] = manifest["slides"] + "slides.zip"
        else:
            manifest["zip"] = manifest["slides"] + "/slides.zip"
    if "html" not in manifest:
        manifest["html"] = filename + ".html"

    sys.stdout.write(generatefromyaml(manifest, filename))
    logging.info("Processed {}.".format(filename))
@@ -23,9 +23,6 @@ content:

#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- k8s/prereqs-advanced.md
# Note: if we work on this later, we should refactor it
# to follow the same pattern as the other classes
# (i.e. use the k8s/labs-*.md files)
- k8s/handson-mlops.md
- shared/connecting.md
- k8s/mlops-headsup.md

@@ -60,8 +60,6 @@

## These slides are constantly updated

- They are maintained by [Jérôme Petazzoni](https://hachyderm.io/@jpetazzo/) and [multiple contributors](https://@@GITREPO@@/graphs/contributors)

- Feel free to check the GitHub repository for updates:

  https://@@GITREPO@@

@@ -91,27 +89,3 @@ class: extra-details

  - you want only the most essential information

- You can review these slides another time if you want, they'll be waiting for you ☺

---

## Slides ≠ documentation

- We tried to include a lot of information in these slides

- But they don't replace or compete with the documentation!

- Treat these slides as yet another pillar to support your learning experience

- Don't hesitate to read the documentation frequently

  ([Docker documentation][docker-docs], [Kubernetes documentation][k8s-docs])

- And online communities

  (e.g.: [Docker forums][docker-forums], [Docker on StackOverflow][docker-so], [Kubernetes on StackOverflow][k8s-so])

[docker-docs]: https://docs.docker.com/
[k8s-docs]: https://kubernetes.io/docs/
[docker-forums]: https://forums.docker.com/
[docker-so]: http://stackoverflow.com/questions/tagged/docker
[k8s-so]: http://stackoverflow.com/questions/tagged/kubernetes
@@ -1,25 +1,6 @@

## Connecting to our lab environment
class: in-person

- We need an SSH client

- On Linux, OS X, FreeBSD... you are probably all set; just use `ssh`

- On Windows, get one of these:

  - [putty](https://putty.software/)
  - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH)
  - [Git BASH](https://git-for-windows.github.io/)
  - [MobaXterm](http://mobaxterm.mobatek.net/)

- On Android, [JuiceSSH](https://juicessh.com/)
  ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh))
  works pretty well

---

## Testing the connection

- Your instructor will tell you where to find the IP address, login, and password
## Testing the connection to our lab environment

.lab[

@@ -50,69 +31,17 @@ You should see a prompt looking like this:

[A.B.C.D] (...) user@machine ~
$
```

If anything goes wrong — ask for help!

---

## Checking the lab environment

In Docker classes, run `docker version`.

The output should look like this:

.small[
```bash
Client:
 Version:           29.1.1
 API version:       1.52
 Go version:        go1.25.4 X:nodwarf5
 Git commit:        0aedba58c2
 Built:             Fri Nov 28 14:28:26 2025
 OS/Arch:           linux/amd64
 Context:           default

Server:
 Engine:
  Version:          29.1.1
  API version:      1.52 (minimum version 1.44)
  Go version:       go1.25.4 X:nodwarf5
  Git commit:       9a84135d52
  Built:            Fri Nov 28 14:28:26 2025
  OS/Arch:          linux/amd64
  Experimental:     false
...
```
]

---

## Checking the lab environment

In Kubernetes classes, run `kubectl get nodes`.

The output should look like this:

```bash
$ k get nodes
NAME    STATUS   ROLES           AGE    VERSION
node1   Ready    control-plane   7d6h   v1.34.0
node2   Ready    <none>          7d6h   v1.34.0
node3   Ready    <none>          7d6h   v1.34.0
node4   Ready    <none>          7d6h   v1.34.0
```

---

## If it doesn't work...

Ask an instructor or assistant to help you!

---
class: in-person

## `tailhist`

- The shell history of the instructor is available online in real time

- The instructor will share a special URL with you
- The instructor will provide you a "magic URL"

  (typically, the instructor's lab address on port 1088 or 30088)
@@ -128,6 +57,82 @@ Ask an instructor or assistant to help you!
|
||||
|
||||
---
|
||||
|
||||
## Doing or re-doing the workshop on your own?
|
||||
|
||||
- Use something like
|
||||
[Play-With-Docker](https://labs.play-with-docker.com/) or
|
||||
[Play-With-Kubernetes](https://training.play-with-kubernetes.com/)
|
||||
|
||||
Zero setup effort; but environment are short-lived and
|
||||
might have limited resources
|
||||
|
||||
- Create your own cluster (local or cloud VMs)
|
||||
|
||||
Small setup effort; small cost; flexible environments
|
||||
|
||||
- Create a bunch of clusters for you and your friends
|
||||
([instructions](https://@@GITREPO@@/tree/main/prepare-labs))
|
||||
|
||||
Bigger setup effort; ideal for group training
|
||||
|
||||
---
|
||||
|
||||
## For a consistent Kubernetes experience ...
|
||||
|
||||
- If you are using your own Kubernetes cluster, you can use [jpetazzo/shpod](https://github.com/jpetazzo/shpod)
|
||||
|
||||
- `shpod` provides a shell running in a pod on your own cluster
|
||||
|
||||
- It comes with many tools pre-installed (helm, stern...)
|
||||
|
||||
- These tools are used in many demos and exercises in these slides
|
||||
|
||||
- `shpod` also gives you completion and a fancy prompt
|
||||
|
||||
- It can also be used as an SSH server if needed
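
A minimal sketch of getting a `shpod` shell, assuming the manifest URL and pod
name documented in the [jpetazzo/shpod](https://github.com/jpetazzo/shpod)
README (check that repository for the authoritative instructions):

```bash
# Create the shpod namespace, pod, and associated RBAC objects...
kubectl apply -f https://shpod.in/shpod.yaml
# ...then attach to the pod to get an interactive shell
kubectl attach --namespace=shpod -it shpod
```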

---

class: self-paced

## Get your own Docker nodes

- If you already have some Docker nodes: great!

- If not: let's get some, thanks to Play-With-Docker

.lab[

- Go to https://labs.play-with-docker.com/

- Log in

- Create your first node

<!-- ```open https://labs.play-with-docker.com/``` -->

]

You will need a Docker ID to use Play-With-Docker.

(Creating a Docker ID is free.)

---

## We don't need to connect to ALL the nodes

- If your cluster has multiple nodes (e.g. `node1`, `node2`, ...):

  unless instructed, **all commands must be run from the first node**

- We don't need to check out/copy code or manifests on other nodes

- During normal operations, we do not need access to the other nodes

  (but we could log into these nodes to troubleshoot or examine stuff)

---

## Terminals

Once in a while, the instructions will say:

@@ -12,33 +12,17 @@ Misattributed to Benjamin Franklin

---

## Hands-on, you shall practice
## Hands-on sections

- Nobody ever became a Jedi by spending their lives reading Wookieepedia
- There will be *a lot* of examples and demos

- Someone can:
- We are going to build, ship, and run containers (and sometimes, clusters!)

  - read all the docs
- If you want, you can run all the examples and demos in your environment

  - watch all the videos
  (but you don't have to; it's up to you!)

  - attend all the workshops and live classes

- ...This won't be enough to become an expert!

- We think one needs to *put the concepts into practice* to truly memorize them

- But, how?

---

## Hands-on labs

- These slides include *tons* of demos, examples, and exercises

- Don't follow along passively; try to reproduce the demos and examples!

- Each time you see a gray rectangle like this, it indicates a demo or example
- All hands-on sections are clearly identified, like the gray rectangle below

.lab[

@@ -49,6 +33,134 @@ Misattributed to Benjamin Franklin

]

- Don't hesitate to try them in your environment
---

- Don't hesitate to improvise, deviate from the script... And see what happens!
class: in-person

## Where are we going to run our containers?

---

class: in-person, pic

![](images/containers-by-the-water.jpg)

---

## If you're attending a live training or workshop

- Each person gets a private lab environment

  (depending on the scenario, this will be one VM, one cluster, multiple clusters...)

- The instructor will tell you how to connect to your environment

- Your lab environments will be available for the duration of the workshop

  (check with your instructor to know exactly when they'll be shut down)

---

## Running your own lab environments

- If you are following a self-paced course...

- Or watching a replay of a recorded course...

- ...You will need to set up a local environment for the labs

- If you want to deliver your own training or workshop:

  - deployment scripts are available in the [prepare-labs] directory

  - you can use them to automatically deploy many lab environments

  - they support many different infrastructure providers

[prepare-labs]: https://github.com/jpetazzo/container.training/tree/main/prepare-labs

---

class: in-person

## Why don't we run containers locally?

- Installing this stuff can be hard on some machines

  (32-bit CPU or OS... Laptops without administrator access... etc.)

- *"The whole team downloaded all these container images from the WiFi!
  <br/>... and it went great!"* (Literally no one ever)

- All you need is a computer (or even a phone or tablet!), with:

  - an Internet connection

  - a web browser

  - an SSH client

---

class: in-person

## SSH clients

- On Linux, OS X, FreeBSD... you are probably all set

- On Windows, get one of these:

  - [putty](https://putty.software/)
  - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH)
  - [Git BASH](https://git-for-windows.github.io/)
  - [MobaXterm](http://mobaxterm.mobatek.net/)

- On Android, [JuiceSSH](https://juicessh.com/)
  ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh))
  works pretty well

- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your Internet connection tends to lose packets

---

class: in-person, extra-details

## What is this Mosh thing?

*You don't have to use Mosh or even know about it to follow along.
<br/>
We're just telling you about it because some of us think it's cool!*

- Mosh is "the mobile shell"

- It is essentially SSH over UDP, with roaming features

- It retransmits packets quickly, so it works great even on lossy connections

  (Like hotel or conference WiFi)

- It has intelligent local echo, so it works great even on high-latency connections

  (Like hotel or conference WiFi)

- It supports transparent roaming when your client IP address changes

  (Like when you hop from hotel to conference WiFi)

---

class: in-person, extra-details

## Using Mosh

- To install it: `(apt|yum|brew) install mosh`

- It has been pre-installed on the VMs that we are using

- To connect to a remote machine: `mosh user@host` (see the sketch after this list)

  (It is going to establish an SSH connection, then hand off to UDP)

- It requires UDP ports to be open

  (By default, it uses a UDP port between 60000 and 61000)
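
For instance (host names and port numbers below are placeholders; `-p` and
`--ssh` are standard Mosh options):

```bash
# Basic usage: just like ssh, but resilient to roaming and packet loss
mosh user@A.B.C.D

# If sshd listens on a non-default port, tell Mosh how to reach it
mosh --ssh="ssh -p 2222" user@A.B.C.D

# If the firewall only allows a specific UDP port, pin the server port
mosh -p 60001 user@A.B.C.D
```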

@@ -8,10 +8,12 @@

- a little bit of bash-fu (environment variables, loops)

- Hands-on experience working with containers
- Some Docker knowledge

  - building images, running them; doesn't matter how exactly
  - `docker run`, `docker ps`, `docker build`

- if you know `docker run`, `docker build`, `docker ps`, you're good to go!
- ideally, you know how to write a Dockerfile and build it
  <br/>
  (even if it's a `FROM` line and a couple of `RUN` commands; see the sketch below)

- It's totally OK if you are not a Docker expert!
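
To illustrate that last point, a Dockerfile really can be that small; here is
a hedged sketch (the base image and package are arbitrary examples):

```bash
# Write a two-line Dockerfile...
cat > Dockerfile <<'EOF'
FROM alpine
RUN apk add --no-cache curl
EOF
# ...and build it into an image
docker build -t my-first-image .
```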

@@ -16,7 +16,7 @@ Merci !

(mais pas les clusters cloud ; eux on les éteint très vite)

- N'oubliez pas de remplir les formulaires d'évaluation
- N'oubliez pas de remplier les formulaires d'évaluation

  (c'est pas pour nous, c'est une obligation légale😅)

slides/swarm-fullday.yml (new file, 72 lines)
@@ -0,0 +1,72 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced
- snap
- btp-auto
- benchmarking
- elk-manual
- prom-manual

content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
  - shared/handson.md
  - shared/connecting.md
  - swarm/versions.md
  - shared/sampleapp.md
  - shared/composescale.md
  - shared/hastyconclusions.md
  - shared/composedown.md
  - swarm/swarmkit.md
  - shared/declarative.md
  - swarm/swarmmode.md
  - swarm/creatingswarm.md
  #- swarm/machine.md
  - swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/hostingregistry.md
  - swarm/testingregistry.md
  - swarm/btp-manual.md
  - swarm/swarmready.md
  - swarm/stacks.md
  - swarm/cicd.md
  - swarm/updatingservices.md
  - swarm/rollingupdates.md
  - swarm/healthchecks.md
- - swarm/operatingswarm.md
  - swarm/netshoot.md
  - swarm/ipsec.md
  - swarm/swarmtools.md
  - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
- - swarm/logging.md
  - swarm/metrics.md
  - swarm/gui.md
  - swarm/stateful.md
  - swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
slides/swarm-halfday.yml (new file, 71 lines)
@@ -0,0 +1,71 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced
- snap
- btp-manual
- benchmarking
- elk-manual
- prom-manual

content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
  - shared/handson.md
  - shared/connecting.md
  - swarm/versions.md
  - shared/sampleapp.md
  - shared/composescale.md
  - shared/hastyconclusions.md
  - shared/composedown.md
  - swarm/swarmkit.md
  - shared/declarative.md
  - swarm/swarmmode.md
  - swarm/creatingswarm.md
  #- swarm/machine.md
  - swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  #- swarm/hostingregistry.md
  #- swarm/testingregistry.md
  #- swarm/btp-manual.md
  #- swarm/swarmready.md
  - swarm/stacks.md
  - swarm/cicd.md
  - swarm/updatingservices.md
  #- swarm/rollingupdates.md
  #- swarm/healthchecks.md
- - swarm/operatingswarm.md
  #- swarm/netshoot.md
  #- swarm/ipsec.md
  #- swarm/swarmtools.md
  - swarm/security.md
  #- swarm/secrets.md
  #- swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
  - swarm/logging.md
  - swarm/metrics.md
  #- swarm/stateful.md
  #- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -24,8 +24,9 @@ content:
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/handson.md
#- shared/connecting.md
- - shared/prereqs.md
  - shared/handson.md
  - shared/connecting.md
  - swarm/versions.md
- |
  name: part-1
slides/swarm-video.yml (new file, 75 lines)
@@ -0,0 +1,75 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- in-person
- btp-auto

content:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/handson.md
  - shared/connecting.md
  - swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/hostingregistry.md
  - swarm/testingregistry.md
  - swarm/btp-manual.md
  - swarm/swarmready.md
  - swarm/stacks.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
  #- swarm/netshoot.md
  #- swarm/swarmnbt.md
  - swarm/ipsec.md
  - swarm/updatingservices.md
  - swarm/rollingupdates.md
  #- swarm/healthchecks.md
  - swarm/nodeinfo.md
  - swarm/swarmtools.md
- - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
  #- swarm/logging.md
  #- swarm/metrics.md
  - swarm/stateful.md
  - swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,60 +1,43 @@
<!DOCTYPE html>
<html>
  <head>
    <title>Training Materials</title>
    <title>@@TITLE@@</title>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
    <link rel="stylesheet" href="workshop.css">
  </head>
  <body>
    <!--
    <div style="position: absolute; left: 20%; right: 20%; top: 30%;">
      <h1 style="font-size: 3em;">Loading ...</h1>
      The slides should show up here. If they don't, it might be
      because you are accessing this file directly from your filesystem.
      It needs to be served from a web server. You can try this:
      <pre>
      docker-compose up -d
      open http://localhost:8888/workshop.html     # on MacOS
      xdg-open http://localhost:8888/workshop.html # on Linux
      </pre>
      Once the slides are loaded, this notice disappears when you
      go full screen (e.g. by hitting "f").
    </div>
    -->
    <textarea id="source">@@MARKDOWN@@</textarea>
    <script src="remark.min.js" type="text/javascript">
    </script>
    <div id="loading">⏳️ Loading...</div>
    <textarea id="source" style="display: none;">@@MARKDOWN@@</textarea>
    <script type="module">
      if (window.location.search[0] === '?') {
        const contentName = window.location.search.substr(1);
        let contentText = "⚠️ Failed to load content!"
        for (const url of [
          `fragments/${contentName}.md`,
          `${contentName}.md`
        ]) {
          const contentResponse = await fetch(url);
          if (contentResponse.ok) {
            contentText = await contentResponse.text();
            break;
          }
        }
        document.querySelector('#source').textContent = contentText;
      }
      /*
      This tmpl() function helps us to use the same HTML file for entire
      decks (used for live classes and pre-processed by markmaker.py to
      replace @@-strings) and to display individual chapters (used for
      video recording and not pre-processed by markmaker.py, so we still
      have @@-strings in this HTML file in that case).
      */
      function tmpl(atString, templateValue, defaultValue) {
        if (atString[0] === "@") {
          return defaultValue;
        }
        return JSON.parse(templateValue);
      }
      var tmplEXCLUDE = tmpl('@@EXCLUDE@@', '[@@EXCLUDE@@]', []);
      var tmplTITLE = tmpl('@@TITLE@@', '"@@TITLE@@"', document.title);
      var tmplSLIDENUMBERPREFIX = tmpl('@@SLIDENUMBERPREFIX@@', '"@@SLIDENUMBERPREFIX@@"', "");
      document.title = tmplTITLE;
    <script type="text/javascript">
      var slideshow = remark.create({
        ratio: '16:9',
        highlightSpans: true,
        slideNumberFormat: `${tmplSLIDENUMBERPREFIX}%current%/%total%`,
        excludedClasses: tmplEXCLUDE
        slideNumberFormat: '@@SLIDENUMBERPREFIX@@%current%/%total%',
        excludedClasses: [@@EXCLUDE@@]
      });
      document.querySelector('#loading').style.display = 'none';
    </script>
    <script type="module">
      import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@11/dist/mermaid.esm.min.mjs';
      mermaid.initialize({ startOnLoad: false });
      slideshow.on('afterShowSlide', function (slide) {
        mermaid.run({
          nodes: document.querySelectorAll('div.remark-visible.mermaid'),
          nodes: document.querySelectorAll('div.remark-visible .mermaid'),
        });
      });
      // Reminder, if you want to tinker with mermaid,