Compare commits

..

2 Commits

Author SHA1 Message Date
Jérôme Petazzoni
af86f361aa 💥 HighFive 2022Q1 content update 2022-03-21 08:22:11 +01:00
Jérôme Petazzoni
025b281a2d ♻️ Switch diagram around 2022-03-21 08:21:52 +01:00
113 changed files with 3201 additions and 4230 deletions

8
.gitignore vendored
View File

@@ -6,7 +6,13 @@ prepare-vms/tags
prepare-vms/infra
prepare-vms/www
prepare-tf/tag-*
prepare-tf/.terraform*
prepare-tf/terraform.*
prepare-tf/stage2/*.tf
prepare-tf/stage2/kubeconfig.*
prepare-tf/stage2/.terraform*
prepare-tf/stage2/terraform.*
prepare-tf/stage2/externalips.*
slides/*.yml.html
slides/autopilot/state.yaml

View File

@@ -1,3 +1,2 @@
hostname frr
ip nht resolve-via-default
log stdout

View File

@@ -2,36 +2,30 @@ version: "3"
services:
bgpd:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel --no_zebra
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
restart: always
zebra:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh
entrypoint: vtysh -c "show ip bgp"
chmod:
image: alpine

View File

@@ -48,25 +48,20 @@ k8s_yaml('../k8s/dockercoins.yaml')
# The following line lets Tilt run with the default kubeadm cluster-admin context.
allow_k8s_contexts('kubernetes-admin@kubernetes')
# Note: the whole section below (to set up ngrok tunnels) is disabled,
# because ngrok now requires to set up an account to serve HTML
# content. So we can still use ngrok for e.g. webhooks and "raw" APIs,
# but not to serve web pages like the Tilt UI.
# This will run an ngrok tunnel to expose Tilt to the outside world.
# This is intended to be used when Tilt runs on a remote machine.
local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# # This will run an ngrok tunnel to expose Tilt to the outside world.
# # This is intended to be used when Tilt runs on a remote machine.
# local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# # This will wait until the ngrok tunnel is up, and show its URL to the user.
# # We send the output to /dev/tty so that it doesn't get intercepted by
# # Tilt, and gets displayed to the user's terminal instead.
# # Note: this assumes that the ngrok instance will be running on port 4040.
# # If you have other ngrok instances running on the machine, this might not work.
# local_resource(name='ngrok:showurl', cmd='''
# while sleep 1; do
# TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
# [ "$TUNNELS" ] && break
# done
# printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
# '''
# )
# This will wait until the ngrok tunnel is up, and show its URL to the user.
# We send the output to /dev/tty so that it doesn't get intercepted by
# Tilt, and gets displayed to the user's terminal instead.
# Note: this assumes that the ngrok instance will be running on port 4040.
# If you have other ngrok instances running on the machine, this might not work.
local_resource(name='ngrok:showurl', cmd='''
while sleep 1; do
TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
[ "$TUNNELS" ] && break
done
printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
'''
)

View File

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,8 +253,8 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
@@ -262,7 +262,7 @@ spec:
- --sidecar-host=http://127.0.0.1:8000
- --enable-skip-login
- --enable-insecure-login
image: kubernetesui/dashboard:v2.6.1
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -293,7 +293,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,15 +253,15 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.6.1
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -292,7 +292,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,15 +253,15 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.6.1
helm.sh/chart: kubernetes-dashboard-5.10.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.6.1
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -292,7 +292,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@@ -1,14 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
version: v1alpha1
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz

View File

@@ -1,20 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object

View File

@@ -1,32 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
toppings:
type: array
items:
type: string

View File

@@ -1,39 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
toppings:
type: array
items:
type: string
additionalPrinterColumns:
- jsonPath: .spec.sauce
name: Sauce
type: string
- jsonPath: .spec.toppings
name: Toppings
type: string

View File

@@ -1,40 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pizzas.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: pizzas
singular: pizza
kind: Pizza
shortNames:
- piz
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
required: [ sauce, toppings ]
properties:
sauce:
type: string
enum: [ red, white ]
toppings:
type: array
items:
type: string
additionalPrinterColumns:
- jsonPath: .spec.sauce
name: Sauce
type: string
- jsonPath: .spec.toppings
name: Toppings
type: string

View File

@@ -1,45 +0,0 @@
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: margherita
spec:
sauce: red
toppings:
- mozarella
- basil
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: quatrostagioni
spec:
sauce: red
toppings:
- artichoke
- basil
- mushrooms
- prosciutto
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: mehl31
spec:
sauce: white
toppings:
- goatcheese
- pear
- walnuts
- mozzarella
- rosemary
- honey
---
apiVersion: container.training/v1alpha1
kind: Pizza
metadata:
name: brownie
spec:
sauce: chocolate
toppings:
- nuts

View File

@@ -1,164 +0,0 @@
#! Define and use variables.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ "{}/hasher:{}".format(repository, tag)
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ "{}/rng:{}".format(repository, tag)
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ "{}/webui:{}".format(repository, tag)
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ "{}/worker:{}".format(repository, tag)
name: worker

View File

@@ -1,167 +0,0 @@
#! Define and use a function to set the deployment image.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
#@ def image(component):
#@ return "{}/{}:{}".format(repository, component, tag)
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker

View File

@@ -1,164 +0,0 @@
#! Define and use functions, demonstrating how to generate labels.
---
#@ repository = "dockercoins"
#@ tag = "v0.1"
#@ def image(component):
#@ return "{}/{}:{}".format(repository, component, tag)
#@ end
#@ def labels(component):
#@ return {
#@ "app": component,
#@ "container.training/generated-by": "ytt",
#@ }
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("redis")
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("redis")
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("rng")
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("rng")
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("webui")
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("webui")
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("worker")
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker

View File

@@ -1,162 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ def image(component):
#@ return "{}/{}:{}".format(data.values.repository, component, data.values.tag)
#@ end
#@ def labels(component):
#@ return {
#@ "app": component,
#@ "container.training/generated-by": "ytt",
#@ }
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: #@ image("hasher")
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("hasher")
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("redis")
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("redis")
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("rng")
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: #@ image("rng")
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("rng")
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("webui")
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: #@ image("webui")
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels: #@ labels("webui")
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels: #@ labels("worker")
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: #@ image("worker")
name: worker

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1

View File

@@ -1,54 +0,0 @@
---
#@ load("@ytt:data", "data")
---
#@ def Deployment(component, repository=data.values.repository, tag=data.values.tag):
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ component
container.training/generated-by: ytt
name: #@ component
spec:
replicas: 1
selector:
matchLabels:
app: #@ component
template:
metadata:
labels:
app: #@ component
spec:
containers:
- image: #@ repository + "/" + component + ":" + tag
name: #@ component
#@ end
---
#@ def Service(component, port=80, type="ClusterIP"):
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ component
container.training/generated-by: ytt
name: #@ component
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ component
type: #@ type
#@ end
---
--- #@ Deployment("hasher")
--- #@ Service("hasher")
--- #@ Deployment("redis", repository="library", tag="latest")
--- #@ Service("redis", port=6379)
--- #@ Deployment("rng")
--- #@ Service("rng")
--- #@ Deployment("webui")
--- #@ Service("webui", type="NodePort")
--- #@ Deployment("worker")
---

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1

View File

@@ -1,56 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")
---
#@ def component(name, repository=data.values.repository, tag=data.values.tag, port=None, type="ClusterIP"):
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
replicas: 1
selector:
matchLabels:
app: #@ name
template:
metadata:
labels:
app: #@ name
spec:
containers:
- image: #@ repository + "/" + name + ":" + tag
name: #@ name
#@ if/end port==80:
readinessProbe:
httpGet:
port: #@ port
#@ if port != None:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ name
type: #@ type
#@ end
#@ end
---
--- #@ template.replace(component("hasher", port=80))
--- #@ template.replace(component("redis", repository="library", tag="latest", port=6379))
--- #@ template.replace(component("rng", port=80))
--- #@ template.replace(component("webui", port=80, type="NodePort"))
--- #@ template.replace(component("worker"))
---

View File

@@ -1,4 +0,0 @@
#@data/values-schema
---
repository: dockercoins
tag: v0.1

View File

@@ -1,65 +0,0 @@
---
#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")
---
#@ def component(name, repository, tag, port=None, type="ClusterIP"):
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
replicas: 1
selector:
matchLabels:
app: #@ name
template:
metadata:
labels:
app: #@ name
spec:
containers:
- image: #@ repository + "/" + name + ":" + tag
name: #@ name
#@ if/end port==80:
readinessProbe:
httpGet:
port: #@ port
#@ if port != None:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ name
container.training/generated-by: ytt
name: #@ name
spec:
ports:
- port: #@ port
protocol: TCP
targetPort: #@ port
selector:
app: #@ name
type: #@ type
#@ end
#@ end
---
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
---
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component(**values))
#@ end
#@ end

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}

View File

@@ -1,26 +0,0 @@
#@ load("@ytt:data", "data")
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
replicas: 1
selector:
matchLabels:
app: #@ data.values.name
template:
metadata:
labels:
app: #@ data.values.name
spec:
containers:
- image: #@ data.values.repository + "/" + data.values.name + ":" + data.values.tag
name: #@ data.values.name
#@ if/end data.values.port==80:
readinessProbe:
httpGet:
port: #@ data.values.port

View File

@@ -1,7 +0,0 @@
#@data/values-schema
---
name: component
repository: dockercoins
tag: v0.1
port: 0
type: ClusterIP

View File

@@ -1,19 +0,0 @@
#@ load("@ytt:data", "data")
#@ if data.values.port > 0:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
ports:
- port: #@ data.values.port
protocol: TCP
targetPort: #@ data.values.port
selector:
app: #@ data.values.name
type: #@ data.values.type
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:library", "library")
#@ load("@ytt:template", "template")
#@
#@ component = library.get("component")
#@
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component.with_data_values(values).eval())
#@ end
#@ end

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}

View File

@@ -1,26 +0,0 @@
#@ load("@ytt:data", "data")
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
replicas: 1
selector:
matchLabels:
app: #@ data.values.name
template:
metadata:
labels:
app: #@ data.values.name
spec:
containers:
- image: #@ data.values.repository + "/" + data.values.name + ":" + data.values.tag
name: #@ data.values.name
#@ if/end data.values.port==80:
readinessProbe:
httpGet:
port: #@ data.values.port

View File

@@ -1,7 +0,0 @@
#@data/values-schema
---
name: component
repository: dockercoins
tag: v0.1
port: 0
type: ClusterIP

View File

@@ -1,19 +0,0 @@
#@ load("@ytt:data", "data")
#@ if data.values.port > 0:
---
apiVersion: v1
kind: Service
metadata:
labels:
app: #@ data.values.name
container.training/generated-by: ytt
name: #@ data.values.name
spec:
ports:
- port: #@ data.values.port
protocol: TCP
targetPort: #@ data.values.port
selector:
app: #@ data.values.name
type: #@ data.values.type
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:library", "library")
#@ load("@ytt:template", "template")
#@
#@ component = library.get("component")
#@
#@ defaults = {}
#@ for name in data.values:
#@ if name.startswith("_"):
#@ defaults.update(data.values[name])
#@ end
#@ end
#@ for name in data.values:
#@ if not name.startswith("_"):
#@ values = dict(name=name)
#@ values.update(defaults)
#@ values.update(data.values[name])
--- #@ template.replace(component.with_data_values(values).eval())
#@ end
#@ end

View File

@@ -1,20 +0,0 @@
#@ load("@ytt:overlay", "overlay")
#@ def match():
kind: Deployment
metadata:
name: rng
#@ end
#@overlay/match by=overlay.subset(match())
---
spec:
template:
spec:
containers:
#@overlay/match by="name"
- name: rng
readinessProbe:
httpGet:
#@overlay/match missing_ok=True
path: /1

View File

@@ -1,19 +0,0 @@
#@data/values-schema
#! Entries starting with an underscore will hold default values.
#! Entries NOT starting with an underscore will generate a Deployment
#! (and a Service if a port number is set).
---
_default_:
repository: dockercoins
tag: v0.1
hasher:
port: 80
redis:
repository: library
tag: latest
rng:
port: 80
webui:
port: 80
type: NodePort
worker: {}

View File

@@ -1,25 +0,0 @@
#@ load("@ytt:overlay", "overlay")
#@ def match():
kind: Deployment
metadata:
name: worker
#@ end
#! This removes the number of replicas:
#@overlay/match by=overlay.subset(match())
---
spec:
#@overlay/remove
replicas:
#! This overrides it:
#@overlay/match by=overlay.subset(match())
---
spec:
#@overlay/match missing_ok=True
replicas: 10
#! Note that it's not necessary to remove the number of replicas.
#! We're just presenting both options here (for instance, you might
#! want to remove the number of replicas if you're using an HPA).

View File

@@ -2,3 +2,4 @@
base = "slides"
publish = "slides"
command = "./build.sh once"

View File

@@ -1,10 +1,11 @@
---
- hosts: nodes
become: yes
sudo: true
vars_files:
- vagrant.yml
tasks:
- name: clean up the home folder
file:
path: /home/vagrant/{{ item }}
@@ -23,23 +24,25 @@
- name: installing dependencies
apt:
name: apt-transport-https,ca-certificates,python3-pip,tmux
name: apt-transport-https,ca-certificates,python-pip,tmux
state: present
update_cache: true
- name: fetching docker repo key
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
keyserver: hkp://p80.pool.sks-keyservers.net:80
id: 58118E89F3A912897C070ADBF76221572C52609D
- name: adding docker repo
- name: adding package repos
apt_repository:
repo: deb https://download.docker.com/linux/ubuntu focal stable
repo: "{{ item }}"
state: present
with_items:
- deb https://apt.dockerproject.org/repo ubuntu-trusty main
- name: installing docker
apt:
name: docker-ce,docker-ce-cli,containerd.io,docker-compose-plugin
name: docker-engine
state: present
update_cache: true
@@ -53,7 +56,7 @@
lineinfile:
dest: /etc/default/docker
line: DOCKER_OPTS="--host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:55555"
regexp: "^#?DOCKER_OPTS=.*$"
regexp: '^#?DOCKER_OPTS=.*$'
state: present
register: docker_opts
@@ -63,14 +66,22 @@
state: restarted
when: docker_opts is defined and docker_opts.changed
- name: install docker-compose from official github repo
get_url:
url: https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64
dest: /usr/local/bin/docker-compose
mode: "u+x,g+x"
- name: performing pip autoupgrade
pip:
name: pip
state: latest
- name: installing virtualenv
pip:
name: virtualenv
state: latest
- name: Install Docker Compose via PIP
pip: name=docker-compose
- name:
file: path="/usr/local/bin/docker-compose"
file:
path="/usr/local/bin/docker-compose"
state=file
mode=0755
owner=vagrant
@@ -117,3 +128,5 @@
line: "127.0.0.1 localhost {{ inventory_hostname }}"
- regexp: '^127\.0\.1\.1'
line: "127.0.1.1 {{ inventory_hostname }}"

View File

@@ -1,12 +1,13 @@
---
vagrant:
default_box: ubuntu/focal64
default_box: ubuntu/trusty64
default_box_check_update: true
ssh_insert_key: false
min_memory: 256
min_cores: 1
instances:
- hostname: node1
private_ip: 10.10.10.10
memory: 1512
@@ -36,3 +37,6 @@ instances:
private_ip: 10.10.10.50
memory: 512
cores: 1

View File

@@ -1,6 +1,6 @@
resource "random_string" "_" {
length = 4
numeric = false
number = false
special = false
upper = false
}

View File

@@ -53,5 +53,5 @@ variable "location" {
# doctl kubernetes options versions -o json | jq -r .[].slug
variable "k8s_version" {
type = string
default = "1.22.8-do.1"
default = "1.21.5-do.0"
}

View File

@@ -3,7 +3,7 @@ resource "linode_lke_cluster" "_" {
tags = var.common_tags
# "region" is mandatory, so let's provide a default value if none was given.
region = var.location != null ? var.location : "eu-central"
k8s_version = local.k8s_version
k8s_version = var.k8s_version
pool {
type = local.node_type

View File

@@ -51,22 +51,7 @@ variable "location" {
# To view supported versions, run:
# linode-cli lke versions-list --json | jq -r .[].id
data "external" "k8s_version" {
program = [
"sh",
"-c",
<<-EOT
linode-cli lke versions-list --json |
jq -r '{"latest": [.[].id] | sort [-1]}'
EOT
]
}
variable "k8s_version" {
type = string
default = ""
}
locals {
k8s_version = var.k8s_version != "" ? var.k8s_version : data.external.k8s_version.result.latest
default = "1.21"
}

View File

@@ -56,5 +56,5 @@ variable "location" {
# scw k8s version list -o json | jq -r .[].name
variable "k8s_version" {
type = string
default = "1.23.6"
default = "1.22.2"
}

View File

@@ -145,15 +145,23 @@ resource "helm_release" "metrics_server_${index}" {
# but only if it's not already installed.
count = yamldecode(file("./flags.${index}"))["has_metrics_server"] ? 0 : 1
provider = helm.cluster_${index}
repository = "https://kubernetes-sigs.github.io/metrics-server/"
repository = "https://charts.bitnami.com/bitnami"
chart = "metrics-server"
version = "3.8.2"
version = "5.8.8"
name = "metrics-server"
namespace = "metrics-server"
create_namespace = true
set {
name = "args"
value = "{--kubelet-insecure-tls}"
name = "apiService.create"
value = "true"
}
set {
name = "extraArgs.kubelet-insecure-tls"
value = "true"
}
set {
name = "extraArgs.kubelet-preferred-address-types"
value = "InternalIP"
}
}
@@ -193,6 +201,7 @@ resource "tls_private_key" "cluster_admin_${index}" {
}
resource "tls_cert_request" "cluster_admin_${index}" {
key_algorithm = tls_private_key.cluster_admin_${index}.algorithm
private_key_pem = tls_private_key.cluster_admin_${index}.private_key_pem
subject {
common_name = "cluster-admin"

View File

@@ -17,7 +17,6 @@ These tools can help you to create VMs on:
- [Parallel SSH](https://github.com/lilydjwg/pssh)
(should be installable with `pip install git+https://github.com/lilydjwg/pssh`;
on a Mac, try `brew install pssh`)
- [yq](https://github.com/kislyuk/yq)
Depending on the infrastructure that you want to use, you also need to install
the CLI that is specific to that cloud. For OpenStack deployments, you will

View File

@@ -182,23 +182,9 @@ _cmd_clusterize() {
pssh "
if [ -f /etc/iptables/rules.v4 ]; then
sudo sed -i 's/-A INPUT -j REJECT --reject-with icmp-host-prohibited//' /etc/iptables/rules.v4
sudo netfilter-persistent flush
sudo netfilter-persistent start
fi"
# oracle-cloud-agent upgrades packages in the background.
# This breaks our deployment scripts, because when we invoke apt-get, it complains
# that the lock already exists (symptom: random "Exited with error code 100").
# Workaround: if we detect oracle-cloud-agent, remove it.
# But this agent seems to also take care of installing/upgrading
# the unified-monitoring-agent package, so when we stop the snap,
# it can leave dpkg in a broken state. We "fix" it with the 2nd command.
pssh "
if [ -d /snap/oracle-cloud-agent ]; then
sudo snap remove oracle-cloud-agent
sudo dpkg --remove --force-remove-reinstreq unified-monitoring-agent
fi"
# Copy settings and install Python YAML parser
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
pssh "
@@ -253,14 +239,6 @@ _cmd_docker() {
sudo ln -sfn /mnt/docker /var/lib/docker
fi
# containerd 1.6 breaks Weave.
# See https://github.com/containerd/containerd/issues/6921
sudo tee /etc/apt/preferences.d/containerd <<EOF
Package: containerd.io
Pin: version 1.5.*
Pin-Priority: 1000
EOF
# This will install the latest Docker.
sudo apt-get -qy install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
@@ -276,14 +254,13 @@ EOF
"
##VERSION## https://github.com/docker/compose/releases
COMPOSE_VERSION=v2.11.1
COMPOSE_PLATFORM='linux-$(uname -m)'
# Just in case you need Compose 1.X, you can use the following lines.
# (But it will probably only work for x86_64 machines.)
#COMPOSE_VERSION=1.29.2
#COMPOSE_PLATFORM='Linux-$(uname -m)'
if [ "$ARCHITECTURE" ]; then
COMPOSE_VERSION=v2.2.3
COMPOSE_PLATFORM='linux-$(uname -m)'
else
COMPOSE_VERSION=1.29.2
COMPOSE_PLATFORM='Linux-$(uname -m)'
fi
pssh "
set -e
### Install docker-compose.
@@ -450,9 +427,6 @@ EOF
pssh "
if i_am_first_node; then
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
#helm upgrade --install metrics-server \
# --repo https://kubernetes-sigs.github.io/metrics-server/ metrics-server \
# --namespace kube-system --set args={--kubelet-insecure-tls}
fi"
}
@@ -493,13 +467,12 @@ _cmd_kubetools() {
# Install kube-ps1
pssh "
set -e
if ! [ -d /opt/kube-ps1 ]; then
if ! [ -f /etc/profile.d/kube-ps1.sh ]; then
cd /tmp
git clone https://github.com/jonmosco/kube-ps1
sudo mv kube-ps1 /opt/kube-ps1
sudo cp kube-ps1/kube-ps1.sh /etc/profile.d/kube-ps1.sh
sudo -u $USER_LOGIN sed -i s/docker-prompt/kube_ps1/ /home/$USER_LOGIN/.bashrc &&
sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc <<EOF
. /opt/kube-ps1/kube-ps1.sh
KUBE_PS1_PREFIX=""
KUBE_PS1_SUFFIX=""
KUBE_PS1_SYMBOL_ENABLE="false"
@@ -622,16 +595,16 @@ EOF
fi"
##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
KUBESEAL_VERSION=0.17.4
#case $ARCH in
#amd64) FILENAME=kubeseal-linux-amd64;;
#arm64) FILENAME=kubeseal-arm64;;
#*) FILENAME=nope;;
#esac
pssh "
KUBESEAL_VERSION=v0.16.0
case $ARCH in
amd64) FILENAME=kubeseal-linux-amd64;;
arm64) FILENAME=kubeseal-arm64;;
*) FILENAME=nope;;
esac
[ "$FILENAME" = "nope" ] || pssh "
if [ ! -x /usr/local/bin/kubeseal ]; then
curl -fsSL https://github.com/bitnami-labs/sealed-secrets/releases/download/v$KUBESEAL_VERSION/kubeseal-$KUBESEAL_VERSION-linux-$ARCH.tar.gz |
sudo tar -zxvf- -C /usr/local/bin kubeseal
curl -fsSLo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/$KUBESEAL_VERSION/$FILENAME &&
sudo install kubeseal /usr/local/bin
kubeseal --version
fi"
}

View File

@@ -26,24 +26,12 @@ infra_start() {
info " Name: $NAME"
info " Instance type: $LINODE_TYPE"
ROOT_PASS="$(base64 /dev/urandom | cut -c1-20 | head -n 1)"
MAX_TRY=5
TRY=1
WAIT=1
while ! linode-cli linodes create \
linode-cli linodes create \
--type=${LINODE_TYPE} --region=${LINODE_REGION} \
--image=linode/ubuntu18.04 \
--authorized_keys="${LINODE_SSHKEY}" \
--root_pass="${ROOT_PASS}" \
--tags=${TAG} --label=${NAME}; do
warning "Failed to create VM (attempt $TRY/$MAX_TRY)."
if [ $TRY -ge $MAX_TRY ]; then
die "Giving up."
fi
info "Waiting $WAIT seconds and retrying."
sleep $WAIT
TRY=$(($TRY+1))
WAIT=$(($WAIT*2))
done
--tags=${TAG} --label=${NAME}
done
sep

View File

@@ -16,7 +16,7 @@ user_password: training
# For a list of old versions, check:
# https://kubernetes.io/releases/patch-releases/#non-active-branch-history
kubernetes_version: 1.20.15
kubernetes_version: 1.18.20
image:

68
slides/1.yml Normal file
View File

@@ -0,0 +1,68 @@
title: |
Docker Intensif
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-02-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- # DAY 2
- containers/Container_Networking_Basics.md
- containers/Local_Development_Workflow.md
- containers/Container_Network_Model.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- # DAY 3
- containers/Start_And_Attach.md
- containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- containers/Dockerfile_Tips.md
- containers/Advanced_Dockerfiles.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Exercise_Dockerfile_Advanced.md
- # DAY 4
- containers/Buildkit.md
- containers/Network_Drivers.md
- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
- containers/Orchestration_Overview.md
#- containers/Docker_Machine.md
#- containers/Init_Systems.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Containers_From_Scratch.md
#- containers/Container_Engines.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md
- shared/thankyou.md
#- containers/links.md

91
slides/2.yml Normal file
View File

@@ -0,0 +1,91 @@
title: |
Fondamentaux Kubernetes
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-02-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- exercises/k8sfundamentals-brief.md
- exercises/localcluster-brief.md
- exercises/healthchecks-brief.md
- shared/toc.md
- # 1
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
- # 2
- k8s/kubectl-run.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- exercises/k8sfundamentals-details.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
- # 3
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/namespaces.md
- k8s/yamldeploy.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- # 4
- k8s/authoring-yaml.md
- k8s/setup-overview.md
- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
- k8s/kubectlproxy.md
- exercises/localcluster-details.md
- # 5
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/rollout.md
- # 6
- k8s/healthchecks.md
#- k8s/healthchecks-more.md
- k8s/dashboard.md
- k8s/k9s.md
- k8s/tilt.md
- exercises/healthchecks-details.md
- # 7
- k8s/ingress.md
- k8s/ingress-tls.md
- # 8
- k8s/volumes.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/batch-jobs.md
- shared/thankyou.md

44
slides/3.yml Normal file
View File

@@ -0,0 +1,44 @@
title: |
Packaging d'applications
et CI/CD pour Kubernetes
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-02-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/prereqs.md
- shared/webssh.md
- shared/connecting.md
#- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/toc.md
-
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
-
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
-
- k8s/cert-manager.md
- k8s/gitlab.md
-
- |
# (Extra content)
- k8s/prometheus.md
- k8s/prometheus-stack.md

64
slides/4.yml Normal file
View File

@@ -0,0 +1,64 @@
title: |
Kubernetes Avancé
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-02-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/prereqs.md
- shared/webssh.md
- shared/connecting.md
- shared/toc.md
- exercises/sealed-secrets-brief.md
- exercises/kyverno-ingress-domain-name-brief.md
- #1
- k8s/demo-apps.md
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/sealed-secrets.md
- k8s/cert-manager.md
- k8s/ingress-tls.md
- exercises/sealed-secrets-details.md
- #2
- k8s/extending-api.md
- k8s/crd.md
- k8s/operators.md
- k8s/admission.md
- k8s/kyverno.md
- exercises/kyverno-ingress-domain-name-details.md
- #3
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
- k8s/apiserver-deepdive.md
- k8s/aggregation-layer.md
- k8s/hpa-v2.md
- #4
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/eck.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/owners-and-dependents.md
- k8s/events.md
- k8s/finalizers.md

58
slides/5.yml Normal file
View File

@@ -0,0 +1,58 @@
title: |
Opérer Kubernetes
chat: "[Mattermost](https://highfive.container.training/mattermost)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-02-enix.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
# DAY 1
-
- k8s/prereqs-admin.md
- k8s/architecture.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
-
- k8s/multinode.md
- k8s/cni.md
- k8s/interco.md
-
- k8s/cni-internals.md
- k8s/apilb.md
- k8s/internal-apis.md
- k8s/staticpods.md
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
#- k8s/cloud-controller-manager.md
-
- k8s/control-plane-auth.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- shared/thankyou.md
-
|
# (Extra content)
- k8s/apiserver-deepdive.md
- k8s/setup-overview.md
- k8s/setup-devel.md
- k8s/setup-managed.md
- k8s/setup-selfhosted.md

View File

@@ -2,7 +2,6 @@
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /kube-adv.yml.html 200!
# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
@@ -19,8 +18,8 @@
#/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
/hi5 https://enix.io/fr/services/formation/online/
/us https://www.ardanlabs.com/live-training-events/deploying-microservices-and-traditional-applications-with-kubernetes-march-28-2022.html
/uk https://skillsmatter.com/workshops/827-deploying-microservices-and-traditional-applications-with-kubernetes-with-jerome-petazzoni
# Survey form
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform
/ /highfive.html 200!

File diff suppressed because it is too large Load Diff

View File

@@ -3,7 +3,6 @@
"version": "0.0.1",
"dependencies": {
"express": "^4.16.2",
"socket.io": "^4.5.1",
"socket.io-client": "^4.5.1"
"socket.io": "^2.4.0"
}
}

View File

@@ -19,7 +19,7 @@ They abstract the connection details for this services, and can help with:
* fail over (how do I know to which instance of a replicated service I should connect?)
* load balancing (how do I spread my requests across multiple instances of a service?)
* load balancing (how to I spread my requests across multiple instances of a service?)
* authentication (what if my service requires credentials, certificates, or otherwise?)

View File

@@ -58,7 +58,7 @@ class: pic
- it uses different concepts (Compose services ≠ Kubernetes services)
- it needs a Docker Engine (although containerd support might be coming)
- it needs a Docker Engine (althought containerd support might be coming)
---

View File

@@ -111,7 +111,7 @@ CMD ["python", "app.py"]
RUN wget http://.../foo.tar.gz \
&& tar -zxf foo.tar.gz \
&& mv foo/fooctl /usr/local/bin \
&& rm -rf foo foo.tar.gz
&& rm -rf foo
...
```

View File

@@ -317,11 +317,9 @@ class: extra-details
## Trash your servers and burn your code
*(This is the title of a
[2013 blog post][immutable-deployments]
[2013 blog post](http://chadfowler.com/2013/06/23/immutable-deployments.html)
by Chad Fowler, where he explains the concept of immutable infrastructure.)*
[immutable-deployments]: https://web.archive.org/web/20160305073617/http://chadfowler.com/blog/2013/06/23/immutable-deployments/
--
* Let's majorly mess up our container.

View File

@@ -13,7 +13,7 @@
- ... Or be comfortable spending some time reading the Docker
[documentation](https://docs.docker.com/) ...
- ... And looking for answers in the [Docker forums](https://forums.docker.com),
- ... And looking for answers in the [Docker forums](forums.docker.com),
[StackOverflow](http://stackoverflow.com/questions/tagged/docker),
and other outlets

View File

@@ -4,6 +4,6 @@
(we will use the `rng` service in the dockercoins app)
- See what happens when the load increases
- See what happens when the load increses
(spoiler alert: it involves timeouts!)

View File

@@ -1,7 +0,0 @@
## Exercise — Network Policies
- Implement a system with 3 levels of security
(private pods, public pods, namespace pods)
- Apply it to the DockerCoins demo app

View File

@@ -1,63 +0,0 @@
# Exercise — Network Policies
We want to implement a generic network security mechanism.
Instead of creating one policy per service, we want to
create a fixed number of policies, and use a single label
to indicate the security level of our pods.
Then, when adding a new service to the stack, instead
of writing a new network policy for that service, we
only need to add the right label to the pods of that service.
---
## Specifications
We will use the label `security` to classify our pods.
- If `security=private`:
*the pod shouldn't accept any traffic*
- If `security=public`:
*the pod should accept all traffic*
- If `security=namespace`:
*the pod should only accept connections coming from the same namespace*
If `security` isn't set, assume it's `private`.
---
## Test setup
- Deploy a copy of the DockerCoins app in a new namespace
- Modify the pod templates so that:
- `webui` has `security=public`
- `worker` has `security=private`
- `hasher`, `redis`, `rng` have `security=namespace`
---
## Implement and test policies
- Write the network policies
(feel free to draw inspiration from the ones we've seen so far)
- Check that:
- you can connect to the `webui` from outside the cluster
- the application works correctly (shows 3-4 hashes/second)
- you cannot connect to the `hasher`, `redis`, `rng` services
- you cannot connect or even ping the `worker` pods

View File

@@ -1,9 +0,0 @@
## Exercise — RBAC
- Create two namespaces for users `alice` and `bob`
- Give each user full access to their own namespace
- Give each user read-only access to the other's namespace
- Let `alice` view the nodes of the cluster as well

View File

@@ -1,97 +0,0 @@
# Exercise — RBAC
We want to:
- Create two namespaces for users `alice` and `bob`
- Give each user full access to their own namespace
- Give each user read-only access to the other's namespace
- Let `alice` view the nodes of the cluster as well
---
## Initial setup
- Create two namespaces named `alice` and `bob`
- Check that if we impersonate Alice, we can't access her namespace yet:
```bash
kubectl --as alice get pods --namespace alice
```
---
## Access for Alice
- Grant Alice full access to her own namespace
(you can use a pre-existing Cluster Role)
- Check that Alice can create stuff in her namespace:
```bash
kubectl --as alice create deployment hello --image nginx --namespace alice
```
- But that she can't create stuff in Bob's namespace:
```bash
kubectl --as alice create deployment hello --image nginx --namespace bob
```
---
## Access for Bob
- Similarly, grant Bob full access to his own namespace
- Check that Bob can create stuff in his namespace:
```bash
kubectl --as bob create deployment hello --image nginx --namespace bob
```
- But that he can't create stuff in Alice's namespace:
```bash
kubectl --as bob create deployment hello --image nginx --namespace alice
```
---
## Read-only access
- Now, give Alice read-only access to Bob's namespace
- Check that Alice can view Bob's stuff:
```bash
kubectl --as alice get pods --namespace bob
```
- But that she can't touch this:
```bash
kubectl --as alice delete pods --namespace bob --all
```
- Likewise, give Bob read-only access to Alice's namespace
---
## Nodes
- Give Alice read-only access to the cluster nodes
(this will require creating a custom Cluster Role)
- Check that Alice can view the nodes:
```bash
kubectl --as alice get nodes
```
- But that Bob cannot:
```bash
kubectl --as bob get nodes
```
- And that Alice can't update nodes:
```bash
kubectl --as alice label nodes --all hello=world
```

110
slides/highfive.html Normal file
View File

@@ -0,0 +1,110 @@
<?xml version="1.0"?>
<html>
<head>
<style>
td {
background: #ccc;
padding: 1em;
}
</style>
</head>
<body>
<table>
<tr>
<td>Mardi 1er février 2022</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Mercredi 2 février 2022</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Jeudi 3 février 2022</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Vendredi 4 février 2022</td>
<td>
<a href="1.yml.html">Docker Intensif</a>
</td>
</tr>
<tr>
<td>Lundi 7 février 2022 <br/> Mardi 1er mars 2022</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Mardi 8 février 2022 <br/> Mercredi 2 mars 2022</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Mercredi 9 février 2022 <br/> Jeudi 3 mars 2022</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Jeudi 10 février 2022 <br/> Vendredi 4 mars 2022</td>
<td>
<a href="2.yml.html">Fondamentaux Kubernetes</a>
</td>
</tr>
<tr>
<td>Lundi 7 mars 2022</td>
<td>
<a href="3.yml.html">Packaging d'applications et CI/CD pour Kubernetes</a>
</td>
</tr>
<tr>
<td>Mardi 8 mars 2022</td>
<td>
<a href="3.yml.html">Packaging d'applications et CI/CD pour Kubernetes</a>
</td>
</tr>
<tr>
<td>Lundi 14 mars 2022</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Mardi 15 mars 2022</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Mercredi 16 mars 2022</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Jeudi 17 mars 2022</td>
<td>
<a href="4.yml.html">Kubernetes Avancé</a>
</td>
</tr>
<tr>
<td>Lundi 21 mars 2022</td>
<td>
<a href="5.yml.html">Opérer Kubernetes</a>
</td>
</tr>
<tr>
<td>Mardi 22 mars 2022</td>
<td>
<a href="5.yml.html">Opérer Kubernetes</a>
</td>
</tr>
</table>
</body>
</html>

View File

@@ -1,17 +0,0 @@
# Interlude
- As mentioned earlier:
*the content of this course will be adapted to suit your needs!*
- Please take a look at the form that we just shared in Slack
(you don't need to fill it *right now*)
- If there are parts that you are curious about, ask us now!
- We'll ask you to fill the form after today's session
(before the end of the day, basically, so we can process the results by tomorrow)
- Thank you!

71
slides/intro-fullday.yml Normal file
View File

@@ -0,0 +1,71 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
#- containers/Start_And_Attach.md
- containers/Naming_And_Inspecting.md
#- containers/Labels.md
- containers/Getting_Inside.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Container_Networking_Basics.md
#- containers/Network_Drivers.md
- containers/Local_Development_Workflow.md
- containers/Container_Network_Model.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Multi_Stage_Builds.md
#- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
#- containers/Docker_Machine.md
#- containers/Advanced_Dockerfiles.md
#- containers/Buildkit.md
#- containers/Init_Systems.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Container_Engines.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md
#- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md

View File

@@ -0,0 +1,72 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
content:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Windows_Containers.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Buildkit.md
- containers/Init_Systems.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Pods_Anatomy.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md

80
slides/intro-twodays.yml Normal file
View File

@@ -0,0 +1,80 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Dockerfile_Tips.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Exercise_Dockerfile_Advanced.md
-
- containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Start_And_Attach.md
- containers/Getting_Inside.md
- containers/Resource_Limits.md
- # DAY 2
- containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
-
- containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Installing_Docker.md
- containers/Container_Engines.md
- containers/Init_Systems.md
- containers/Advanced_Dockerfiles.md
- containers/Buildkit.md
-
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Orchestration_Overview.md
-
- shared/thankyou.md
- containers/links.md
#-
#- containers/Docker_Machine.md
#- containers/Ambassadors.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md

View File

@@ -203,12 +203,12 @@ What does that mean?
## Let's experiment a bit!
- The examples in this section require a Kubernetes cluster
(any local development cluster will suffice)
- For this section, connect to the first node of the `test` cluster
.lab[
- SSH to the first node of the test cluster
- Check that the cluster is operational:
```bash
kubectl get nodes

View File

@@ -168,7 +168,7 @@ class: extra-details
(`O=system:nodes`, `CN=system:node:name-of-the-node`)
- The Kubernetes API can act as a CA
- The Kubernetes API can act as a CA
(by wrapping an X509 CSR into a CertificateSigningRequest resource)
@@ -246,7 +246,7 @@ class: extra-details
(they don't require hand-editing a file and restarting the API server)
- A service account can be associated with a set of secrets
- A service account is associated with a set of secrets
(the kind that you can view with `kubectl get secrets`)
@@ -256,28 +256,6 @@ class: extra-details
---
## Service account tokens evolution
- In Kubernetes 1.21 and above, pods use *bound service account tokens*:
- these tokens are *bound* to a specific object (e.g. a Pod)
- they are automatically invalidated when the object is deleted
- these tokens also expire quickly (e.g. 1 hour) and get rotated automatically
- In Kubernetes 1.24 and above, unbound tokens aren't created automatically
- before 1.24, we would see unbound tokens with `kubectl get secrets`
- with 1.24 and above, these tokens can be created with `kubectl create token`
- ...or with a Secret with the right [type and annotation][create-token]
[create-token]: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#to-create-additional-api-tokens
---
class: extra-details
## Checking our authentication method
@@ -412,10 +390,6 @@ class: extra-details
It should be named `default-token-XXXXX`.
When running Kubernetes 1.24 and above, this Secret won't exist.
<br/>
Instead, create a token with `kubectl create token default`.
---
class: extra-details

View File

@@ -1,60 +0,0 @@
## CA injector - overview
- The Kubernetes API server can invoke various webhooks:
- conversion webhooks (registered in CustomResourceDefinitions)
- mutation webhooks (registered in MutatingWebhookConfigurations)
- validation webhooks (registered in ValidatingWebhookConfiguration)
- These webhooks must be served over TLS
- These webhooks must use valid TLS certificates
---
## Webhook certificates
- Option 1: certificate issued by a global CA
- doesn't work with internal services
<br/>
(their CN must be `<servicename>.<namespace>.svc`)
- Option 2: certificate issued by private CA + CA certificate in system store
- requires access to the API server certificate store
- generally not doable on managed Kubernetes clusters
- Option 3: certificate issued by private CA + CA certificate in `caBundle`
- pass the CA certificate in `caBundle` field
<br/>
(in CRD or webhook manifests)
- can be managed automatically by cert-manager
---
## CA injector - details
- Add annotation to *injectable* resource
(CustomResourceDefinition, MutatingWebhookConfiguration, ValidatingWebhookConfiguration)
- Annotation refers to the thing holding the certificate:
- `cert-manager.io/inject-ca-from: <namespace>/<certificate>`
- `cert-manager.io/inject-ca-from-secret: <namespace>/<secret>`
- `cert-manager.io/inject-apiserver-ca: true` (use API server CA)
- When injecting from a Secret, the Secret must have a special annotation:
`cert-manager.io/allow-direct-injection: "true"`
- See [cert-manager documentation][docs] for details
[docs]: https://cert-manager.io/docs/concepts/ca-injector/

View File

@@ -81,7 +81,7 @@
## What version are we running anyway?
- When I say, "I'm running Kubernetes 1.20", is that the version of:
- When I say, "I'm running Kubernetes 1.18", is that the version of:
- kubectl
@@ -157,15 +157,15 @@
## Kubernetes uses semantic versioning
- Kubernetes versions look like MAJOR.MINOR.PATCH; e.g. in 1.20.15:
- Kubernetes versions look like MAJOR.MINOR.PATCH; e.g. in 1.18.20:
- MAJOR = 1
- MINOR = 20
- PATCH = 15
- MINOR = 18
- PATCH = 20
- It's always possible to mix and match different PATCH releases
(e.g. 1.20.0 and 1.20.15 are compatible)
(e.g. 1.18.20 and 1.18.15 are compatible)
- It is recommended to run the latest PATCH release
@@ -181,9 +181,9 @@
- All components support a difference of one¹ MINOR version
- This allows live upgrades (since we can mix e.g. 1.20 and 1.21)
- This allows live upgrades (since we can mix e.g. 1.18 and 1.19)
- It also means that going from 1.20 to 1.22 requires going through 1.21
- It also means that going from 1.18 to 1.20 requires going through 1.19
.footnote[¹Except kubelet, which can be up to two MINOR behind API server,
and kubectl, which can be one MINOR ahead or behind API server.]
@@ -254,7 +254,7 @@ and kubectl, which can be one MINOR ahead or behind API server.]
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
```
- Look for the `image:` line, and update it to e.g. `v1.24.0`
- Look for the `image:` line, and update it to e.g. `v1.19.0`
]
@@ -308,11 +308,11 @@ and kubectl, which can be one MINOR ahead or behind API server.]
]
Note 1: kubeadm thinks that our cluster is running 1.24.0.
Note 1: kubeadm thinks that our cluster is running 1.19.0.
<br/>It is confused by our manual upgrade of the API server!
Note 2: kubeadm itself is still version 1.20.15..
<br/>It doesn't know how to upgrade to 1.21.X.
Note 2: kubeadm itself is still version 1.18.20..
<br/>It doesn't know how to upgrade to 1.19.X.
---
@@ -335,28 +335,28 @@ Note 2: kubeadm itself is still version 1.20.15..
]
Problem: kubeadm doesn't know how to handle
upgrades from version 1.20.
upgrades from version 1.18.
This is because we installed version 1.24 (or even later).
This is because we installed version 1.22 (or even later).
We need to install kubeadm version 1.21.X.
We need to install kubeadm version 1.19.X.
---
## Downgrading kubeadm
- We need to go back to version 1.21.X.
- We need to go back to version 1.19.X.
.lab[
- View available versions for package `kubeadm`:
```bash
apt show kubeadm -a | grep ^Version | grep 1.21
apt show kubeadm -a | grep ^Version | grep 1.19
```
- Downgrade kubeadm:
```
sudo apt install kubeadm=1.21.0-00
sudo apt install kubeadm=1.19.8-00
```
- Check what kubeadm tells us:
@@ -366,7 +366,7 @@ We need to install kubeadm version 1.21.X.
]
kubeadm should now agree to upgrade to 1.21.X.
kubeadm should now agree to upgrade to 1.19.8.
---
@@ -464,9 +464,9 @@ kubeadm should now agree to upgrade to 1.21.X.
```bash
for N in 1 2 3; do
ssh oldversion$N "
sudo apt install kubeadm=1.21.14-00 &&
sudo apt install kubeadm=1.19.8-00 &&
sudo kubeadm upgrade node &&
sudo apt install kubelet=1.21.14-00"
sudo apt install kubelet=1.19.8-00"
done
```
]
@@ -475,7 +475,7 @@ kubeadm should now agree to upgrade to 1.21.X.
## Checking what we've done
- All our nodes should now be updated to version 1.21.14
- All our nodes should now be updated to version 1.19.8
.lab[
@@ -492,7 +492,7 @@ class: extra-details
## Skipping versions
- This example worked because we went from 1.20 to 1.21
- This example worked because we went from 1.18 to 1.19
- If you are upgrading from e.g. 1.16, you will have to go through 1.17 first

View File

@@ -14,20 +14,22 @@
## Creating a CRD
- We will create a CRD to represent different recipes of pizzas
- We will create a CRD to represent the different species of coffee
- We will be able to run `kubectl get pizzas` and it will list the recipes
(arabica, liberica, and robusta)
- Creating/deleting recipes won't do anything else
- We will be able to run `kubectl get coffees` and it will list the species
(because we won't implement a *controller*)
- Then we can label, edit, etc. the species to attach some information
(e.g. the taste profile of the coffee, or whatever we want)
---
## First slice of pizza
## First shot of coffee
```yaml
@@INCLUDE[k8s/pizza-1.yaml]
@@INCLUDE[k8s/coffee-1.yaml]
```
---
@@ -46,9 +48,9 @@
---
## Second slice of pizza
## Second shot of coffee
- The next slide will show file @@LINK[k8s/pizza-2.yaml]
- The next slide will show file @@LINK[k8s/coffee-2.yaml]
- Note the `spec.versions` list
@@ -63,20 +65,20 @@
---
```yaml
@@INCLUDE[k8s/pizza-2.yaml]
@@INCLUDE[k8s/coffee-2.yaml]
```
---
## Baking some pizza
## Creating our Coffee CRD
- Let's create the Custom Resource Definition for our Pizza resource
- Let's create the Custom Resource Definition for our Coffee resource
.lab[
- Load the CRD:
```bash
kubectl apply -f ~/container.training/k8s/pizza-2.yaml
kubectl apply -f ~/container.training/k8s/coffee-2.yaml
```
- Confirm that it shows up:
@@ -93,57 +95,19 @@
The YAML below defines a resource using the CRD that we just created:
```yaml
kind: Pizza
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: napolitana
name: arabica
spec:
toppings: [ mozzarella ]
taste: strong
```
.lab[
- Try to create a few pizza recipes:
- Create a few types of coffee beans:
```bash
kubectl apply -f ~/container.training/k8s/pizzas.yaml
```
]
---
## Type validation
- Older versions of Kubernetes will accept our pizza definition as is
- Newer versions, however, will issue warnings about unknown fields
(and if we use `--validate=false`, these fields will simply be dropped)
- We need to improve our OpenAPI schema
(to add e.g. the `spec.toppings` field used by our pizza resources)
---
## Third slice of pizza
- Let's add a full OpenAPI v3 schema to our Pizza CRD
- We'll require a field `spec.sauce` which will be a string
- And a field `spec.toppings` which will have to be a list of strings
.lab[
- Update our pizza CRD:
```bash
kubectl apply -f ~/container.training/k8s/pizza-3.yaml
```
- Load our pizza recipes:
```bash
kubectl apply -f ~/container.training/k8s/pizzas.yaml
kubectl apply -f ~/container.training/k8s/coffees.yaml
```
]
@@ -156,48 +120,91 @@ spec:
.lab[
- View the pizza recipes that we just created:
- View the coffee beans that we just created:
```bash
kubectl get pizzas
kubectl get coffees
```
]
- Let's see how we can improve that display!
- We'll see in a bit how to improve that
---
## What can we do with CRDs?
There are many possibilities!
- *Operators* encapsulate complex sets of resources
(e.g.: a PostgreSQL replicated cluster; an etcd cluster...
<br/>
see [awesome operators](https://github.com/operator-framework/awesome-operators) and
[OperatorHub](https://operatorhub.io/) to find more)
- Custom use-cases like [gitkube](https://gitkube.sh/)
- creates a new custom type, `Remote`, exposing a git+ssh server
- deploy by pushing YAML or Helm charts to that remote
- Replacing built-in types with CRDs
(see [this lightning talk by Tim Hockin](https://www.youtube.com/watch?v=ji0FWzFwNhA))
---
## What's next?
- Creating a basic CRD is quick and easy
- But there is a lot more that we can (and probably should) do:
- improve input with *data validation*
- improve output with *custom columns*
- And of course, we probably need a *controller* to go with our CRD!
(otherwise, we're just using the Kubernetes API as a fancy data store)
---
## Additional printer columns
- We can tell Kubernetes which columns to show:
```yaml
additionalPrinterColumns:
- jsonPath: .spec.sauce
name: Sauce
type: string
- jsonPath: .spec.toppings
name: Toppings
type: string
```
- We can specify `additionalPrinterColumns` in the CRD
- There is an updated CRD in @@LINK[k8s/pizza-4.yaml]
- This is similar to `-o custom-columns`
(map a column name to a path in the object, e.g. `.spec.taste`)
```yaml
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
```
---
## Using additional printer columns
- Let's update our CRD!
- Let's update our CRD using @@LINK[k8s/coffee-3.yaml]
.lab[
- Update the CRD:
```bash
kubectl apply -f ~/container.training/k8s/pizza-4.yaml
kubectl apply -f ~/container.training/k8s/coffee-3.yaml
```
- Look at our Pizza resources:
- Look at our Coffee resources:
```bash
kubectl get pizzas
kubectl get coffees
```
]
@@ -208,26 +215,50 @@ Note: we can update a CRD without having to re-create the corresponding resource
---
## Better data validation
## Data validation
- Let's change the data schema so that the sauce can only be `red` or `white`
- CRDs are validated with the OpenAPI v3 schema that we specify
- This will be implemented by @@LINK[k8s/pizza-5.yaml]
(with older versions of the API, when the schema was optional,
<br/>
no schema = no validation at all)
.lab[
- Otherwise, we can put anything we want in the `spec`
- Update the Pizza CRD:
```bash
kubectl apply -f ~/container.training/k8s/pizza-5.yaml
```
- More advanced validation can also be done with admission webhooks, e.g.:
]
- consistency between parameters
- advanced integer filters (e.g. odd number of replicas)
- things that can change in one direction but not the other
---
## OpenAPI v3 schema example
This is what we have in @@LINK[k8s/coffee-3.yaml]:
```yaml
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
required: [ taste ]
```
---
## Validation *a posteriori*
- Some of the pizzas that we defined earlier *do not* pass validation
- Some of the "coffees" that we defined earlier *do not* pass validation
- How is that possible?
@@ -295,23 +326,15 @@ Note: we can update a CRD without having to re-create the corresponding resource
---
## Even better data validation
## What's next?
- If we need more complex data validation, we can use a validating webhook
- Generally, when creating a CRD, we also want to run a *controller*
- Use cases:
(otherwise nothing will happen when we create resources of that type)
- validating a "version" field for a database engine
- The controller will typically *watch* our custom resources
- validating that the number of e.g. coordination nodes is even
- preventing inconsistent or dangerous changes
<br/>
(e.g. major version downgrades)
- checking a key or certificate format or validity
- and much more!
(and take action when they are created/updated)
---
@@ -353,24 +376,6 @@ Note: we can update a CRD without having to re-create the corresponding resource
(unrelated to containers, clusters, etc.)
---
## What's next?
- Creating a basic CRD is relatively straightforward
- But CRDs generally require a *controller* to do anything useful
- The controller will typically *watch* our custom resources
(and take action when they are created/updated)
- Most serious use-cases will also require *validation web hooks*
- When our CRD data format evolves, we'll also need *conversion web hooks*
- Doing all that work manually is tedious; use a framework!
???
:EN:- Custom Resource Definitions (CRDs)

View File

@@ -1,62 +1,46 @@
# Healthchecks
- Containers can have *healthchecks* (also called "probes")
- Containers can have *healthchecks*
- There are three kinds of healthchecks, corresponding to different use-cases:
- There are three kinds of healthchecks, corresponding to very different use-cases:
`startupProbe`, `readinessProbe`, `livenessProbe`
- liveness = detect when a container is "dead" and needs to be restarted
- readiness = detect when a container is ready to serve traffic
- startup = detect if a container has finished booting
- These healthchecks are optional (we can use none, all, or some of them)
- Different probes are available:
- Different probes are available (HTTP request, TCP connection, program execution)
HTTP GET, TCP connection, arbitrary program execution, GRPC
- All these probes have a binary result (success/failure)
- Probes that aren't defined will default to a "success" result
- Let's see the difference and how to use them!
---
## Use-cases in brief
*My container takes a long time to boot before being able to serve traffic.*
→ use a `startupProbe` (but often a `readinessProbe` can also do the job)
*Sometimes, my container is unavailable or overloaded, and needs to e.g. be taken temporarily out of load balancer rotation.*
→ use a `readinessProbe`
*Sometimes, my container enters a broken state which can only be fixed by a restart.*
→ use a `livenessProbe`
---
## Liveness probes
## Liveness probe
*This container is dead, we don't know how to fix it, other than restarting it.*
- Check if the container is dead or alive
- Indicates if the container is dead or alive
- If Kubernetes determines that the container is dead:
- A dead container cannot come back to life
- it terminates the container gracefully
- If the liveness probe fails, the container is killed (destroyed)
- it restarts the container (unless the Pod's `restartPolicy` is `Never`)
(to make really sure that it's really dead; no zombies or undeads!)
- With the default parameters, it takes:
- What happens next depends on the pod's `restartPolicy`:
- up to 30 seconds to determine that the container is dead
- `Never`: the container is not restarted
- up to 30 seconds to terminate it
- `OnFailure` or `Always`: the container is restarted
---
## When to use a liveness probe
- To detect failures that can't be recovered
- To indicate failures that can't be recovered
- deadlocks (causing all requests to time out)
@@ -64,45 +48,47 @@
- Anything where our incident response would be "just restart/reboot it"
---
## Liveness probes gotchas
.warning[**Do not** use liveness probes for problems that can't be fixed by a restart]
- Otherwise we just restart our pods for no reason, creating useless load
.warning[**Do not** depend on other services within a liveness probe]
---
- Otherwise we can experience cascading failures
## Readiness probe (1)
(example: web server liveness probe that makes a request to a database)
*Make sure that a container is ready before continuing a rolling update.*
.warning[**Make sure** that liveness probes respond quickly]
- Indicates if the container is ready to handle traffic
- The default probe timeout is 1 second (this can be tuned!)
- When doing a rolling update, the Deployment controller waits for Pods to be ready
- If the probe takes longer than that, it will eventually cause a restart
(a Pod is ready when all the containers in the Pod are ready)
- Improves reliability and safety of rolling updates:
- don't roll out a broken version (that doesn't pass readiness checks)
- don't lose processing capacity during a rolling update
---
## Readiness probes
## Readiness probe (2)
*Sometimes, my container "needs a break".*
*Temporarily remove a container (overloaded or otherwise) from a Service load balancer.*
- Check if the container is ready or not
- A container can mark itself "not ready" temporarily
- If the container is not ready, its Pod is not ready
(e.g. if it's overloaded or needs to reload/restart/garbage collect...)
- If the Pod belongs to a Service, it is removed from its Endpoints
- If a container becomes "unready" it might be ready again soon
(it stops receiving new connections but existing ones are not affected)
- If the readiness probe fails:
- If there is a rolling update in progress, it might pause
- the container is *not* killed
(Kubernetes will try to respect the MaxUnavailable parameter)
- if the pod is a member of a service, it is temporarily removed
- As soon as the readiness probe succeeds again, everything goes back to normal
- it is re-added as soon as the readiness probe passes again
---
@@ -116,31 +102,67 @@
- To indicate temporary failure or unavailability
- runtime is busy doing garbage collection or (re)loading data
- application can only service *N* parallel connections
- new connections will be directed to other Pods
- runtime is busy doing garbage collection or initial data load
- To redirect new connections to other Pods
(e.g. fail the readiness probe when the Pod's load is too high)
---
## Startup probes
## Dependencies
*My container takes a long time to boot before being able to serve traffic.*
- If a web server depends on a database to function, and the database is down:
- After creating a container, Kubernetes runs its startup probe
- the web server's liveness probe should succeed
- The container will be considered "unhealthy" until the probe succeeds
- the web server's readiness probe should fail
- As long as the container is "unhealthy", its Pod...:
- Same thing for any hard dependency (without which the container can't work)
- is not added to Services' endpoints
.warning[**Do not** fail liveness probes for problems that are external to the container]
- is not considered as "available" for rolling update purposes
---
- Readiness and liveness probes are enabled *after* startup probe reports success
## Timing and thresholds
(if there is no startup probe, readiness and liveness probes are enabled right away)
- Probes are executed at intervals of `periodSeconds` (default: 10)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
.warning[If a probe takes longer than that, it is considered as a FAIL]
- A probe is considered successful after `successThreshold` successes (default: 1)
- A probe is considered failing after `failureThreshold` failures (default: 3)
- A probe can have an `initialDelaySeconds` parameter (default: 0)
- Kubernetes will wait that amount of time before running the probe for the first time
(this is important to avoid killing services that take a long time to start)
---
## Startup probe
*The container takes too long to start, and is killed by the liveness probe!*
- By default, probes (including liveness) start immediately
- With the default probe interval and failure threshold:
*a container must respond in less than 30 seconds, or it will be killed!*
- There are two ways to avoid that:
- set `initialDelaySeconds` (a fixed, rigid delay)
- use a `startupProbe`
- Kubernetes will run only the startup probe, and when it succeeds, run the other probes
---
@@ -156,296 +178,121 @@
---
## Startup probes gotchas
- When defining a `startupProbe`, we almost always want to adjust its parameters
(specifically, its `failureThreshold` - this is explained on the next slide)
- Otherwise, if the container fails to start within 30 seconds...
*Kubernetes terminates the container and restarts it!*
- Sometimes, it's easier/simpler to use a `readinessProbe` instead
(except when also using a `livenessProbe`)
---
## Timing and thresholds
- Probes are executed at intervals of `periodSeconds` (default: 10)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
.warning[If a probe takes longer than that, it is considered as a FAIL]
.warning[For liveness probes **and startup probes** this terminates and restarts the container]
- A probe is considered successful after `successThreshold` successes (default: 1)
- A probe is considered failing after `failureThreshold` failures (default: 3)
- All these parameters can be set independently for each probe
---
class: extra-details
## `initialDelaySeconds`
- A probe can have an `initialDelaySeconds` parameter (default: 0)
- Kubernetes will wait that amount of time before running the probe for the first time
- It is generally better to use a `startupProbe` instead
(but this parameter did exist before startup probes were implemented)
---
class: extra-details
## `readinessProbe` vs `startupProbe`
- A lot of blog posts / documentations / tutorials recommend readiness probes...
- ...even in scenarios where a startup probe would seem more appropriate!
- This is because startup probes are relatively recent
(they reached GA status in Kubernetes 1.20)
- When there is no `livenessProbe`, using a `readinessProbe` is simpler:
- a `startupProbe` generally requires to change the `failureThreshold`
- a `startupProbe` generally also requires a `readinessProbe`
- a single `readinessProbe` can fulfill both roles
---
## Different types of probes
- Kubernetes supports the following mechanisms:
- HTTP request
- `exec` (arbitrary program execution)
- specify URL of the request (and optional headers)
- `httpGet` (HTTP GET request)
- any status code between 200 and 399 indicates success
- `tcpSocket` (check if a TCP port is accepting connections)
- TCP connection
- `grpc` (standard [GRPC Health Checking Protocol][grpc])
- the probe succeeds if the TCP port is open
- All probes give binary results ("it works" or "it doesn't")
- arbitrary exec
- Let's see the specific details for each of them!
- a command is executed in the container
[grpc]: https://grpc.github.io/grpc/core/md_doc_health-checking.html
- exit status of zero indicates success
---
## `exec`
## Benefits of using probes
- Runs an arbitrary program *inside* the container
- Rolling updates proceed when containers are *actually ready*
(like with `kubectl exec` or `docker exec`)
(as opposed to merely started)
- The program must be available in the container image
- Containers in a broken state get killed and restarted
- Kubernetes uses the exit status of the program
(instead of serving errors or timeouts)
(standard UNIX convention: 0 = success, anything else = failure)
- Unavailable backends get removed from load balancer rotation
(thus improving response times across the board)
- If a probe is not defined, it's as if there was an "always successful" probe
---
## `exec` example
## Example: HTTP probe
When the worker is ready, it should create `/tmp/ready`.
<br/>
The following probe will give it 5 minutes to do so.
Here is a pod template for the `rng` web service of the DockerCoins app:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: queueworker
name: healthy-app
spec:
containers:
- name: worker
image: myregistry.../worker:v1.0
startupProbe:
exec:
command:
- test
- -f
- /tmp/ready
failureThreshold: 30
```
---
## Using shell constructs
- If we want to use pipes, conditionals, etc. we should invoke a shell
- Example:
```yaml
exec:
command:
- sh
- -c
- "curl http://localhost:5000/status | jq .ready | grep true"
```
---
## `httpGet`
- Make an HTTP GET request to the container
- The request will be made by Kubelet
(doesn't require extra binaries in the container image)
- `port` must be specified
- `path` and extra `httpHeaders` can be specified optionally
- Kubernetes uses HTTP status code of the response:
- 200-399 = success
- anything else = failure
---
## `httpGet` example
The following liveness probe restarts the container if it stops responding on `/healthz`:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: frontend
spec:
containers:
- name: frontend
image: myregistry.../frontend:v1.0
- name: myapp
image: myregistry.io/myapp:v1.0
livenessProbe:
httpGet:
path: /health
port: 80
path: /healthz
periodSeconds: 5
```
---
## `tcpSocket`
- Kubernetes checks if the indicated TCP port accepts connections
- There is no additional check
.warning[It's quite possible for a process to be broken, but still accept TCP connections!]
If the backend serves an error, or takes longer than 1s, 3 times in a row, it gets killed.
---
## `grpc`
## Example: exec probe
<!-- ##VERSION## -->
Here is a pod template for a Redis server:
- Available in beta since Kubernetes 1.24
```yaml
apiVersion: v1
kind: Pod
metadata:
name: redis-with-liveness
spec:
containers:
- name: redis
image: redis
livenessProbe:
exec:
command: ["redis-cli", "ping"]
```
- Leverages standard [GRPC Health Checking Protocol][grpc]
[grpc]: https://grpc.github.io/grpc/core/md_doc_health-checking.html
If the Redis process becomes unresponsive, it will be killed.
---
## Best practices for healthchecks
## Questions to ask before adding healthchecks
- Readiness probes are almost always beneficial
- Do we want liveness, readiness, both?
- don't hesitate to add them early!
(sometimes, we can use the same check, but with different failure thresholds)
- we can even make them *mandatory*
- Do we have existing HTTP endpoints that we can use?
- Be more careful with liveness and startup probes
- Do we need to add new endpoints, or perhaps use something else?
- they aren't always necessary
- Are our healthchecks likely to use resources and/or slow down the app?
- they can even cause harm
- Do they depend on additional services?
(this can be particularly tricky, see next slide)
---
## Readiness probes
## Healthchecks and dependencies
- Almost always beneficial
- Liveness checks should not be influenced by the state of external services
- Exceptions:
- All checks should reply quickly (by default, less than 1 second)
- web service that doesn't have a dedicated "health" or "ping" route
- Otherwise, they are considered to fail
- ...and all requests are "expensive" (e.g. lots of external calls)
- This might require to check the health of dependencies asynchronously
---
## Liveness probes
- If we're not careful, we end up restarting containers for no reason
(which can cause additional load on the cluster, cascading failures, data loss, etc.)
- Suggestion:
- don't add liveness probes immediately
- wait until you have a bit of production experience with that code
- then add narrow-scoped healthchecks to detect specific failure modes
- Readiness and liveness probes should be different
(different check *or* different timeouts *or* different thresholds)
---
## Startup probes
- Only beneficial for containers that need a long time to start
(more than 30 seconds)
- If there is no liveness probe, it's simpler to just use a readiness probe
(since we probably want to have a readiness probe anyway)
- In other words, startup probes are useful in one situation:
*we have a liveness probe, AND the container needs a lot of time to start*
- Don't forget to change the `failureThreshold`
(otherwise the container will fail to start and be killed)
---
## Recap of the gotchas
- The default timeout is 1 second
- if a probe takes longer than 1 second to reply, Kubernetes considers that it fails
- this can be changed by setting the `timeoutSeconds` parameter
<br/>(or refactoring the probe)
- Liveness probes should not be influenced by the state of external services
- Liveness probes and readiness probes should have different parameters
- For startup probes, remember to increase the `failureThreshold`
(e.g. if a database or API might be healthy but still take more than
1 second to reply, we should check the status asynchronously and report
a cached status)
---
@@ -453,21 +300,21 @@ spec:
(In that context, worker = process that doesn't accept connections)
- A relatively easy solution is to use files
- Readiness is useful mostly for rolling updates
- For a startup or readiness probe:
(because workers aren't backends for a service)
- worker creates `/tmp/ready` when it's ready
- probe checks the existence of `/tmp/ready`
- Liveness may help us restart a broken worker, but how can we check it?
- For a liveness probe:
- Embedding an HTTP server is a (potentially expensive) option
- worker touches `/tmp/alive` regularly
<br/>(e.g. just before starting to work on a job)
- probe checks that the timestamp on `/tmp/alive` is recent
- if the timestamp is old, it means that the worker is stuck
- Using a "lease" file can be relatively easy:
- Sometimes it can also make sense to embed a web server in the worker
- touch a file during each iteration of the main loop
- check the timestamp of that file from an exec probe
- Writing logs (and checking them from the probe) also works
???

View File

@@ -157,7 +157,7 @@ class: extra-details
(as opposed to, e.g., installing a new release each time we run it)
- Other example: `kubectl apply -f some-file.yaml`
- Other example: `kubectl -f some-file.yaml`
---

View File

@@ -66,7 +66,7 @@
Where do that `repository` and `version` come from?
We're assuming here that we did our research,
We're assuming here that we did our reserach,
or that our resident Helm expert advised us to
use Bitnami's Redis chart.

View File

@@ -167,7 +167,7 @@ Let's try one more round of decoding!
--
... OK, that was *a lot* of binary data. What should we do with it?
... OK, that was *a lot* of binary data. What sould we do with it?
---

View File

@@ -1,148 +0,0 @@
## Ingress and canary releases
- Let's see how to implement *canary releases*
- The example here will use Traefik v1
(which is obsolete)
- It won't work on your Kubernetes cluster!
(unless you're running an oooooold version of Kubernetes)
(and an equally oooooooold version of Traefik)
- We've left it here just as an example!
---
## Canary releases
- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload
- After deploying the canary, we compare its metrics to the normal release
- If the metrics look good, the canary will progressively receive more traffic
(until it gets 100% and becomes the new normal release)
- If the metrics aren't good, the canary is automatically removed
- When we deploy a bad release, only a tiny fraction of traffic is affected
---
## Various ways to implement canary
- Example 1: canary for a microservice
- 1% of all requests (sampled randomly) are sent to the canary
- the remaining 99% are sent to the normal release
- Example 2: canary for a web app
- 1% of users are sent to the canary web site
- the remaining 99% are sent to the normal release
- Example 3: canary for shipping physical goods
- 1% of orders are shipped with the canary process
- the remaining 99% are shipped with the normal process
- We're going to implement example 1 (per-request routing)
---
## Canary releases with Traefik v1
- We need to deploy the canary and expose it with a separate service
- Then, in the Ingress resource, we need:
- multiple `paths` entries (one for each service, canary and normal)
- an extra annotation indicating the weight of each service
- If we want, we can send requests to more than 2 services
---
## The Ingress resource
.small[
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: rgb
annotations:
traefik.ingress.kubernetes.io/service-weights: |
red: 50%
green: 25%
blue: 25%
spec:
rules:
- host: rgb.`A.B.C.D`.nip.io
http:
paths:
- path: /
backend:
serviceName: red
servicePort: 80
- path: /
backend:
serviceName: green
servicePort: 80
- path: /
backend:
serviceName: blue
servicePort: 80
```
]
---
class: extra-details
## Other ingress controllers
*Just to illustrate how different things are ...*
- With the NGINX ingress controller:
- define two ingress resources
<br/>
(specifying rules with the same host+path)
- add `nginx.ingress.kubernetes.io/canary` annotations on each
- With Linkerd2:
- define two services
- define an extra service for the weighted aggregate of the two
- define a TrafficSplit (this is a CRD introduced by the SMI spec)
---
class: extra-details
## We need more than that
What we saw is just one of the multiple building blocks that we need to achieve a canary release.
We also need:
- metrics (latency, performance ...) for our releases
- automation to alter canary weights
(increase canary weight if metrics look good; decrease otherwise)
- a mechanism to manage the lifecycle of the canary releases
(create them, promote them, delete them ...)
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).

View File

@@ -1,36 +1,34 @@
# Exposing HTTP services with Ingress resources
- Service = layer 4 (TCP, UDP, SCTP)
- HTTP services are typically exposed on port 80
- works with every TCP/UDP/SCTP protocol
(and 443 for HTTPS)
- doesn't "see" or interpret HTTP
- `NodePort` services are great, but they are *not* on port 80
- Ingress = layer 7 (HTTP)
(by default, they use port range 30000-32767)
- only for HTTP
- can route requests depending on URI or host header
- can handle TLS
- How can we get *many* HTTP services on port 80? 🤔
---
## Why should we use Ingress resources?
## Various ways to expose something on port 80
A few use-cases:
- Service with `type: LoadBalancer`
- URI routing (e.g. for single page apps)
*costs a little bit of money; not always available*
`/api` → service `api:5000`
- Service with one (or multiple) `ExternalIP`
everything else → service `static:80`
*requires public nodes; limited by number of nodes*
- Cost optimization
- Service with `hostPort` or `hostNetwork`
(because individual `LoadBalancer` services typically cost money)
*same limitations as `ExternalIP`; even harder to manage*
- Automatic handling of TLS certificates
- Ingress resources
*addresses all these limitations, yay!*
---
@@ -183,70 +181,20 @@ class: extra-details
---
## Accepting connections on port 80 (and 443)
- Web site users don't want to specify port numbers
(e.g. "connect to https://blahblah.whatever:31550")
- Our ingress controller needs to actually be exposed on port 80
(and 443 if we want to handle HTTPS)
- Let's see how we can achieve that!
---
## Various ways to expose something on port 80
- Service with `type: LoadBalancer`
*costs a little bit of money; not always available*
- Service with one (or multiple) `ExternalIP`
*requires public nodes; limited by number of nodes*
- Service with `hostPort` or `hostNetwork`
*same limitations as `ExternalIP`; even harder to manage*
---
## Deploying pods listening on port 80
- We are going to run Traefik in Pods with `hostNetwork: true`
- We want our ingress load balancer to be available on port 80
(so that our load balancer can use the "real" port 80 of our nodes)
- The best way to do that would be with a `LoadBalancer` service
- Traefik Pods will be created by a DaemonSet
... but it requires support from the underlying infrastructure
(so that we get one instance of Traefik on every node of the cluster)
- Instead, we are going to use the `hostNetwork` mode on the Traefik pods
- This means that we will be able to connect to any node of the cluster on port 80
.warning[This is not typical of a production setup!]
- Let's see what this `hostNetwork` mode is about ...
---
## Doing it in production
- When running "on cloud", the easiest option is a `LoadBalancer` service
- When running "on prem", it depends:
- [MetalLB] is a good option if a pool of public IP addresses is available
- otherwise, using `externalIPs` on a few nodes (2-3 for redundancy)
- Many variations/optimizations are possible depending on our exact scenario!
[MetalLB]: https://metallb.org/
---
class: extra-details
## Without `hostNetwork`
- Normally, each pod gets its own *network namespace*
@@ -263,8 +211,6 @@ class: extra-details
---
class: extra-details
## With `hostNetwork: true`
- No network namespace gets created
@@ -283,6 +229,26 @@ class: extra-details
---
class: extra-details
## Other techniques to expose port 80
- We could use pods specifying `hostPort: 80`
... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
- We could use a `NodePort` service
... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
- We could create a service with an external IP
... this would work, but would require a few extra steps
(figuring out the IP address and adding it to the service)
---
## Running Traefik
- The [Traefik documentation][traefikdoc] recommends to use a Helm chart
@@ -304,8 +270,6 @@ class: extra-details
---
class: extra-details
## Taints and tolerations
- A *taint* is an attribute added to a node
@@ -532,6 +496,10 @@ This is normal: we haven't provided any ingress rule yet.
## Creating ingress resources
- Before Kubernetes 1.19, we must use YAML manifests
(see example on next slide)
- Since Kubernetes 1.19, we can use `kubectl create ingress`
```bash
@@ -566,21 +534,7 @@ This is normal: we haven't provided any ingress rule yet.
---
## Before Kubernetes 1.19
- Before Kubernetes 1.19:
- `kubectl create ingress` wasn't available
- `apiVersion: networking.k8s.io/v1` wasn't supported
- It was necessary to use YAML, and `apiVersion: networking.k8s.io/v1beta1`
(see example on next slide)
---
## YAML for old ingress resources
## Ingress resources in YAML
Here is a minimal host-based ingress resource:
@@ -601,15 +555,23 @@ spec:
```
(It is in `k8s/ingress.yaml`.)
---
## YAML for new ingress resources
class: extra-details
## Ingress API version
- The YAML on the previous slide uses `apiVersion: networking.k8s.io/v1beta1`
- Starting with Kubernetes 1.19, `networking.k8s.io/v1` is available
- And we can use `kubectl create ingress` 🎉
- However, with Kubernetes 1.19 (and later), we can use `kubectl create ingress`
- We can see "modern" YAML with `-o yaml --dry-run=client`:
- We chose to keep an "old" (deprecated!) YAML example for folks still using older versions of Kubernetes
- If we want to see "modern" YAML, we can use `-o yaml --dry-run=client`:
```bash
kubectl create ingress red -o yaml --dry-run=client \
@@ -679,6 +641,157 @@ class: extra-details
- It is still in alpha stage
---
## Vendor-specific example
- Let's see how to implement *canary releases*
- The example here will use Traefik v1
(which is obsolete)
- It won't work on your Kubernetes cluster!
(unless you're running an oooooold version of Kubernetes)
(and an equally oooooooold version of Traefik)
- We've left it here just as an example!
---
## Canary releases
- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload
- After deploying the canary, we compare its metrics to the normal release
- If the metrics look good, the canary will progressively receive more traffic
(until it gets 100% and becomes the new normal release)
- If the metrics aren't good, the canary is automatically removed
- When we deploy a bad release, only a tiny fraction of traffic is affected
---
## Various ways to implement canary
- Example 1: canary for a microservice
- 1% of all requests (sampled randomly) are sent to the canary
- the remaining 99% are sent to the normal release
- Example 2: canary for a web app
- 1% of users are sent to the canary web site
- the remaining 99% are sent to the normal release
- Example 3: canary for shipping physical goods
- 1% of orders are shipped with the canary process
- the remaining 99% are shipped with the normal process
- We're going to implement example 1 (per-request routing)
---
## Canary releases with Traefik v1
- We need to deploy the canary and expose it with a separate service
- Then, in the Ingress resource, we need:
- multiple `paths` entries (one for each service, canary and normal)
- an extra annotation indicating the weight of each service
- If we want, we can send requests to more than 2 services
---
## The Ingress resource
.small[
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: rgb
annotations:
traefik.ingress.kubernetes.io/service-weights: |
red: 50%
green: 25%
blue: 25%
spec:
rules:
- host: rgb.`A.B.C.D`.nip.io
http:
paths:
- path: /
backend:
serviceName: red
servicePort: 80
- path: /
backend:
serviceName: green
servicePort: 80
- path: /
backend:
serviceName: blue
servicePort: 80
```
]
---
class: extra-details
## Other ingress controllers
*Just to illustrate how different things are ...*
- With the NGINX ingress controller:
- define two ingress resources
<br/>
(specifying rules with the same host+path)
- add `nginx.ingress.kubernetes.io/canary` annotations on each
- With Linkerd2:
- define two services
- define an extra service for the weighted aggregate of the two
- define a TrafficSplit (this is a CRD introduced by the SMI spec)
---
class: extra-details
## We need more than that
What we saw is just one of the multiple building blocks that we need to achieve a canary release.
We also need:
- metrics (latency, performance ...) for our releases
- automation to alter canary weights
(increase canary weight if metrics look good; decrease otherwise)
- a mechanism to manage the lifecycle of the canary releases
(create them, promote them, delete them ...)
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).
???
:EN:- The Ingress resource

View File

@@ -460,9 +460,9 @@ class: extra-details
(i.e. node regularly pinging the control plane to say "I'm alive!")
- For more details, see [Efficient Node Heartbeats KEP] or the [node controller documentation]
- For more details, see [KEP-0009] or the [node controller documentation]
[Efficient Node Heartbeats KEP]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/589-efficient-node-heartbeats/README.md
[KEP-0009]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0009-node-heartbeat.md
[node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller
---

View File

@@ -554,28 +554,6 @@ Note: the `apiVersion` field appears to be optional.
---
class: extra-details
## Managing `ownerReferences`
- By default, the generated object and triggering object have independent lifecycles
(deleting the triggering object doesn't affect the generated object)
- It is possible to associate the generated object with the triggering object
(so that deleting the triggering object also deletes the generated object)
- This is done by adding the triggering object information to `ownerReferences`
(in the generated object `metadata`)
- See [Linking resources with ownerReferences][ownerref] for an example
[ownerref]: https://kyverno.io/docs/writing-policies/generate/#linking-resources-with-ownerreferences
---
## Asynchronous creation
- Kyverno creates resources asynchronously

View File

@@ -99,9 +99,9 @@ Pros:
- That Pod will fetch metrics from all our Nodes
- It will expose them through the Kubernetes API aggregation layer
- It will expose them through the Kubernetes API agregation layer
(we won't say much more about that aggregation layer; that's fairly advanced stuff!)
(we won't say much more about that agregation layer; that's fairly advanced stuff!)
---
@@ -128,7 +128,7 @@ Pros:
- `apiService.create=true`
register `metrics-server` with the Kubernetes aggregation layer
register `metrics-server` with the Kubernetes agregation layer
(create an entry that will show up in `kubectl get apiservices`)
@@ -192,7 +192,7 @@ Pros:
- kube-resource-report can generate HTML reports
(https://codeberg.org/hjacobs/kube-resource-report)
(https://github.com/hjacobs/kube-resource-report)
???

View File

@@ -190,25 +190,19 @@ EOF
---
## `WaitForFirstConsumer`
## Making sure that a PV was created for our PVC
- Did OpenEBS create a PV for our PVC?
- Normally, the `openebs-hostpath` StorageClass created a PV for our PVC
.lab[
- Find out:
- Look at the PV and PVC:
```bash
kubectl get pv,pvc
```
]
--
- No!
- This is because that class is `WaitForFirstConsumer` instead of `Immediate`
---
## Create a Pod to consume the PV
@@ -237,21 +231,6 @@ EOF
---
## Making sure that a PV was created for our PVC
- At that point, the `openebs-hostpath` StorageClass created a PV for our PVC
.lab[
- Look at the PV and PVC:
```bash
kubectl get pv,pvc
```
]
---
## Verify that data is written on the node
- Let's find the file written by the Pod on the node where the Pod is running
@@ -356,4 +335,4 @@ EOF
:EN:- Deploying stateful apps with OpenEBS
:FR:- Comprendre le "Container Attached Storage" (CAS)
:FR:- Déployer une application "stateful" avec OpenEBS
:FR:- Déployer une application "stateful" avec OpenEBS

View File

@@ -127,7 +127,7 @@ class: extra-details
- either directly
- or by extending the API server
<br/>(for instance by using the aggregation layer, like [metrics server](https://github.com/kubernetes-incubator/metrics-server) does)
<br/>(for instance by using the agregation layer, like [metrics server](https://github.com/kubernetes-incubator/metrics-server) does)
---

View File

@@ -151,8 +151,3 @@ on my needs) to be deployed into its specific Kubernetes Namespace.*
- Improvement idea: this operator could generate *events*
(visible with `kubectl get events` and `kubectl describe`)
???
:EN:- How to write a simple operator with shell scripts
:FR:- Comment écrire un opérateur simple en shell script

View File

@@ -1,58 +1,19 @@
# Operators
The Kubernetes documentation describes the [Operator pattern] as follows:
*Operators are software extensions to Kubernetes that make use of custom resources to manage applications and their components. Operators follow Kubernetes principles, notably the control loop.*
Another good definition from [CoreOS](https://coreos.com/blog/introducing-operators.html):
*An operator represents **human operational knowledge in software,**
<br/>
to reliably manage an application.*
to reliably manage an application.
— [CoreOS](https://coreos.com/blog/introducing-operators.html)*
There are many different use cases spanning different domains; but the general idea is:
Examples:
*Manage some resources (that reside inside or outside the cluster),
<br/>
using Kubernetes manifests and tooling.*
- Deploying and configuring replication with MySQL, PostgreSQL ...
[Operator pattern]: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/
- Setting up Elasticsearch, Kafka, RabbitMQ, Zookeeper ...
---
- Reacting to failures when intervention is needed
## Some uses cases
- Managing external resources ([AWS], [GCP], [KubeVirt]...)
- Setting up database replication or distributed systems
<br/>
(Cassandra, Consul, CouchDB, ElasticSearch, etcd, Kafka, MongoDB, MySQL, PostgreSQL, RabbitMQ, Redis, ZooKeeper...)
- Running and configuring CI/CD
<br/>
([ArgoCD], [Flux]), backups ([Velero]), policies ([Gatekeeper], [Kyverno])...
- Automating management of certificates and secrets
<br/>
([cert-manager]), secrets ([External Secrets Operator], [Sealed Secrets]...)
- Configuration of cluster components ([Istio], [Prometheus])
- etc.
[ArgoCD]: https://github.com/argoproj/argo-cd
[AWS]: https://aws-controllers-k8s.github.io/community/docs/community/services/
[cert-manager]: https://cert-manager.io/
[External Secrets Operator]: https://external-secrets.io/
[Flux]: https://fluxcd.io/
[Gatekeeper]: https://open-policy-agent.github.io/gatekeeper/website/docs/
[GCP]: https://github.com/paulczar/gcp-cloud-compute-operator
[Istio]: https://istio.io/latest/docs/setup/install/operator/
[KubeVirt]: https://kubevirt.io/
[Kyverno]: https://kyverno.io/
[Prometheus]: https://prometheus-operator.dev/
[Sealed Secrets]: https://github.com/bitnami-labs/sealed-secrets
[Velero]: https://velero.io/
- Scaling up and down these systems
---
@@ -76,7 +37,7 @@ using Kubernetes manifests and tooling.*
---
## Operators for e.g. replicated databases
## Why use operators?
- Kubernetes gives us Deployments, StatefulSets, Services ...
@@ -98,6 +59,38 @@ using Kubernetes manifests and tooling.*
---
## Use-cases for operators
- Systems with primary/secondary replication
Examples: MariaDB, MySQL, PostgreSQL, Redis ...
- Systems where different groups of nodes have different roles
Examples: ElasticSearch, MongoDB ...
- Systems with complex dependencies (that are themselves managed with operators)
Examples: Flink or Kafka, which both depend on Zookeeper
---
## More use-cases
- Representing and managing external resources
(Example: [AWS S3 Operator](https://operatorhub.io/operator/awss3-operator-registry))
- Managing complex cluster add-ons
(Example: [Istio operator](https://operatorhub.io/operator/istio))
- Deploying and managing our applications' lifecycles
(more on that later)
---
## How operators work
- An operator creates one or more CRDs
@@ -112,6 +105,38 @@ using Kubernetes manifests and tooling.*
---
## Deploying our apps with operators
- It is very simple to deploy with `kubectl create deployment` / `kubectl expose`
- We can unlock more features by writing YAML and using `kubectl apply`
- Kustomize or Helm let us deploy in multiple environments
(and adjust/tweak parameters in each environment)
- We can also use an operator to deploy our application
---
## Pros and cons of deploying with operators
- The app definition and configuration is persisted in the Kubernetes API
- Multiple instances of the app can be manipulated with `kubectl get`
- We can add labels, annotations to the app instances
- Our controller can execute custom code for any lifecycle event
- However, we need to write this controller
- We need to be careful about changes
(what happens when the resource `spec` is updated?)
---
## Operators are not magic
- Look at this ElasticSearch resource definition:

View File

@@ -6,7 +6,7 @@
- Easier to use
(doesn't require complex interaction between policies and RBAC)
(doesn't require complex interaction bewteen policies and RBAC)
---
@@ -206,7 +206,7 @@ class: extra-details
- If new namespaces are created, they will get default permissions
- We can change that by using an *admission configuration*
- We can change that be using an *admission configuration*
- Step 1: write an "admission configuration file"
@@ -232,7 +232,7 @@ Let's use @@LINK[k8s/admission-configuration.yaml]:
- For convenience, let's copy it do `/etc/kubernetes/pki`
(it's definitely not where it *should* be, but that'll do!)
(it's definitely where it *should* be, but that'll do!)
.lab[

View File

@@ -40,9 +40,7 @@
- Each person gets their own private set of VMs
- The connection information is on a shared spreadsheet
(URL to be shared through portal and chat)
- Each person should have a printed card with connection information
- We will connect to these VMs with SSH

View File

@@ -14,20 +14,32 @@
- CPU is a *compressible resource*
- it can be preempted immediately without adverse effect
- if we have N CPU and need 2N, we run at 50% speed
(it can be preempted immediately without adverse effect)
- Memory is an *incompressible resource*
- it needs to be swapped out to be reclaimed; and this is costly
- if we have N GB RAM and need 2N, we might run at... 0.1% speed!
(it needs to be swapped out to be reclaimed; and this is costly)
- As a result, exceeding limits will have different consequences for CPU and memory
---
## Exceeding CPU limits
- CPU can be reclaimed instantaneously
(in fact, it is preempted hundreds of times per second, at each context switch)
- If a container uses too much CPU, it can be throttled
(it will be scheduled less often)
- The processes in that container will run slower
(or rather: they will not run faster)
---
class: extra-details
## CPU limits implementation details
@@ -134,59 +146,39 @@ For more details, check [this blog post](https://erickhun.com/posts/kubernetes-f
---
## Running low on memory
## Exceeding memory limits
- When the system runs low on memory, it starts to reclaim used memory
- Memory needs to be swapped out before being reclaimed
(we talk about "memory pressure")
- "Swapping" means writing memory pages to disk, which is very slow
- Option 1: free up some buffers and caches
- On a classic system, a process that swaps can get 1000x slower
(fastest option; might affect performance if cache memory runs very low)
(because disk I/O is 1000x slower than memory I/O)
- Option 2: swap, i.e. write to disk some memory of one process to give it to another
- Exceeding the memory limit (even by a small amount) can reduce performance *a lot*
(can have a huge negative impact on performance because disks are slow)
- Kubernetes *does not support swap* (more on that later!)
- Option 3: terminate a process and reclaim all its memory
(OOM or Out Of Memory Killer on Linux)
- Exceeding the memory limit will cause the container to be killed
---
## Memory limits on Kubernetes
## Limits vs requests
- Kubernetes *does not support swap*
(but it may support it in the future, thanks to [KEP 2400])
- If a container exceeds its memory *limit*, it gets killed immediately
- If a node is overcommitted and under memory pressure, it will terminate some pods
(see next slide for some details about what "overcommit" means here!)
[KEP 2400]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2400-node-swap/README.md#implementation-history
---
## Overcommitting resources
- *Limits* are "hard limits" (a container *cannot* exceed its limits)
- Limits are "hard limits" (they can't be exceeded)
- a container exceeding its memory limit is killed
- a container exceeding its CPU limit is throttled
- On a given node, the sum of pod *limits* can be higher than the node size
- Requests are used for scheduling purposes
- *Requests* are used for scheduling purposes
- a container using *less* than what it requested will never be killed or throttled
- a container can use more than its requested CPU or RAM amounts
- the scheduler uses the requested sizes to determine placement
- a container using *less* than what it requested should never be killed or throttled
- On a given node, the sum of pod *requests* cannot be higher than the node size
- the resources requested by all pods on a node will never exceed the node size
---
@@ -230,31 +222,9 @@ Each pod is assigned a QoS class (visible in `status.qosClass`).
---
class: extra-details
## Where is my swap?
## CPU and RAM reservation
- Kubernetes passes resources requests and limits to the container engine
- The container engine applies these requests and limits with specific mechanisms
- Example: on Linux, this is typically done with control groups aka cgroups
- Most systems use cgroups v1, but cgroups v2 are slowly being rolled out
(e.g. available in Ubuntu 22.04 LTS)
- Cgroups v2 have new, interesting features for memory control:
- ability to set "minimum" memory amounts (to effectively reserve memory)
- better control on the amount of swap used by a container
---
class: extra-details
## What's the deal with swap?
- The semantics of memory and swap limits on Linux cgroups are complex
- With cgroups v1, it's not possible to disable swap for a cgroup
@@ -268,8 +238,6 @@ class: extra-details
- The simplest solution was to disable swap entirely
- Kubelet will refuse to start if it detects that swap is enabled!
---
## Alternative point of view
@@ -300,7 +268,7 @@ class: extra-details
- You will need to add the flag `--fail-swap-on=false` to kubelet
(remember: it won't otherwise start if it detects that swap is enabled)
(otherwise, it won't start!)
---
@@ -698,18 +666,6 @@ class: extra-details
---
## Underutilization
- Remember: when assigning a pod to a node, the scheduler looks at *requests*
(not at current utilization on the node)
- If pods request resources but don't use them, this can lead to underutilization
(because the scheduler will consider that the node is full and can't fit new pods)
---
## Viewing a namespace limits and quotas
- `kubectl describe namespace` will display resource limits and quotas
@@ -741,13 +697,13 @@ class: extra-details
- gives PromQL expressions to compute good values
<br/>(our app needs to be running for a while)
- [Kube Resource Report](https://codeberg.org/hjacobs/kube-resource-report)
- [Kube Resource Report](https://github.com/hjacobs/kube-resource-report/)
- generates web reports on resource usage
- [nsinjector](https://github.com/blakelead/nsinjector)
- controller to automatically populate a Namespace when it is created
- [static demo](https://hjacobs.github.io/kube-resource-report/sample-report/output/index.html)
|
[live demo](https://kube-resource-report.demo.j-serv.de/applications.html)
???

View File

@@ -1,18 +1,14 @@
# Rolling updates
- How should we update a running application?
- By default (without rolling updates), when a scaled resource is updated:
- Strategy 1: delete old version, then deploy new version
- new pods are created
(not great, because it obviously provokes downtime!)
- old pods are terminated
- Strategy 2: deploy new version, then delete old version
- ... all at the same time
(uses a lot of resources; also how do we shift traffic?)
- Strategy 3: replace running pods one at a time
(sounds interesting; and good news, Kubernetes does it for us!)
- if something goes wrong, ¯\\\_(ツ)\_/¯
---

View File

@@ -54,7 +54,9 @@
- The official installation is done through a single YAML file
- There is also a Helm chart if you prefer that (see next slide!)
- There is also a Helm chart if you prefer that
(if you're using Kubernetes 1.22+, see next slide!)
<!-- #VERSION# -->
@@ -64,7 +66,7 @@
.small[
```bash
kubectl apply -f \
https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.17.5/controller.yaml
https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.16.0/controller.yaml
```
]
@@ -78,9 +80,15 @@ If you change that, you will also need to inform `kubeseal` later on.
class: extra-details
## Installing with Helm
## Sealed Secrets on Kubernetes 1.22
- The Sealed Secrets controller can be installed like this:
- As of version 0.16, Sealed Secrets manifests uses RBAC v1beta1
- RBAC v1beta1 isn't supported anymore in Kubernetes 1.22
- Sealed Secrets Helm chart provides manifests using RBAC v1
- Conclusion: to install Sealed Secrets on Kubernetes 1.22, use the Helm chart:
```bash
helm install --repo https://bitnami-labs.github.io/sealed-secrets/ \
@@ -328,4 +336,4 @@ class: extra-details
???
:EN:- The Sealed Secrets Operator
:FR:- L'opérateur *Sealed Secrets*
:FR:- L'opérateur *Sealed Secrets*

View File

@@ -72,7 +72,7 @@
## Accessing private repositories
- Let's see how to access an image on a private registry!
- Let's see how to access an image on private registry!
- These images are protected by a username + password
@@ -243,7 +243,7 @@ class: extra-details
## Encryption at rest
- It is possible to [encrypt secrets at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/)
- It is possible to [encrypted secrets at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/)
- This means that secrets will be safe if someone ...

View File

@@ -20,17 +20,13 @@
## Docker Desktop
- Available on Linux, Mac, and Windows
- Free for personal use and small businesses
(less than 250 employees and less than $10 millions in annual revenue)
- Available on Mac and Windows
- Gives you one cluster with one node
- Streamlined installation and user experience
- Very easy to use if you are already using Docker Desktop:
- Great integration with various network stacks and e.g. corporate VPNs
go to Docker Desktop preferences and enable Kubernetes
- Ideal for Docker users who need good integration between both platforms
@@ -44,11 +40,13 @@
- Runs Kubernetes nodes in Docker containers
- Can deploy multiple clusters, with multiple nodes
- Can deploy multiple clusters, with multiple nodes, and multiple master nodes
- Runs the control plane on Kubernetes nodes
- As of June 2020, two versions co-exist: stable (1.7) and beta (3.0)
- Control plane can also run on multiple nodes
- They have different syntax and options, this can be confusing
(but don't let that stop you!)
---
@@ -86,7 +84,7 @@
- More advanced scenarios require writing a short [config file](https://kind.sigs.k8s.io/docs/user/quick-start#configuring-your-kind-cluster)
(to define multiple nodes, multiple control plane nodes, set Kubernetes versions ...)
(to define multiple nodes, multiple master nodes, set Kubernetes versions ...)
- Can deploy multiple clusters
@@ -126,9 +124,7 @@
## [Rancher Desktop](https://rancherdesktop.io/)
- Available on Linux, Mac, and Windows
- Free and open-source
- Available on Mac and Windows
- Runs a single cluster with a single node
@@ -138,7 +134,7 @@
- Emphasis on ease of use (like Docker Desktop)
- Relatively young product (first release in May 2021)
- Very young product (first release in May 2021)
- Based on k3s and other proven components

View File

@@ -210,54 +210,25 @@ Ah, right ...
## Running Tilt on a remote machine
- If Tilt runs remotely, we can't access `http://localhost:10350`
- If Tilt runs remotely, we can't access http://localhost:10350
- We'll need to tell Tilt to listen to `0.0.0.0`
- Our Tiltfile includes an ngrok tunnel, let's use that
(instead of just `localhost`)
- If we run Tilt in a Pod, we need to expose port 10350 somehow
(and Tilt needs to listen on `0.0.0.0`, too)
---
## Telling Tilt to listen in `0.0.0.0`
- This can be done with the `--host` flag:
- Start Tilt:
```bash
tilt --host=0.0.0.0
```
- Or by setting the `TILT_HOST` environment variable:
```bash
export TILT_HOST=0.0.0.0
tilt up
```
---
- The ngrok URL should appear in the Tilt output
## Running Tilt in a Pod
(something like `https://xxxx-aa-bb-cc-dd.ngrok.io/`)
If you use `shpod`, you can use the following command:
- Open that URL in your browser
```bash
kubectl patch service shpod --namespace shpod -p "
spec:
ports:
- name: tilt
port: 10350
targetPort: 10350
nodePort: 30150
protocol: TCP
"
```
Then connect to port 30150 on any of your nodes.
If you use something other than `shpod`, adapt these instructions!
*Note: it's also possible to run `tilt up --host=0.0.0.0`.*
---
class: extra-details
## Kubernetes contexts

View File

@@ -1,635 +0,0 @@
# YTT
- YAML Templating Tool
- Part of [Carvel]
(a set of tools for Kubernetes application building, configuration, and deployment)
- Can be used for any YAML
(Kubernetes, Compose, CI pipelines...)
[Carvel]: https://carvel.dev/
---
## Features
- Manipulate data structures, not text (≠ Helm)
- Deterministic, hermetic execution
- Define variables, blocks, functions
- Write code in Starlark (dialect of Python)
- Define and override values (Helm-style)
- Patch resources arbitrarily (Kustomize-style)
---
## Getting started
- Install `ytt` ([binary download][download])
- Start with one (or multiple) Kubernetes YAML files
*(without comments; no `#` allowed at this point!)*
- `ytt -f one.yaml -f two.yaml | kubectl apply -f-`
- `ytt -f. | kubectl apply -f-`
[download]: https://github.com/vmware-tanzu/carvel-ytt/releases/latest
---
## No comments?!?
- Replace `#` with `#!`
- `#@` is used by ytt
- It's a kind of template tag, for instance:
```yaml
#! This is a comment
#@ a = 42
#@ b = "*"
a: #@ a
b: #@ b
operation: multiply
result: #@ a*b
```
- `#@` at the beginning of a line = instruction
- `#@` somewhere else = value
---
## Building strings
- Concatenation:
```yaml
#@ repository = "dockercoins"
#@ tag = "v0.1"
containers:
- name: worker
image: #@ repository + "/worker:" + tag
```
- Formatting:
```yaml
#@ repository = "dockercoins"
#@ tag = "v0.1"
containers:
- name: worker
image: #@ "{}/worker:{}".format(repository, tag)
```
---
## Defining functions
- Reusable functions can be written in Starlark (=Python)
- Blocks (`def`, `if`, `for`...) must be terminated with `#@ end`
- Example:
```yaml
#@ def image(component, repository="dockercoins", tag="v0.1"):
#@ return "{}/{}:{}".format(repository, component, tag)
#@ end
containers:
- name: worker
image: #@ image("worker")
- name: hasher
image: #@ image("hasher")
```
---
## Structured data
- Functions can return complex types
- Example: defining a common set of labels
```yaml
#@ name = "worker"
#@ def labels(component):
#@ return {
#@ "app": component,
#@ "container.training/generated-by": "ytt",
#@ }
#@ end
kind: Pod
apiVersion: v1
metadata:
name: #@ name
labels: #@ labels(name)
```
---
## YAML functions
- Function body can also be straight YAML:
```yaml
#@ name = "worker"
#@ def labels(component):
app: #@ component
container.training/generated-by: ytt
#@ end
kind: Pod
apiVersion: v1
metadata:
name: #@ name
labels: #@ labels(name)
```
- The return type of the function is then a [YAML fragment][fragment]
[fragment]: https://carvel.dev/ytt/docs/v0.41.0/
---
## More YAML functions
- We can load library functions:
```yaml
#@ load("@ytt:sha256", "sha256")
```
- This is (sort of) equivalent to `from ytt.sha256 import sha256`
- Functions can contain a mix of code and YAML fragment:
```yaml
#@ load("@ytt:sha256", "sha256")
#@ def annotations():
#@ author = "Jérôme Petazzoni"
author: #@ author
author_hash: #@ sha256.sum(author)[:8]
#@ end
annotations: #@ annotations()
```
---
## Data values
- We can define a *schema* in a separate file:
```yaml
#@data/values-schema
--- #! there must be a "---" here!
repository: dockercoins
tag: v0.1
```
- This defines the data values (=customizable parameters),
as well as their *types* and *default values*
- Technically, `#@data/values-schema` is an annotation,
and it applies to a YAML document; so the following
element must be a YAML document
- This is conceptually similar to Helm's *values* file
<br/>
(but with type enforcement as a bonus)
---
## Using data values
- Requires loading `@ytt:data`
- Values are then available in `data.values`
- Example:
```yaml
#@ load("@ytt:data", "data")
#@ def image(component):
#@ return "{}/{}:{}".format(data.values.repository, component, data.values.tag)
#@ end
#@ name = "worker"
containers:
- name: #@ name
image: #@ image(name)
```
---
## Overriding data values
- There are many ways to set and override data values:
- plain YAML files
- data value overlays
- environment variables
- command-line flags
- Precedence of the different methods is defined in the [docs]
[docs]: https://carvel.dev/ytt/docs/v0.41.0/ytt-data-values/#data-values-merge-order
---
## Values in plain YAML files
- Content of `values.yaml`:
```yaml
tag: latest
```
- Values get merged with `--data-values-file`:
```bash
ytt -f config/ --data-values-file values.yaml
```
- Multiple files can be specified
- These files can also be URLs!
---
## Data value overlay
- Content of `values.yaml`:
```yaml
#@data/values
--- #! must have --- here
tag: latest
```
- Values get merged by being specified like "normal" files:
```bash
ytt -f config/ -f values.yaml
```
- Multiple files can be specified
---
## Set a value with a flag
- Set a string value:
```bash
ytt -f config/ --data-value tag=latest
```
- Set a YAML value (useful to parse it as e.g. integer, boolean...):
```bash
ytt -f config/ --data-value-yaml replicas=10
```
- Read a string value from a file:
```bash
ytt -f config/ --data-value-file ca_cert=cert.pem
```
---
## Set values from environment variables
- Set environment variables with a prefix:
```bash
export VAL_tag=latest
export VAL_repository=ghcr.io/dockercoins
```
- Use the variables as strings:
```bash
ytt -f config/ --data-values-env VAL
```
- Or parse them as YAML:
```bash
ytt -f config/ --data-values-env-yaml VAL
```
---
## Lines starting with `#@`
- This generates an empty document:
```yaml
#@ def hello():
hello: world
#@ end
#@ hello()
```
- Do this instead:
```yaml
#@ def hello():
hello: world
#@ end
--- #@ hello()
```
---
## Generating multiple documents, take 1
- This won't work:
```yaml
#@ def app():
kind: Deployment
apiVersion: apps/v1
--- #! separate from next document
kind: Service
apiVersion: v1
#@ end
--- #@ app()
```
---
## Generating multiple documents, take 2
- This won't work either:
```yaml
#@ def app():
--- #! the initial separator indicates "this is a Document Set"
kind: Deployment
apiVersion: apps/v1
--- #! separate from next document
kind: Service
apiVersion: v1
#@ end
--- #@ app()
```
---
## Generating multiple documents, take 3
- We must use the `template` module:
```yaml
#@ load("@ytt:template", "template")
#@ def app():
--- #! the initial separator indicates "this is a Document Set"
kind: Deployment
apiVersion: apps/v1
--- #! separate from next document
kind: Service
apiVersion: v1
#@ end
--- #@ template.replace(app())
```
- `template.replace(...)` is the only way (?) to replace one element with many
---
## Libraries
- A reusable ytt configuration can be transformed into a library
- Put it in a subdirectory named `_ytt_lib/whatever`, then:
```yaml
#@ load("@ytt:library", "library")
#@ load("@ytt:template", "template")
#@ whatever = library.get("whatever")
#@ my_values = {"tag": "latest", "registry": "..."}
#@ output = whatever.with_data_values(my_values).eval()
--- #@ template.replace(output)
```
- The `with_data_values()` step is optional, but useful to "configure" the library
- Note the whole combo:
```yaml
template.replace(library.get("...").with_data_values(...).eval())
```
---
## Overlays
- Powerful, but complex, but powerful! 💥
- Define transformations that are applied after generating the whole document set
- General idea:
- select YAML nodes to be transformed with an `#@overlay/match` decorator
- write a YAML snippet with the modifications to be applied
<br/>
(a bit like a strategic merge patch)
---
## Example
```yaml
#@ load("@ytt:overlay", "overlay")
#@ selector = {"kind": "Deployment", "metadata": {"name": "worker"}}
#@overlay/match by=overlay.subset(selector)
---
spec:
replicas: 10
```
- By default, `#@overlay/match` must find *exactly* one match
(that can be changed by specifying `expects=...`, `missing_ok=True`... see [docs])
- By default, the specified fields (here, `spec.replicas`) must exist
(that can also be changed by annotating the optional fields)
[docs]: https://carvel.dev/ytt/docs/v0.41.0/lang-ref-ytt-overlay/#overlaymatch
---
## Matching using a YAML document
```yaml
#@ load("@ytt:overlay", "overlay")
#@ def match():
kind: Deployment
metadata:
name: worker
#@ end
#@overlay/match by=overlay.subset(match())
---
spec:
replicas: 10
```
- This is equivalent to the subset match of the previous slide
- It will find YAML nodes having all the listed fields
---
## Removing a field
```yaml
#@ load("@ytt:overlay", "overlay")
#@ def match():
kind: Deployment
metadata:
name: worker
#@ end
#@overlay/match by=overlay.subset(match())
---
spec:
#@overlay/remove
replicas:
```
- This would remove the `replicas:` field from a specific Deployment spec
- This could be used e.g. when enabling autoscaling
---
## Selecting multiple nodes
```yaml
#@ load("@ytt:overlay", "overlay")
#@ def match():
kind: Deployment
#@ end
#@overlay/match by=overlay.subset(match()), expects="1+"
---
spec:
#@overlay/remove
replicas:
```
- This would match all Deployments
<br/>
(assuming that *at least one* exists)
- It would remove the `replicas:` field from their spec
<br/>
(the field must exist!)
---
## Adding a field
```yaml
#@ load("@ytt:overlay", "overlay")
#@overlay/match by=overlay.all, expects="1+"
---
metadata:
#@overlay/match missing_ok=True
annotations:
#@overlay/match expects=0
rainbow: 🌈
```
- `#@overlay/match missing_ok=True`
<br/>
*will match whether our resources already have annotations or not*
- `#@overlay/match expects=0`
<br/>
*will only match if the `rainbow` annotation doesn't exist*
<br/>
*(to make sure that we don't override/replace an existing annotation)*
---
## Overlays vs data values
- The documentation has a [detailed discussion][docs] about this question
- In short:
- values = for parameters that are exposed to the user
- overlays = for arbitrary extra modifications
- Values are easier to use (use them when possible!)
- Fallback to overlays when values don't expose what you need
(keeping in mind that overlays are harder to write/understand/maintain)
[docs]: https://carvel.dev/ytt/docs/v0.41.0/data-values-vs-overlays/
---
## Gotchas
- Reminder: put your `#@` at the right place!
```yaml
#! This will generate "hello, world!"
--- #@ "{}, {}!".format("hello", "world")
```
```yaml
#! But this will generate an empty document
---
#@ "{}, {}!".format("hello", "world")
```
- Also, don't use YAML anchors (`*foo` and `&foo`)
- They don't mix well with ytt
- Remember to use `template.replace(...)` when generating multiple nodes
(or to update lists or arrays without replacing them entirely)
---
## Next steps with ytt
- Read this documentation page about [injecting secrets][secrets]
- Check the [FAQ], it gives some insights about what's possible with ytt
- Exercise idea: write an overlay that will find all ConfigMaps mounted in Pods...
...and annotate the Pod with a hash of the ConfigMap
[FAQ]: https://carvel.dev/ytt/docs/v0.41.0/faq/
[secrets]: https://carvel.dev/ytt/docs/v0.41.0/injecting-secrets/
???
:EN:- YTT
:FR:- YTT

62
slides/kadm-fullday.yml Normal file
View File

@@ -0,0 +1,62 @@
title: |
Kubernetes
for Admins and Ops
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
- static-pods-exercise
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- k8s/prereqs-admin.md
- k8s/architecture.md
#- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
-
- k8s/multinode.md
- k8s/cni.md
- k8s/cni-internals.md
- k8s/interco.md
-
- k8s/apilb.md
#- k8s/setup-overview.md
#- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
- k8s/staticpods.md
-
#- k8s/cloud-controller-manager.md
#- k8s/bootstrap.md
- k8s/control-plane-auth.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
-
#- k8s/lastwords-admin.md
- k8s/links.md
- shared/thankyou.md

Some files were not shown because too many files have changed in this diff Show More