Compare commits

..

14 Commits

Author            SHA1        Message                                             Date
Gerry S           5af30c64bb  Friday                                              2022-09-23 08:06:14 -04:00
Gerry S           75c5964c30  Thursday                                            2022-09-22 07:53:28 -04:00
Gerry S           b112c1fae6  Wednesday                                           2022-09-21 10:23:23 -04:00
Gerry S           b4d837bbf5  Tuesday                                             2022-09-20 07:57:41 -04:00
Gerry S           dda21fee01  Day 1                                               2022-09-19 08:10:59 -04:00
Gerry S           da2806ea93  Day 1                                               2022-09-19 08:04:46 -04:00
Gerry S           d983592ddc  Post-Friday                                         2022-09-18 13:43:09 -04:00
Gerry S           d759703f9a  Friday/Day 5                                        2022-08-26 11:05:06 -04:00
Gerry S           ffbecd9e04  Thurs Day-4                                         2022-08-24 15:40:52 -04:00
Gerry S           6a235fae44  Wednesday/Day-3                                     2022-08-24 11:05:11 -04:00
Gerry S           d83a6232c4  Day 2 TOC                                           2022-08-23 10:50:36 -04:00
Jérôme Petazzoni  7b7c755b95  Bump up runtime version to fix Netlify deployment   2022-08-22 17:22:18 +02:00
Gerry S           6d0849eebb  New Relic Gerry version (day 1)                     2022-08-22 10:28:39 -04:00
Jérôme Petazzoni  b46dcd5157  🧭 New Relic August 2022 content (for Gerry)        2022-08-03 15:10:25 +02:00
103 changed files with 26052 additions and 1123 deletions

k8s/affinity-pod.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: aff-pod
spec:
  terminationGracePeriodSeconds: 30
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: cow
            operator: In
            values:
            - elsie
  containers:
  - name: aff-pod
    image: alpine
    command:
    - sleep
    args:
    - "1000"
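This pod stays Pending until some node carries the label it requires. A minimal way to try it (`node1` is a placeholder; pick a real node from `kubectl get nodes`):

```bash
kubectl apply -f k8s/affinity-pod.yaml
kubectl get pod aff-pod              # Pending: no node has cow=elsie yet
kubectl label node node1 cow=elsie   # satisfy the required nodeAffinity rule
kubectl get pod aff-pod -o wide      # now scheduled on node1
```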


@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,8 +253,8 @@ spec:
         app.kubernetes.io/instance: kubernetes-dashboard
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/name: kubernetes-dashboard
-        app.kubernetes.io/version: 2.6.1
-        helm.sh/chart: kubernetes-dashboard-5.10.0
+        app.kubernetes.io/version: 2.5.0
+        helm.sh/chart: kubernetes-dashboard-5.2.0
     spec:
       containers:
       - args:
@@ -262,7 +262,7 @@ spec:
         - --sidecar-host=http://127.0.0.1:8000
         - --enable-skip-login
        - --enable-insecure-login
-        image: kubernetesui/dashboard:v2.6.1
+        image: kubernetesui/dashboard:v2.5.0
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -293,7 +293,7 @@ spec:
           name: kubernetes-dashboard-certs
         - mountPath: /tmp
           name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.8
+      - image: kubernetesui/metrics-scraper:v1.0.7
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:


@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,15 +253,15 @@ spec:
         app.kubernetes.io/instance: kubernetes-dashboard
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/name: kubernetes-dashboard
-        app.kubernetes.io/version: 2.6.1
-        helm.sh/chart: kubernetes-dashboard-5.10.0
+        app.kubernetes.io/version: 2.5.0
+        helm.sh/chart: kubernetes-dashboard-5.2.0
     spec:
       containers:
       - args:
         - --namespace=kubernetes-dashboard
        - --auto-generate-certificates
        - --sidecar-host=http://127.0.0.1:8000
-        image: kubernetesui/dashboard:v2.6.1
+        image: kubernetesui/dashboard:v2.5.0
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -292,7 +292,7 @@ spec:
           name: kubernetes-dashboard-certs
         - mountPath: /tmp
           name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.8
+      - image: kubernetesui/metrics-scraper:v1.0.7
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:


@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.6.1
-    helm.sh/chart: kubernetes-dashboard-5.10.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,15 +253,15 @@ spec:
         app.kubernetes.io/instance: kubernetes-dashboard
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/name: kubernetes-dashboard
-        app.kubernetes.io/version: 2.6.1
-        helm.sh/chart: kubernetes-dashboard-5.10.0
+        app.kubernetes.io/version: 2.5.0
+        helm.sh/chart: kubernetes-dashboard-5.2.0
     spec:
       containers:
       - args:
         - --namespace=kubernetes-dashboard
        - --auto-generate-certificates
        - --sidecar-host=http://127.0.0.1:8000
-        image: kubernetesui/dashboard:v2.6.1
+        image: kubernetesui/dashboard:v2.5.0
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -292,7 +292,7 @@ spec:
           name: kubernetes-dashboard-certs
         - mountPath: /tmp
           name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.8
+      - image: kubernetesui/metrics-scraper:v1.0.7
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:

k8s/init-container.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: initty
spec:
  volumes:
  - name: pre-fetched        # volume names must be lowercase RFC 1123 labels
    emptyDir: {}
  containers:
  - name: main
    image: nginx
    volumeMounts:
    - name: pre-fetched
      mountPath: /usr/share/nginx/html/
  initContainers:
  - name: git-cloner
    image: alpine
    command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /preFetched" ]
    volumeMounts:
    - name: pre-fetched
      mountPath: /preFetched/
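The init container must run to completion before `main` starts. A quick way to observe the ordering (pod and container names are taken from the manifest above):

```bash
kubectl apply -f k8s/init-container.yaml
kubectl get pod initty --watch      # shows Init:0/1 until git-cloner exits
kubectl logs initty -c git-cloner   # output of the clone step
```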


@@ -0,0 +1,18 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKekNDQWcrZ0F3SUJBZ0lDQm5Vd0RRWUpLb1pJaHZjTkFRRUxCUUF3TXpFVk1CTUdBMVVFQ2hNTVJHbG4KYVhSaGJFOWpaV0Z1TVJvd0dBWURWUVFERXhGck9ITmhZWE1nUTJ4MWMzUmxjaUJEUVRBZUZ3MHlNakE1TVRneQpNekV6TWpGYUZ3MDBNakE1TVRneU16RXpNakZhTURNeEZUQVRCZ05WQkFvVERFUnBaMmwwWVd4UFkyVmhiakVhCk1CZ0dBMVVFQXhNUmF6aHpZV0Z6SUVOc2RYTjBaWElnUTBFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUIKRHdBd2dnRUtBb0lCQVFEYnVlN1MzRS9hdFpvQVJjSUllRFJNMG5vMThvaDNEL3cyV3VWQmNaQWppZXhmNGw4VQpldEZlWDBWQmZFZGJqUndIWTYva2VHdHVzS0dXUzNZdUN5RHd3WFNhMEV5NS9LM0ZLUHhEUkdyUWJSNXJkUWg5CmI4NW1IbXVIcUYvQXJHMWJVV2JYQmFRVVhBdXNtMVpjMnNtOXdWQm0vRlRJSTJDdEpReTViVXVIQnY3N01BNHEKUzV3b1liMXkwUHo0OXNuVldiY3BXZ1FxR080SE9JelFJc2crakxYR0lhWi96L0lneHR2M0ZYaVJVUlVIZWhERwplTTVuRDErY1JuUkorcDlLQU9VMUdOZzQwVENoN3hjaGo3UHNJMDV1Q0xVQWFhYVJ4M0pVRFBpRXgxWjVjOHQwCll6aTBXTVVTUVpkTjlUc3UrNGZZaXAyTFpkZGxXOW1ma0NYREFnTUJBQUdqUlRCRE1BNEdBMVVkRHdFQi93UUUKQXdJQmhqQVNCZ05WSFJNQkFmOEVDREFHQVFIL0FnRUFNQjBHQTFVZERnUVdCQlNpcEo3SHZQTkRZMWcrcDNEdwp0TUEvNThmUmFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFuYkNYSHUvM3YrbXRlU3N4TXFxUndJd1c0T015CkdRdzE0aERtYkFRcmovYVo0WkFvZUJIdFJSMGYxTFFXQnVIQTBtTFJvSTFSenpBQWw3V2lNMDd6VU1ETlV2enUKR0FCVmtwOEV6b2RneTlNclFkN2VtZkNJRFA3SkhZV1FzL1VxcGVVZW4zcHljQ3dXZFFXY3ZDR0FtTEZZSzI3TApKcnFKV1JXNGErWTVDUkhqVytzTGJpeTNNMTdrOHVWM1pzMktNS0FUaVNXWUZTUzUrSkg5Tk5WdXNKd1lUZVZPCmJOZG5PbS9ub1NLejYrbHUvUm1NK0NsUFdXakdXcUlHdHZyNFl6b0puZk52UDNXL01FQXlzY3Zlck9jcXUxWTAKa1dmRkg2azVlY3NsK2k1RTFkaE02U0JRaFZzV1crMjFlN1plbVJwc1htNkNyYUZqek4vSFlaMEMzdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: https://8f36cb5d-e565-452a-a09c-81760683c1f9.k8s.ondigitalocean.com
  name: do-sfo3-k8s-nr
contexts:
- context:
    cluster: do-sfo3-k8s-nr
    user: do-sfo3-k8s-nr-admin
  name: do-sfo3-k8s-nr
current-context: do-sfo3-k8s-nr
kind: Config
preferences: {}
users:
- name: do-sfo3-k8s-nr-admin
  user:
    token: dop_v1_dc6f141491e1e3447a52ec192c3424c0481622f5430cf219fb38458280e1ff88

k8s/multiLine.yaml (new file, 23 lines)

@@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: busybox
  name: busybox
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - command:
    - /bin/sh
    - -c
    - |
      echo "running below scripts"
      i=0;
      while true;
      do
        echo "$i: $(date)";
        i=$((i+1));
        sleep 1;
      done
    name: busybox
    image: busybox
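The `|` block scalar keeps the shell loop readable as one multi-line string handed to `/bin/sh -c`. To see it run:

```bash
kubectl apply -f k8s/multiLine.yaml
kubectl logs -f busybox   # prints "<counter>: <date>" once per second
```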

k8s/multiLine2.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: busybox
  name: busybox
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - command: ["/bin/sh", "-c"]
    args:
    - |
      echo "running below scripts"
      i=0;
      while true;
      do
        echo "$i: $(date)";
        i=$((i+1));
        sleep 1;
      done
    name: busybox
    image: busybox


@@ -3,11 +3,13 @@ kind: Pod
 metadata:
   name: nginx-with-volume
 spec:
-  volumes:
-  - name: www
   containers:
   - name: nginx
     image: nginx
     volumeMounts:
     - name: www
       mountPath: /usr/share/nginx/html/
+  volumes:
+  - name: www
+    emptyDir: {}


@@ -3,8 +3,9 @@ kind: Pod
 metadata:
   name: nginx-with-git
 spec:
-  volumes:
-  - name: www
+  terminationGracePeriodSeconds: 0
+  restartPolicy: OnFailure
   containers:
   - name: nginx
     image: nginx
@@ -17,5 +18,9 @@ spec:
     volumeMounts:
     - name: www
       mountPath: /www/
-  restartPolicy: OnFailure
+  volumes:
+  - name: www
+    emptyDir: {}


@@ -3,14 +3,8 @@ kind: Pod
 metadata:
   name: nginx-with-init
 spec:
-  volumes:
-  - name: www
-  containers:
-  - name: nginx
-    image: nginx
-    volumeMounts:
-    - name: www
-      mountPath: /usr/share/nginx/html/
+  terminationGracePeriodSeconds: 0
   initContainers:
   - name: git
     image: alpine
@@ -18,3 +12,15 @@ spec:
     volumeMounts:
     - name: www
       mountPath: /www/
+  containers:
+  - name: nginx
+    image: nginx
+    volumeMounts:
+    - name: www
+      mountPath: /usr/share/nginx/html/
+  volumes:
+  - name: www
+    emptyDir: {}


@@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-nginx
spec:
  terminationGracePeriodSeconds: 30
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: www
    hostPath:
      path: /home/k8s/myFiles
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: myData
            operator: In
            values:
            - present
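Like the affinity pod earlier, this one stays Pending until a node carries the `myData=present` label, and the hostPath directory must exist on that node. A sketch, with `node1` as a stand-in node name and an assumed filename for this manifest:

```bash
ssh node1 mkdir -p /home/k8s/myFiles       # hostPath dirs live on the node itself
kubectl label node node1 myData=present
kubectl apply -f k8s/hostpath-nginx.yaml   # filename is an assumption
```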

k8s/nginx-git.yaml (new file, 27 lines)

@@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-git
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  - name: git
    image: alpine
    command:
    - /bin/sh
    - -c
    - |
      apk add git &&
      git clone https://github.com/octocat/Spoon-Knife /www
    volumeMounts:
    - name: www
      mountPath: /www/
  volumes:
  - name: www
    emptyDir: {}
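Here the git container runs as a regular sidecar alongside nginx, both sharing the `www` emptyDir. One way to check the result:

```bash
kubectl apply -f k8s/nginx-git.yaml
sleep 10   # give the git container a moment to finish cloning
kubectl port-forward pod/nginx-with-git 8080:80 &
curl http://localhost:8080/   # index.html comes from the cloned repo
```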

k8s/nginx-init.yaml (new file, 28 lines)

@@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-git
spec:
  terminationGracePeriodSeconds: 0
  initContainers:
  - name: git
    image: alpine
    command:
    - /bin/sh
    - -c
    - |
      apk add git &&
      git clone https://github.com/octocat/Spoon-Knife /www
    volumeMounts:
    - name: www
      mountPath: /www/
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: www
    emptyDir: {}

k8s/nginx.yaml (new file, 8 lines)

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Pod
metadata:
  name: my-web
spec:
  containers:
  - name: nginx
    image: nginx

k8s/ping.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: ping
  name: ping
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - command:
    - ping
    args:
    - 127.0.0.1
    image: alpine
    name: ping
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
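The `creationTimestamp: null` and `status: {}` stanzas are the fingerprints of a client-side dry run; something close to this file can be regenerated with the command below (the split between `command:` and `args:` may come out differently):

```bash
kubectl run ping --image=alpine --restart=Always \
  --dry-run=client -o yaml -- ping 127.0.0.1 >ping-generated.yaml
```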

k8s/sampleYaml.yaml (new file, 18 lines)

@@ -0,0 +1,18 @@
name: gerry
citizenship: US
height-in-cm: 197
coder: true
friends:
- Moe
- Larry
- Curly
employees:
- name: Moe
  position: dev
- name: Larry
  position: ops
- name: Curly
  position: devOps
poem: |
  Mary had a little lamb
  It was very cute

k8s/sampleYamlAsJson.json (new file, 26 lines)

@@ -0,0 +1,26 @@
{
  "name": "gerry",
  "citizenship": "US",
  "height-in-cm": 197,
  "coder": true,
  "friends": [
    "Moe",
    "Larry",
    "Curly"
  ],
  "employees": [
    {
      "name": "Moe",
      "position": "dev"
    },
    {
      "name": "Larry",
      "position": "ops"
    },
    {
      "name": "Curly",
      "position": "devOps"
    }
  ],
  "poem": "Mary had a little lamb\nIt was very cute\n"
}
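This JSON file encodes the same document as k8s/sampleYaml.yaml, a nice demonstration that JSON is a subset of YAML. With the yq wrapper mentioned in the prepare-vms README (kislyuk/yq, which pipes YAML through `jq`), the two convert into each other:

```bash
yq . k8s/sampleYaml.yaml            # YAML in, JSON out (should match this file)
yq -y . k8s/sampleYamlAsJson.json   # and back to YAML
```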


@@ -2,3 +2,4 @@
base = "slides"
publish = "slides"
command = "./build.sh once"


@@ -1,10 +1,11 @@
 ---
 - hosts: nodes
-  become: yes
+  sudo: true
   vars_files:
   - vagrant.yml
   tasks:
   - name: clean up the home folder
     file:
       path: /home/vagrant/{{ item }}
@@ -23,23 +24,25 @@
   - name: installing dependencies
     apt:
-      name: apt-transport-https,ca-certificates,python3-pip,tmux
+      name: apt-transport-https,ca-certificates,python-pip,tmux
       state: present
       update_cache: true
   - name: fetching docker repo key
     apt_key:
-      url: https://download.docker.com/linux/ubuntu/gpg
       state: present
+      keyserver: hkp://p80.pool.sks-keyservers.net:80
+      id: 58118E89F3A912897C070ADBF76221572C52609D
-  - name: adding docker repo
+  - name: adding package repos
     apt_repository:
-      repo: deb https://download.docker.com/linux/ubuntu focal stable
+      repo: "{{ item }}"
       state: present
+    with_items:
+    - deb https://apt.dockerproject.org/repo ubuntu-trusty main
   - name: installing docker
     apt:
-      name: docker-ce,docker-ce-cli,containerd.io,docker-compose-plugin
+      name: docker-engine
       state: present
       update_cache: true
@@ -53,7 +56,7 @@
     lineinfile:
       dest: /etc/default/docker
       line: DOCKER_OPTS="--host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:55555"
-      regexp: "^#?DOCKER_OPTS=.*$"
+      regexp: '^#?DOCKER_OPTS=.*$'
       state: present
     register: docker_opts
@@ -63,14 +66,22 @@
       state: restarted
     when: docker_opts is defined and docker_opts.changed
-  - name: install docker-compose from official github repo
-    get_url:
-      url: https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64
-      dest: /usr/local/bin/docker-compose
-      mode: "u+x,g+x"
+  - name: performing pip autoupgrade
+    pip:
+      name: pip
+      state: latest
+  - name: installing virtualenv
+    pip:
+      name: virtualenv
+      state: latest
+  - name: Install Docker Compose via PIP
+    pip: name=docker-compose
   - name:
-    file: path="/usr/local/bin/docker-compose"
+    file:
+      path="/usr/local/bin/docker-compose"
       state=file
       mode=0755
       owner=vagrant
@@ -117,3 +128,5 @@
       line: "127.0.0.1 localhost {{ inventory_hostname }}"
+    - regexp: '^127\.0\.1\.1'
+      line: "127.0.1.1 {{ inventory_hostname }}"


@@ -1,12 +1,13 @@
 ---
 vagrant:
-  default_box: ubuntu/focal64
+  default_box: ubuntu/trusty64
   default_box_check_update: true
+  ssh_insert_key: false
   min_memory: 256
   min_cores: 1
 instances:
 - hostname: node1
   private_ip: 10.10.10.10
   memory: 1512
@@ -36,3 +37,6 @@ instances:
   private_ip: 10.10.10.50
   memory: 512
   cores: 1


@@ -17,7 +17,6 @@ These tools can help you to create VMs on:
 - [Parallel SSH](https://github.com/lilydjwg/pssh)
   (should be installable with `pip install git+https://github.com/lilydjwg/pssh`;
   on a Mac, try `brew install pssh`)
-- [yq](https://github.com/kislyuk/yq)
 
 Depending on the infrastructure that you want to use, you also need to install
 the CLI that is specific to that cloud. For OpenStack deployments, you will

@@ -182,23 +182,9 @@ _cmd_clusterize() {
     pssh "
     if [ -f /etc/iptables/rules.v4 ]; then
         sudo sed -i 's/-A INPUT -j REJECT --reject-with icmp-host-prohibited//' /etc/iptables/rules.v4
         sudo netfilter-persistent flush
         sudo netfilter-persistent start
     fi"
-    # oracle-cloud-agent upgrades packages in the background.
-    # This breaks our deployment scripts, because when we invoke apt-get, it complains
-    # that the lock already exists (symptom: random "Exited with error code 100").
-    # Workaround: if we detect oracle-cloud-agent, remove it.
-    # But this agent seems to also take care of installing/upgrading
-    # the unified-monitoring-agent package, so when we stop the snap,
-    # it can leave dpkg in a broken state. We "fix" it with the 2nd command.
-    pssh "
-    if [ -d /snap/oracle-cloud-agent ]; then
-        sudo snap remove oracle-cloud-agent
-        sudo dpkg --remove --force-remove-reinstreq unified-monitoring-agent
-    fi"
     # Copy settings and install Python YAML parser
     pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
     pssh "
@@ -276,14 +262,13 @@ EOF
     "
     ##VERSION## https://github.com/docker/compose/releases
-    COMPOSE_VERSION=v2.11.1
-    COMPOSE_PLATFORM='linux-$(uname -m)'
-    # Just in case you need Compose 1.X, you can use the following lines.
-    # (But it will probably only work for x86_64 machines.)
-    #COMPOSE_VERSION=1.29.2
-    #COMPOSE_PLATFORM='Linux-$(uname -m)'
+    if [ "$ARCHITECTURE" ]; then
+        COMPOSE_VERSION=v2.2.3
+        COMPOSE_PLATFORM='linux-$(uname -m)'
+    else
+        COMPOSE_VERSION=1.29.2
+        COMPOSE_PLATFORM='Linux-$(uname -m)'
+    fi
     pssh "
     set -e
     ### Install docker-compose.
@@ -493,13 +478,12 @@ _cmd_kubetools() {
     # Install kube-ps1
     pssh "
     set -e
-    if ! [ -d /opt/kube-ps1 ]; then
+    if ! [ -f /etc/profile.d/kube-ps1.sh ]; then
         cd /tmp
         git clone https://github.com/jonmosco/kube-ps1
-        sudo mv kube-ps1 /opt/kube-ps1
+        sudo cp kube-ps1/kube-ps1.sh /etc/profile.d/kube-ps1.sh
+        sudo -u $USER_LOGIN sed -i s/docker-prompt/kube_ps1/ /home/$USER_LOGIN/.bashrc &&
     sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc <<EOF
-    . /opt/kube-ps1/kube-ps1.sh
     KUBE_PS1_PREFIX=""
     KUBE_PS1_SUFFIX=""
     KUBE_PS1_SYMBOL_ENABLE="false"


@@ -2,7 +2,7 @@
 #/ /kube-halfday.yml.html 200!
 #/ /kube-fullday.yml.html 200!
 #/ /kube-twodays.yml.html 200!
-/ /kube-adv.yml.html 200!
+/ /kube.yml.html 200!
 
 # And this allows to do "git clone https://container.training".
 /info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack

slides/b (new executable file, 1 line)

@@ -0,0 +1 @@
./markmaker.py kube.yml >out.html


@@ -19,7 +19,7 @@ They abstract the connection details for this services, and can help with:
 * fail over (how do I know to which instance of a replicated service I should connect?)
-* load balancing (how do I spread my requests across multiple instances of a service?)
+* load balancing (how to I spread my requests across multiple instances of a service?)
 * authentication (what if my service requires credentials, certificates, or otherwise?)


@@ -100,7 +100,11 @@ _We will give more details about namespaces and cgroups later._
 * But it is easier to use `docker exec`.
   ```bash
   $ docker exec -ti ticktock sh
+  $ docker ps -lq                       # Get Last Container ID
+  17e4e95e2702
+  $ docker exec -ti 17e4 sh             # a unique prefix of the ID works too
+  $ docker exec -ti $(docker ps -lq) sh # bash-fu version
   ```
 * This creates a new process (running `sh`) _inside_ the container.


@@ -0,0 +1,20 @@
class: title
# High Level Discussion
![image](images/title-understanding-docker-images.png)
---
## White Board Topics
* What is the real problem that containers solve?
* What are the inputs to a Unix Process?
* What is the init Process?
* Userland vs Kernel
* The Root File System
* What is an Overlay File System?
* Wrapping it all up to represent a container image
* Deploying Container images


@@ -0,0 +1,318 @@
class: title
# A Macroscopic View
---
## Macroscopic Items
* The business case for containers
* The problem containers are solving
* What applications need
* What does the OS provide?
---
## What do CIOs worry about?
Who are the CIO's customers?
* Business Units: Need Computers to Run Applications
* Peak Capacity
* CFO: Demanding Budget Justifications
* Spend Less
---
## History of Solutions
For Each Business Application Buy a Machine
* Buy a machine for each application
* Big enough for Peak Load (CPU, Memory, Disk)
The Age of VMs
* Buy bigger machines and chop them up into logical machines
* Distribute your applications as VMs across these machines
* Observe what the application load actually is, and when
* Rebalance accordingly, possibly moving VMs between machines
But Maintaining Machines (Bare Metal or VM) is hard (Patches, Packages, Drivers, etc)
---
## What Developers and Ops worry about
* Getting Software deployed
* Mysterious reasons why deployed application doesn't work
* Developer to Ops:
* "Hey it works on my development machine..."
* "I don't know why it isn't working for ***you***"
* "Everything ***looks*** the same"
* "I have no idea what could be different"
---
## The History of Software Deployment
Software Deployment is just a reproducible way to install files:
* Cards
* Tapes
* Floppy Disks
* Zip/Tar Files
* Installation "Files" (rpm/deb/msi)
* VM Images
---
## What is the Problem Containers are Solving?
It depends on who you are:
* For the CIO: Better resource utilization
* For Ops: Software Distribution
* For the Developer & Ops: Reproducible Environment
<BR><BR>
Ummm, but what exactly are containers....
* Wait a few more slides...
---
## Macroscopic view: Applications and the OS
Applications:
* What are the inputs/outputs to a program?
The OS:
* What does the OS provide?
---
## What are the inputs/outputs to a program?
Explicitly:
* Command Line Arguments
* Environment Variables
* Standard In
* Standard Out/Err
Implicitly (via the File System):
* Configuration Files
* Other Installed Applications
* Any other files
Also Implicitly
* Memory
* Network
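A one-line sketch exercising the explicit channels all at once (args, env, stdin in; stdout out):

```bash
echo "via stdin" | GREETING=hello sh -c 'echo "arg=$1 env=$GREETING"; cat' sh arg1
# prints: arg=arg1 env=hello
#         via stdin
```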
---
## What does the OS provide?
* OS Kernel
* Kernel loaded at boot time
* Sets up disk drives, network cards, other hardware, etc
* Manages all hardware, processes, memory, etc
* Kernel Space
* Low level innards of Kernel (fluid internal API)
* No direct access by applications of most Kernel functionality
* User Space (userland) Processes
* Code running outside the Kernel
* Very stable shim library access from User Space to Kernel Space (Think "fopen")
* The "init" Process
* User Space Process run after Kernel has booted
* Always PID 1
---
## OS Processes
* Created when an application is launched
* Each has a unique Process ID (PID)
* Each process gets its own logical 'view' of all implicit inputs/outputs when the app is launched
* File System ( root directory, / )
* Memory
* Network Adaptors
* Other running processes
---
## What do we mean by "The OS"
Different Linux distributions
* Ubuntu / Debian; Centos / RHEL; Raspberry Pi; etc
What do they have in common?
* They all have a kernel that provides access to Userland (ie fopen)
* They typically have all the commands (bash, sh, ls, grep, ...)
What may be different?
* May use different versions of the Kernel (4.18, 5.4, ...)
* Internally different, but providing same Userland API
* Many other bundled commands, packages and package management tools
* Namely what makes it 'Debian' vs 'Centos'
---
## What might a 'Minimal' Linux be?
You could actually just have:
* A Linux Kernel
* An application (for simplicity a statically linked C program)
* The kernel configured to run that application as its 'init' process
Would you ever do this?
* Why not?
* It certainly would be very secure
---
## So Finally... What are Containers?
A container is just a Linux process that 'thinks' it is its own machine
* With its own 'view' of things like:
* File System ( root directory, / ), Memory, Network Adaptors, Other running processes
* Leverages our understanding that a (logical) Linux Machine is
* A kernel
* A bunch of files ( Maybe a few Environment Variables )
Since it is a process running on a host machine
* It uses the kernel of the host machine
* And of course you need some tools to create the running container process
---
## Container Runtimes and Container Images
The Linux kernel actually has no concept of a container.
* There have been many 'container' technologies
* See [A Brief History of containers: From the 1970's till now](https://blog.aquasec.com/a-brief-history-of-containers-from-1970s-chroot-to-docker-2016)
* Over the years more capabilities have been added to the kernel to make it easier
<BR>
A 'Container technology' is:
* A Container Image Format: the unit of software deployment
* A bundle of all the files and miscellaneous configuration
* A Container Runtime Engine
* Software that takes a Container Image and creates a running container
---
## The Container Runtime War is now Over
The Cloud Native Computing Foundation (CNCF) has standardized containers
* A standard container image format
* A standard for building and configuring container runtimes
* A standard REST API for uploading/downloading container images to/from registries
The primary Container Runtimes are:
* containerd: using the 'docker' Command Line Interface (or Kubernetes)
* CRI-O: using the 'podman' Command Line Interface (or Kubernetes/OpenShift)
* Others exist, for example Singularity, which has a history in HPC
---
## Linux Namespaces Makes Containers Possible
- Provide processes with their own isolated view of the system.
- Namespaces limit what you can see (and therefore, what you can use).
- These namespaces are available in modern kernels:
- pid: processes
- net: network
- mnt: root file system (ie chroot)
- uts: hostname
- ipc: interprocess communication
- user: UID/GID mapping
- time: time
- cgroup: Resource Monitoring and Limiting
- Each process belongs to one namespace of each type.
---
## Namespaces are always active
- Namespaces exist even when you don't use containers.
- This is a bit similar to the UID field in UNIX processes:
- all processes have the UID field, even if no user exists on the system
- the field always has a value / the value is always defined
<br/>
(i.e. any process running on the system has some UID)
- the value of the UID field is used when checking permissions
<br/>
(the UID field determines which resources the process can access)
- You can replace "UID field" with "namespace" above and it still works!
- In other words: even when you don't use containers,
<br/>there is one namespace of each type, containing all the processes on the system.
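A minimal way to see this on any Linux machine, containers or not, using `/proc` and util-linux's `lsns`:

```bash
ls -l /proc/self/ns/      # one symlink per namespace type for this process
sudo lsns --type mnt      # list mount namespaces and their member processes
readlink /proc/1/ns/net /proc/$$/ns/net   # same inode => same net namespace
```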


@@ -0,0 +1,224 @@
class: title
# Our training environment
![SSH terminal](images/title-our-training-environment.jpg)
---
class: in-person
## Connecting to your Virtual Machine
You need an SSH client.
* On OS X, Linux, and other UNIX systems, just use `ssh`:
```bash
$ ssh <login>@<ip-address>
```
* On Windows, if you don't have an SSH client, you can download:
* Putty (www.putty.org)
* Git BASH (https://git-for-windows.github.io/)
* MobaXterm (https://mobaxterm.mobatek.net/)
---
class: in-person
## Connecting to our lab environment
.lab[
- Log into your VM with your SSH client:
```bash
ssh `user`@`A.B.C.D`
```
(Replace `user` and `A.B.C.D` with the user and IP address provided to you)
]
You should see a prompt looking like this:
```
[A.B.C.D] (...) user@node1 ~
$
```
If anything goes wrong — ask for help!
---
## Our Docker VM
About the Lab VM
- The VM is created just before the training.
- It will stay up during the whole training.
- It will be destroyed shortly after the training.
- It comes pre-loaded with Docker and some other useful tools.
---
## Why don't we run Docker locally?
- I can log into your VMs to help you with labs
- Installing docker is out of the scope of this class (lots of online docs)
- It's better to spend time learning containers than fiddling with the installer!
---
class: in-person
## `tailhist`
- The shell history of the instructor is available online in real time
- Note the IP address of the instructor's virtual machine (A.B.C.D)
- Open http://A.B.C.D:1088 in your browser and you should see the history
- The history is updated in real time (using a WebSocket connection)
- It should be green when the WebSocket is connected
(if it turns red, reloading the page should fix it)
- If you want to play with it on your lab machine, tailhist is installed there as well; open its port first:
  - `sudo apt install firewalld`
  - `sudo firewall-cmd --add-port=1088/tcp`
---
## Checking your Virtual Machine
Once logged in, make sure that you can run a basic Docker command:
.small[
```bash
$ docker version
Client:
Version: 18.03.0-ce
API version: 1.37
Go version: go1.9.4
Git commit: 0520e24
Built: Wed Mar 21 23:10:06 2018
OS/Arch: linux/amd64
Experimental: false
Orchestrator: swarm
Server:
Engine:
Version: 18.03.0-ce
API version: 1.37 (minimum version 1.12)
Go version: go1.9.4
Git commit: 0520e24
Built: Wed Mar 21 23:08:35 2018
OS/Arch: linux/amd64
Experimental: false
```
]
If this doesn't work, raise your hand so that an instructor can assist you!
???
:EN:Container concepts
:FR:Premier contact avec les conteneurs
:EN:- What's a container engine?
:FR:- Qu'est-ce qu'un *container engine* ?
---
## Doing or re-doing the workshop on your own?
- Use something like
[Play-With-Docker](http://play-with-docker.com/) or
[Play-With-Kubernetes](https://training.play-with-kubernetes.com/)
Zero setup effort; but environments are short-lived and
might have limited resources
- Create your own cluster (local or cloud VMs)
Small setup effort; small cost; flexible environments
- Create a bunch of clusters for you and your friends
([instructions](https://@@GITREPO@@/tree/master/prepare-vms))
Bigger setup effort; ideal for group training
---
class: self-paced
## Get your own Docker nodes
- If you already have some Docker nodes: great!
- If not: let's get some thanks to Play-With-Docker
.lab[
- Go to http://www.play-with-docker.com/
- Log in
- Create your first node
<!-- ```open http://www.play-with-docker.com/``` -->
]
You will need a Docker ID to use Play-With-Docker.
(Creating a Docker ID is free.)
---
## Terminals
Once in a while, the instructions will say:
<br/>"Open a new terminal."
There are multiple ways to do this:
- create a new window or tab on your machine, and SSH into the VM;
- use screen or tmux on the VM and open a new window from there.
You are welcome to use the method that you feel the most comfortable with.
---
## Tmux cheat sheet
[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.
*You don't have to use it or even know about it to follow along.
<br/>
But some of us like to use it to switch between terminals.
<br/>
It has been preinstalled on your workshop nodes.*
- Ctrl-b c → creates a new window
- Ctrl-b n → go to next window
- Ctrl-b p → go to previous window
- Ctrl-b " → split window top/bottom
- Ctrl-b % → split window left/right
- Ctrl-b Alt-1 → rearrange windows in columns
- Ctrl-b Alt-2 → rearrange windows in rows
- Ctrl-b arrows → navigate to other windows
- Ctrl-b d → detach session
- tmux attach → re-attach to session


@@ -0,0 +1,27 @@
```bash
$ docker run -it debian
root@ef22f9437171:/# apt-get update
root@ef22f9437171:/# apt-get install skopeo
root@ef22f9437171:/# apt-get install wget curl jq
root@ef22f9437171:/# skopeo login docker.io -u containertraining -p testaccount
$ docker commit $(docker ps -lq) skop
```
```bash
root@0ab665194c4f:~# skopeo copy docker://docker.io/containertraining/test-image-0 dir:/root/test-image-0
root@0ab665194c4f:~# cd /root/test-image-0
root@0ab665194c4f:~# jq '.layers[].digest' <manifest.json
```
Stuff in Exploring-images
image-test-0/1/2 + jpg


@@ -0,0 +1,20 @@
FROM busybox
ADD verifyImageFiles.sh /
WORKDIR /play
RUN echo "== LAYER 0 ==" && \
    echo "A is for Aardvark" >A && \
    echo "B is for Beetle" >B && \
    mkdir C/ && \
    echo "A is for Cowboy Allan" >C/CA && \
    mkdir -p C/CB && \
    echo "A is for Cowboy Buffalo Alex" >C/CB/CBA && \
    echo "B is for Cowboy Buffalo Bill" >C/CB/CBB && \
    echo "Z is for Cowboy Zeke" >> C/CZ && \
    mkdir D/ && \
    echo "A is for Detective Alisha" >D/DA && \
    echo "B is for Detective Betty" >D/DB && \
    echo "E is for Elephant" >E && \
    find . >../state.layer-0


@@ -0,0 +1,17 @@
FROM test-image-0
WORKDIR /play
RUN echo "== LAYER 1 == Change File B, Create File C/CC, Add Dir C/CD, Remove File E, Create Dir F, Add File G, Create Empty Dir H" && \
    echo "B is for Butterfly" >B && \
    echo "C is for Cowboy Chuck">C/CC && \
    mkdir -p C/CD && \
    echo "A is for Cowboy Dandy Austin" >C/CD/CDA && \
    rm E && \
    mkdir F && \
    echo "A is for Ferret Albert" >F/FA && \
    echo "G is for Gorilla" >G && \
    mkdir H && \
    find . >../state.layer-1


@@ -0,0 +1,18 @@
FROM test-image-1
WORKDIR /play
RUN echo "== LAYER 2 == Change File C/CZ, Remove File C/CA, Remove Dir C/CB, Remove Dir D / Replace with new Dir D, Remove Dir F, Delete and Recreate File G, Remove Dir H / Create File H" && \
    rm C/CA && \
    rm -rf C/CB && \
    echo "Z is for Cowboy Zoe" >C/CZ && \
    rm -rf D && \
    mkdir -p D && \
    echo "A is for Duplicitous Albatros" >D/DA && \
    rm -rf F && \
    rm G && \
    echo "G is for Geccos" >G && \
    rmdir H && \
    echo "H is for Human" >H && \
    find . >../state.layer-2
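Building the three images in order reproduces the layer-by-layer story; the tags match the `FROM` lines above (the Dockerfile filenames here are assumptions):

```bash
docker build -t test-image-0 -f Dockerfile.test-image-0 .
docker build -t test-image-1 -f Dockerfile.test-image-1 .
docker build -t test-image-2 -f Dockerfile.test-image-2 .
docker history test-image-2    # one layer per RUN step
```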


@@ -0,0 +1,87 @@
clear
baseDir=$(pwd)
rm -rf /tmp/exploringImags
mkdir -p /tmp/exploringImags
cd /tmp/exploringImags
echo "== LAYER 0 =="
echo "A is for Aardvark" >A
echo "B is for Beetle" >B
mkdir C/
echo "A is for Cowboy Allan" >C/CA
mkdir -p C/CB
echo "A is for Cowboy Buffalo Alex" >C/CB/CBA
echo "B is for Cowboy Buffalo Bill" >C/CB/CBB
echo "Z is for Cowboy Zeke" >C/CZ
mkdir D/
echo "A is for Detective Alisha" >D/DA
echo "B is for Detective Betty" >D/DB
echo "E is for Elephant" >E
find . >../state.layer-0
tree | grep -v directories | tee ../tree.layer-0
$baseDir/verifyImageFiles.sh 0 $(pwd)
echo "== LAYER 1 == Change File B, Create File C/CC, Add Dir C/CD, Remove File E, Create Dir F, Add File G, Create Empty Dir H"
echo "B is for Butterfly" >B
echo "C is for Cowboy Chuck">C/CC
mkdir -p C/CD
echo "A is for Cowboy Dandy Austin" >C/CD/CDA
rm E
mkdir F
echo "A is for Ferret Albert" >F/FA
echo "G is for Gorilla" >G
mkdir H
find . >../state.layer-1
tree | grep -v directories | tee ../tree.layer-1
$baseDir/verifyImageFiles.sh 1 $(pwd)
echo "== LAYER 2 == Remove File C/CA, Remove Dir G, Remove Dir D Replace with new Dir D, Remove Dir C/CB, Remove Dir C/CB, Add File H/HA, Add File, Create Dir I"
rm C/CA
rm -rf C/CB
echo "Z is for Cowboy Zoe" >C/CZ
rm -rf D
mkdir -p D
echo "A is for Duplicitous Albatros" >D/DA
rm -rf F
rm -rf G
echo "G is for Geccos" >G
rmdir H
echo "H is for Human" >H
find . >../state.layer-2
tree | grep -v directories | tee ../tree.layer-2
$baseDir/verifyImageFiles.sh 2 $(pwd)


@@ -0,0 +1,88 @@
#!/bin/bash

fileContentsCompare() {
    layer=$1
    text=$2
    file=$(pwd)/$3
    if [ -f "$file" ]; then
        fileContents=$(cat $file)
        if [ "$fileContents" != "$text" ]; then
            echo In Layer $layer Unexpected contents in file: $file
            echo -- Contents: $fileContents
            echo -- Expected: $text
        fi
    else
        echo Missing File $file in Layer $layer
    fi
}

checkLayer() {
    layer=$1
    find . >/tmp/state
    if [[ $(diff /tmp/state $targetDir/../state.layer-$layer) ]]; then
        echo Directory Structure mismatch in layer: $layer
        diff /tmp/state $targetDir/../state.layer-$layer
    fi
    case $layer in
        0)
            fileContentsCompare $layer "A is for Aardvark" A
            fileContentsCompare $layer "B is for Beetle" B
            fileContentsCompare $layer "A is for Cowboy Allan" C/CA
            fileContentsCompare $layer "A is for Cowboy Buffalo Alex" C/CB/CBA
            fileContentsCompare $layer "B is for Cowboy Buffalo Bill" C/CB/CBB
            fileContentsCompare $layer "Z is for Cowboy Zeke" C/CZ
            fileContentsCompare $layer "A is for Detective Alisha" D/DA
            fileContentsCompare $layer "B is for Detective Betty" D/DB
            fileContentsCompare $layer "E is for Elephant" E
            ;;
        # "== LAYER 1 == Change File B, Create File C/CC, Add Dir C/CD, Remove File E, Create Dir F, Add File G, Create Empty Dir H"
        1)
            fileContentsCompare $layer "A is for Aardvark" A
            fileContentsCompare $layer "B is for Butterfly" B                  ## CHANGED FILE B
            fileContentsCompare $layer "A is for Cowboy Allan" C/CA
            fileContentsCompare $layer "A is for Cowboy Buffalo Alex" C/CB/CBA
            fileContentsCompare $layer "B is for Cowboy Buffalo Bill" C/CB/CBB
            fileContentsCompare $layer "C is for Cowboy Chuck" C/CC            ## ADDED FILE C/CC
            fileContentsCompare $layer "A is for Cowboy Dandy Austin" C/CD/CDA ## ADDED DIR C/CD, ADDED FILE C/CD/CDA
            fileContentsCompare $layer "Z is for Cowboy Zeke" C/CZ
            fileContentsCompare $layer "A is for Detective Alisha" D/DA
            fileContentsCompare $layer "B is for Detective Betty" D/DB
            ## REMOVED FILE E
            fileContentsCompare $layer "A is for Ferret Albert" F/FA           ## ADDED DIR F, ADDED FILE F/FA
            fileContentsCompare $layer "G is for Gorilla" G                    ## ADDED G
            ## CREATED EMPTY DIR H
            ;;
        # "== LAYER 2 == Remove File C/CA, Remove Dir C/CB, Remove Dir D / Replace with new Dir D, Delete and Recreate File G, Remove Dir H / Create File H"
        2)
            fileContentsCompare $layer "A is for Aardvark" A
            fileContentsCompare $layer "B is for Butterfly" B
            ## REMOVED FILE C/CA
            ## REMOVED DIR C/CB
            fileContentsCompare $layer "C is for Cowboy Chuck" C/CC
            fileContentsCompare $layer "A is for Cowboy Dandy Austin" C/CD/CDA
            fileContentsCompare $layer "Z is for Cowboy Zoe" C/CZ              ## CHANGED FILE C/CZ
            ## REMOVED DIR D
            fileContentsCompare $layer "A is for Duplicitous Albatros" D/DA    ## RECREATED DIR D, ADDED FILE D/DA
            fileContentsCompare $layer "G is for Geccos" G                     ## DELETED FILE G, ADDED FILE G (Implicit CHANGE)
            fileContentsCompare $layer "H is for Human" H                      ## ADDED FILE H
            ;;
    esac
}

layer=$1
targetDir=$2
echo VERIFYING LAYER $layer
checkLayer $layer

Binary file not shown (image, 219 KiB).


@@ -0,0 +1,120 @@
# Container Based Software Deployment
---
class: pic
![dummmy](containers/software-deployment/slide-1.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-2.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-3.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-4.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-5.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-6.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-7.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-8.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-9.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-10.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-11.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-12.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-13.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-14.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-15.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-16.jpg)
---
class: pic
![dummmy](containers/software-deployment/slide-17.jpg)

17 binary image files not shown (presumably the slide-*.jpg images referenced above): 51, 106, 126, 129, 123, 154, 110, 138, 152, 99, 62, 109, 141, 92, 139, 97, and 148 KiB.


@@ -4,6 +4,6 @@
 (we will use the `rng` service in the dockercoins app)
-- See what happens when the load increases
+- See what happens when the load increses
 (spoiler alert: it involves timeouts!)


@@ -1,7 +0,0 @@
## Exercise — Network Policies
- Implement a system with 3 levels of security
(private pods, public pods, namespace pods)
- Apply it to the DockerCoins demo app


@@ -1,63 +0,0 @@
# Exercise — Network Policies
We want to implement a generic network security mechanism.
Instead of creating one policy per service, we want to
create a fixed number of policies, and use a single label
to indicate the security level of our pods.
Then, when adding a new service to the stack, instead
of writing a new network policy for that service, we
only need to add the right label to the pods of that service.
---
## Specifications
We will use the label `security` to classify our pods.
- If `security=private`:
*the pod shouldn't accept any traffic*
- If `security=public`:
*the pod should accept all traffic*
- If `security=namespace`:
*the pod should only accept connections coming from the same namespace*
If `security` isn't set, assume it's `private`.
---
## Test setup
- Deploy a copy of the DockerCoins app in a new namespace
- Modify the pod templates so that:
- `webui` has `security=public`
- `worker` has `security=private`
- `hasher`, `redis`, `rng` have `security=namespace`
---
## Implement and test policies
- Write the network policies
(feel free to draw inspiration from the ones we've seen so far)
- Check that:
- you can connect to the `webui` from outside the cluster
- the application works correctly (shows 3-4 hashes/second)
- you cannot connect to the `hasher`, `redis`, `rng` services
- you cannot connect or even ping the `worker` pods


@@ -1,9 +0,0 @@
## Exercise — RBAC
- Create two namespaces for users `alice` and `bob`
- Give each user full access to their own namespace
- Give each user read-only access to the other's namespace
- Let `alice` view the nodes of the cluster as well


@@ -1,97 +0,0 @@
# Exercise — RBAC
We want to:
- Create two namespaces for users `alice` and `bob`
- Give each user full access to their own namespace
- Give each user read-only access to the other's namespace
- Let `alice` view the nodes of the cluster as well
---
## Initial setup
- Create two namespaces named `alice` and `bob`
- Check that if we impersonate Alice, we can't access her namespace yet:
```bash
kubectl --as alice get pods --namespace alice
```
---
## Access for Alice
- Grant Alice full access to her own namespace
(you can use a pre-existing Cluster Role)
- Check that Alice can create stuff in her namespace:
```bash
kubectl --as alice create deployment hello --image nginx --namespace alice
```
- But that she can't create stuff in Bob's namespace:
```bash
kubectl --as alice create deployment hello --image nginx --namespace bob
```
---
## Access for Bob
- Similarly, grant Bob full access to his own namespace
- Check that Bob can create stuff in his namespace:
```bash
kubectl --as bob create deployment hello --image nginx --namespace bob
```
- But that he can't create stuff in Alice's namespace:
```bash
kubectl --as bob create deployment hello --image nginx --namespace alice
```
---
## Read-only access
- Now, give Alice read-only access to Bob's namespace
- Check that Alice can view Bob's stuff:
```bash
kubectl --as alice get pods --namespace bob
```
- But that she can't touch this:
```bash
kubectl --as alice delete pods --namespace bob --all
```
- Likewise, give Bob read-only access to Alice's namespace
---
## Nodes
- Give Alice read-only access to the cluster nodes
(this will require creating a custom Cluster Role)
- Check that Alice can view the nodes:
```bash
kubectl --as alice get nodes
```
- But that Bob cannot:
```bash
kubectl --as bob get nodes
```
- And that Alice can't update nodes:
```bash
kubectl --as alice label nodes --all hello=world
```


@@ -1,17 +0,0 @@
# Interlude
- As mentioned earlier:
*the content of this course will be adapted to suit your needs!*
- Please take a look at the form that we just shared in Slack
(you don't need to fill it *right now*)
- If there are parts that you are curious about, ask us now!
- We'll ask you to fill the form after today's session
(before the end of the day, basically, so we can process the results by tomorrow)
- Thank you!

slides/intro-fullday.yml (new file, 71 lines)

@@ -0,0 +1,71 @@
title: |
  Introduction
  to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
#- containers/Start_And_Attach.md
- containers/Naming_And_Inspecting.md
#- containers/Labels.md
- containers/Getting_Inside.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Container_Networking_Basics.md
#- containers/Network_Drivers.md
- containers/Local_Development_Workflow.md
- containers/Container_Network_Model.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Multi_Stage_Builds.md
#- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
#- containers/Docker_Machine.md
#- containers/Advanced_Dockerfiles.md
#- containers/Buildkit.md
#- containers/Init_Systems.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Container_Engines.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md
#- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md


@@ -0,0 +1,72 @@
title: |
  Introduction
  to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
content:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Windows_Containers.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Buildkit.md
- containers/Init_Systems.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Pods_Anatomy.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md

slides/intro-twodays.yml Normal file

@@ -0,0 +1,80 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Dockerfile_Tips.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Exercise_Dockerfile_Advanced.md
-
- containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Start_And_Attach.md
- containers/Getting_Inside.md
- containers/Resource_Limits.md
- # DAY 2
- containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
-
- containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Installing_Docker.md
- containers/Container_Engines.md
- containers/Init_Systems.md
- containers/Advanced_Dockerfiles.md
- containers/Buildkit.md
-
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Orchestration_Overview.md
-
- shared/thankyou.md
- containers/links.md
#-
#- containers/Docker_Machine.md
#- containers/Ambassadors.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md


@@ -0,0 +1,46 @@
# External References && kubectl Aliases
Class Slides: https://2022-09-nr1.container.training/
Kubectl Cheat Sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/
Kubernetes API Object and kubectl Explorers
- https://github.com/GerrySeidman/Kubernetes-Explorer
Gerry's Kubernetes Storage Conference Talks
- Vault '20: https://www.usenix.org/conference/vault20/presentation/seidman
- Data and Dev '21: https://www.youtube.com/watch?v=k_8rWPwJ_38
Gerry Seidman's Info
- gerry.seidman@ardanlabs.com
- https://www.linkedin.com/in/gerryseidman/
---
## Kubectl Aliases
```bash
alias k='kubectl'
alias kg='kubectl get'
alias kl='kubectl logs'
alias ka='kubectl apply -f'
alias kd='kubectl delete'
alias kdf='kubectl delete -f'
alias kb='kubectl describe'
alias kex='kubectl explain'
alias kx='kubectl expose'
alias kr='kubectl run'
alias ke='kubectl edit'
```
Note: the following helper script is only needed because of a quirk in how the lab VMs were installed:
```bash
echo 'kubectl exec -it $1 -- /bin/sh' >kx
chmod +x kx
sudo mv kx /usr/local/bin/kx
```
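Usage then looks like this (the pod name is illustrative):
```bash
kx web-55d9f57b4c-x7k2p   # opens an interactive shell inside that pod
```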


@@ -203,12 +203,12 @@ What does that mean?
## Let's experiment a bit!
- The examples in this section require a Kubernetes cluster
(any local development cluster will suffice)
- For this section, connect to the first node of the `test` cluster
.lab[
- SSH to the first node of the test cluster
- Check that the cluster is operational:
```bash
kubectl get nodes


@@ -246,7 +246,7 @@ class: extra-details
(they don't require hand-editing a file and restarting the API server)
- A service account can be associated with a set of secrets
- A service account is associated with a set of secrets
(the kind that you can view with `kubectl get secrets`)
@@ -256,28 +256,6 @@ class: extra-details
---
## Service account tokens evolution
- In Kubernetes 1.21 and above, pods use *bound service account tokens*:
- these tokens are *bound* to a specific object (e.g. a Pod)
- they are automatically invalidated when the object is deleted
- these tokens also expire quickly (e.g. 1 hour) and get rotated automatically
- In Kubernetes 1.24 and above, unbound tokens aren't created automatically
- before 1.24, we would see unbound tokens with `kubectl get secrets`
- with 1.24 and above, these tokens can be created with `kubectl create token`
- ...or with a Secret with the right [type and annotation][create-token]
[create-token]: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#to-create-additional-api-tokens
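For example, minting a short-lived token for a service account could look like this (the account name and duration are illustrative):
```bash
kubectl create token default --duration=1h
```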
---
class: extra-details
## Checking our authentication method
@@ -412,10 +390,6 @@ class: extra-details
It should be named `default-token-XXXXX`.
When running Kubernetes 1.24 and above, this Secret won't exist.
<br/>
Instead, create a token with `kubectl create token default`.
---
class: extra-details


@@ -0,0 +1,370 @@
# Kubernetes Architecture
- The Kubernetes Architecture is minimal
- Kubernetes runs in Kubernetes (for the most part)
- Orchestration is done by a collection of software operators
- You can even write your own operators
---
class: pic
![haha only kidding](images/k8s-arch1.png)
---
## Kubernetes architecture
- Ha ha ha ha ha
- OK, I was trying to scare you, it's much simpler than that ❤️
---
class: pic
![that one is more like the real thing](images/k8s-arch2.png)
---
## Credits
- The first schema is a Kubernetes cluster with storage backed by multi-path iSCSI
(Courtesy of [Yongbok Kim](https://www.yongbok.net/blog/))
- The second one is a simplified representation of a Kubernetes cluster
(Courtesy of [Imesh Gunaratne](https://medium.com/containermind/a-reference-architecture-for-deploying-wso2-middleware-on-kubernetes-d4dee7601e8e))
---
## Kubernetes architecture: the nodes
- The nodes executing our containers run a collection of services:
- a container engine (typically Docker)
- kubelet (the "node agent")
- kube-proxy (a necessary but not sufficient network component)
- Nodes were formerly called "minions"
(You might see that word in older articles or documentation)
---
## Kubernetes architecture: the control plane
- The Kubernetes logic (its "brains") is a collection of services:
- the API server (our point of entry to everything!)
- core services like the scheduler and controller manager
- `etcd` (a highly available key/value store; the "database" of Kubernetes)
- Together, these services form the control plane of our cluster
- The control plane is also called the "master"
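On kubeadm-style clusters, these control plane services themselves run as pods, so a quick way to see them is (a sketch; managed clusters usually hide the control plane):
```bash
kubectl get pods --namespace=kube-system
```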
---
class: pic
![One of the best Kubernetes architecture diagrams available](images/k8s-arch4-thanks-luxas.png)
---
class: extra-details
## Running the control plane on special nodes
- It is common to reserve a dedicated node for the control plane
(Except for single-node development clusters, like when using minikube)
- This node is then called a "master"
(Yes, this is ambiguous: is the "master" a node, or the whole control plane?)
- Normal applications are restricted from running on this node
(By using a mechanism called ["taints"](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/))
- When high availability is required, each service of the control plane must be resilient
- The control plane is then replicated on multiple nodes
(This is sometimes called a "multi-master" setup)
---
class: extra-details
## Running the control plane outside containers
- The services of the control plane can run in or out of containers
- For instance: since `etcd` is a critical service, some people
deploy it directly on a dedicated cluster (without containers)
(This is illustrated on the first "super complicated" schema)
- In some hosted Kubernetes offerings (e.g. AKS, GKE, EKS), the control plane is invisible
(We only "see" a Kubernetes API endpoint)
- In that case, there is no "master node"
*For this reason, it is more accurate to say "control plane" rather than "master."*
---
class: pic
![](images/control-planes/single-node-dev.svg)
---
class: pic
![](images/control-planes/managed-kubernetes.svg)
---
class: pic
![](images/control-planes/single-control-and-workers.svg)
---
class: pic
![](images/control-planes/stacked-control-plane.svg)
---
class: pic
![](images/control-planes/non-dedicated-stacked-nodes.svg)
---
class: pic
![](images/control-planes/advanced-control-plane.svg)
---
class: pic
![](images/control-planes/advanced-control-plane-split-events.svg)
---
class: extra-details
## How many nodes should a cluster have?
- There is no particular constraint
(no need to have an odd number of nodes for quorum)
- A cluster can have zero nodes
(but then it won't be able to start any pods)
- For testing and development, having a single node is fine
- For production, make sure that you have extra capacity
(so that your workload still fits if you lose a node or a group of nodes)
- Kubernetes is tested with [up to 5000 nodes](https://kubernetes.io/docs/setup/best-practices/cluster-large/)
(however, running a cluster of that size requires a lot of tuning)
---
class: extra-details
## Do we need to run Docker at all?
No!
--
- By default, Kubernetes uses the Docker Engine to run containers
- We can leverage other pluggable runtimes through the *Container Runtime Interface*
- <del>We could also use `rkt` ("Rocket") from CoreOS</del> (deprecated)
---
class: extra-details
## Some runtimes available through CRI
- [containerd](https://github.com/containerd/containerd/blob/master/README.md)
- maintained by Docker, IBM, and community
- used by Docker Engine, microk8s, k3s, GKE; also standalone
- comes with its own CLI, `ctr`
- [CRI-O](https://github.com/cri-o/cri-o/blob/master/README.md):
- maintained by Red Hat, SUSE, and community
- used by OpenShift and Kubic
- designed specifically as a minimal runtime for Kubernetes
- [And more](https://kubernetes.io/docs/setup/production-environment/container-runtimes/)
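A quick way to check which runtime a given cluster uses is the wide node listing; the `CONTAINER-RUNTIME` column shows e.g. `containerd://1.6.x` (exact output varies by version):
```bash
kubectl get nodes -o wide
```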
---
class: extra-details
## Do we need to run Docker at all?
Yes!
--
- In this workshop, we run our app on a single node first
- We will need to build images and ship them around
- We can do these things without Docker
<br/>
(and get diagnosed with NIH¹ syndrome)
- Docker is still the most stable container engine today
<br/>
(but other options are maturing very quickly)
.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)]
---
class: extra-details
## Do we need to run Docker at all?
- On our development environments, CI pipelines ... :
*Yes, almost certainly*
- On our production servers:
*Yes (today)*
*Probably not (in the future)*
.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)]
---
## Interacting with Kubernetes
- We will interact with our Kubernetes cluster through the Kubernetes API
- The Kubernetes API is (mostly) RESTful
- It allows us to create, read, update, delete *resources*
- A few common resource types are:
- node (a machine — physical or virtual — in our cluster)
- pod (group of containers running together on a node)
- service (stable network endpoint to connect to one or multiple containers)
---
class: pic
![Node, pod, container](images/k8s-arch3-thanks-weave.png)
---
## Scaling
- How would we scale the pod shown on the previous slide?
- **Do** create additional pods
- each pod can be on a different node
- each pod will have its own IP address
- **Do not** add more NGINX containers in the pod
- all the NGINX containers would be on the same node
- they would all have the same IP address
<br/>(resulting in `Address already in use` errors)
---
## Together or separate
- Should we put e.g. a web application server and a cache together?
<br/>
("cache" being something like e.g. Memcached or Redis)
- Putting them **in the same pod** means:
- they have to be scaled together
- they can communicate very efficiently over `localhost`
- Putting them **in different pods** means:
- they can be scaled separately
- they must communicate over remote IP addresses
<br/>(incurring more latency, lower performance)
- Both scenarios can make sense, depending on our goals
---
## Credits
- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
- it's one of the best Kubernetes architecture diagrams available!
- The second diagram is courtesy of Weave Works
- a *pod* can have multiple containers working together
- IP addresses are associated with *pods*, not with individual containers
Both diagrams used with permission.
???
:EN:- Kubernetes concepts
:FR:- Kubernetes en théorie


@@ -0,0 +1,101 @@
# Kubernetes concepts
- Kubernetes is a container management system
- It runs and manages containerized applications on a cluster
--
- What does that really mean?
---
## What can we do with Kubernetes?
- Let's imagine that we have a 3-tier e-commerce app:
- web frontend
- API backend
- database (that we will keep out of Kubernetes for now)
- We have built images for our frontend and backend components
(e.g. with Dockerfiles and `docker build`)
- We are running them successfully with a local environment
(e.g. with Docker Compose)
- Let's see how we would deploy our app on Kubernetes!
---
## Basic things we can ask Kubernetes to do
--
- Start 5 containers using image `atseashop/api:v1.3`
--
- Place an internal load balancer in front of these containers
--
- Start 10 containers using image `atseashop/webfront:v1.3`
--
- Place a public load balancer in front of these containers
--
- It's Black Friday (or Christmas) and traffic spikes: grow our cluster and add containers
--
- New release! Replace my containers with the new image `atseashop/webfront:v1.4`
--
- Keep processing requests during the upgrade; update my containers one at a time
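Expressed with `kubectl`, the requests above could look roughly like this (a sketch using the slide's fictional images; the ports and container name are assumptions):
```bash
kubectl create deployment api --image=atseashop/api:v1.3 --replicas=5
kubectl expose deployment api --port=80 --target-port=5000          # internal LB
kubectl create deployment webfront --image=atseashop/webfront:v1.3 --replicas=10
kubectl expose deployment webfront --type=LoadBalancer --port=80    # public LB
kubectl set image deployment/webfront webfront=atseashop/webfront:v1.4   # rolling update
```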
---
## Other things that Kubernetes can do for us
- Autoscaling
(straightforward on CPU; more complex on other metrics)
- Resource management and scheduling
(reserve CPU/RAM for containers; placement constraints)
- Advanced rollout patterns
(blue/green deployment, canary deployment)
---
## More things that Kubernetes can do for us
- Batch jobs
(one-off; parallel; also cron-style periodic execution)
- Fine-grained access control
(defining *what* can be done by *whom* on *which* resources)
- Stateful services
(databases, message queues, etc.)
- Automating complex tasks with *operators*
(e.g. database replication, failover, etc.)


@@ -1,62 +1,46 @@
# Healthchecks
- Containers can have *healthchecks* (also called "probes")
- Containers can have *healthchecks*
- There are three kinds of healthchecks, corresponding to different use-cases:
- There are three kinds of healthchecks, corresponding to very different use-cases:
`startupProbe`, `readinessProbe`, `livenessProbe`
- liveness = detect when a container is "dead" and needs to be restarted
- readiness = detect when a container is ready to serve traffic
- startup = detect whether a container has finished booting
- These healthchecks are optional (we can use none, all, or some of them)
- Different probes are available:
- Different probes are available (HTTP request, TCP connection, program execution)
HTTP GET, TCP connection, arbitrary program execution, GRPC
- All these probes have a binary result (success/failure)
- Probes that aren't defined will default to a "success" result
- Let's see the difference and how to use them!
---
## Use-cases in brief
*My container takes a long time to boot before being able to serve traffic.*
→ use a `startupProbe` (but often a `readinessProbe` can also do the job)
*Sometimes, my container is unavailable or overloaded, and needs to e.g. be taken temporarily out of load balancer rotation.*
→ use a `readinessProbe`
*Sometimes, my container enters a broken state which can only be fixed by a restart.*
→ use a `livenessProbe`
---
## Liveness probes
## Liveness probe
*This container is dead, we don't know how to fix it, other than restarting it.*
- Check if the container is dead or alive
- Indicates if the container is dead or alive
- If Kubernetes determines that the container is dead:
- A dead container cannot come back to life
- it terminates the container gracefully
- If the liveness probe fails, the container is killed (destroyed)
- it restarts the container (unless the Pod's `restartPolicy` is `Never`)
(to make really sure that it's really dead; no zombies or undead!)
- With the default parameters, it takes:
- What happens next depends on the pod's `restartPolicy`:
- up to 30 seconds to determine that the container is dead
- `Never`: the container is not restarted
- up to 30 seconds to terminate it
- `OnFailure` or `Always`: the container is restarted
---
## When to use a liveness probe
- To detect failures that can't be recovered
- To indicate failures that can't be recovered
- deadlocks (causing all requests to time out)
@@ -64,45 +48,47 @@
- Anything where our incident response would be "just restart/reboot it"
---
## Liveness probes gotchas
.warning[**Do not** use liveness probes for problems that can't be fixed by a restart]
- Otherwise we just restart our pods for no reason, creating useless load
.warning[**Do not** depend on other services within a liveness probe]
---
- Otherwise we can experience cascading failures
## Readiness probe (1)
(example: a web server liveness probe that makes a request to a database)
*Make sure that a container is ready before continuing a rolling update.*
.warning[**Make sure** that liveness probes respond quickly]
- Indicates if the container is ready to handle traffic
- The default probe timeout is 1 second (this can be tuned!)
- When doing a rolling update, the Deployment controller waits for Pods to be ready
- If the probe takes longer than that, it will eventually cause a restart
(a Pod is ready when all the containers in the Pod are ready)
- Improves reliability and safety of rolling updates:
- don't roll out a broken version (that doesn't pass readiness checks)
- don't lose processing capacity during a rolling update
---
## Readiness probes
## Readiness probe (2)
*Sometimes, my container "needs a break".*
*Temporarily remove a container (overloaded or otherwise) from a Service load balancer.*
- Check if the container is ready or not
- A container can mark itself "not ready" temporarily
- If the container is not ready, its Pod is not ready
(e.g. if it's overloaded or needs to reload/restart/garbage collect...)
- If the Pod belongs to a Service, it is removed from its Endpoints
- If a container becomes "unready" it might be ready again soon
(it stops receiving new connections but existing ones are not affected)
- If the readiness probe fails:
- If there is a rolling update in progress, it might pause
- the container is *not* killed
(Kubernetes will try to respect the MaxUnavailable parameter)
- if the pod is a member of a service, it is temporarily removed
- As soon as the readiness probe succeeds again, everything goes back to normal
- it is re-added as soon as the readiness probe passes again
---
@@ -116,31 +102,67 @@
- To indicate temporary failure or unavailability
- runtime is busy doing garbage collection or (re)loading data
- application can only service *N* parallel connections
- new connections will be directed to other Pods
- runtime is busy doing garbage collection or initial data load
- To redirect new connections to other Pods
(e.g. fail the readiness probe when the Pod's load is too high)
---
## Startup probes
## Dependencies
*My container takes a long time to boot before being able to serve traffic.*
- If a web server depends on a database to function, and the database is down:
- After creating a container, Kubernetes runs its startup probe
- the web server's liveness probe should succeed
- The container will be considered "unhealthy" until the probe succeeds
- the web server's readiness probe should fail
- As long as the container is "unhealthy", its Pod...:
- Same thing for any hard dependency (without which the container can't work)
- is not added to Services' endpoints
.warning[**Do not** fail liveness probes for problems that are external to the container]
- is not considered as "available" for rolling update purposes
---
- Readiness and liveness probes are enabled *after* startup probe reports success
## Timing and thresholds
(if there is no startup probe, readiness and liveness probes are enabled right away)
- Probes are executed at intervals of `periodSeconds` (default: 10)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
.warning[If a probe takes longer than that, it is considered as a FAIL]
- A probe is considered successful after `successThreshold` successes (default: 1)
- A probe is considered failing after `failureThreshold` failures (default: 3)
- A probe can have an `initialDelaySeconds` parameter (default: 0)
- Kubernetes will wait that amount of time before running the probe for the first time
(this is important to avoid killing services that take a long time to start)
---
## Startup probe
*The container takes too long to start, and is killed by the liveness probe!*
- By default, probes (including liveness) start immediately
- With the default probe interval and failure threshold:
*a container must respond in less than 30 seconds, or it will be killed!*
- There are two ways to avoid that:
- set `initialDelaySeconds` (a fixed, rigid delay)
- use a `startupProbe`
- Kubernetes will run only the startup probe, and when it succeeds, run the other probes
---
@@ -156,296 +178,121 @@
---
## Startup probes gotchas
- When defining a `startupProbe`, we almost always want to adjust its parameters
(specifically, its `failureThreshold`; this is explained in the next slide)
- Otherwise, if the container fails to start within 30 seconds...
*Kubernetes terminates the container and restarts it!*
- Sometimes, it's easier/simpler to use a `readinessProbe` instead
(except when also using a `livenessProbe`)
---
## Timing and thresholds
- Probes are executed at intervals of `periodSeconds` (default: 10)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
.warning[If a probe takes longer than that, it is considered as a FAIL]
.warning[For liveness probes **and startup probes** this terminates and restarts the container]
- A probe is considered successful after `successThreshold` successes (default: 1)
- A probe is considered failing after `failureThreshold` failures (default: 3)
- All these parameters can be set independently for each probe
---
class: extra-details
## `initialDelaySeconds`
- A probe can have an `initialDelaySeconds` parameter (default: 0)
- Kubernetes will wait that amount of time before running the probe for the first time
- It is generally better to use a `startupProbe` instead
(but this parameter did exist before startup probes were implemented)
---
class: extra-details
## `readinessProbe` vs `startupProbe`
- A lot of blog posts / documentations / tutorials recommend readiness probes...
- ...even in scenarios where a startup probe would seem more appropriate!
- This is because startup probes are relatively recent
(they reached GA status in Kubernetes 1.20)
- When there is no `livenessProbe`, using a `readinessProbe` is simpler:
- a `startupProbe` generally requires changing the `failureThreshold`
- a `startupProbe` generally also requires a `readinessProbe`
- a single `readinessProbe` can fulfill both roles
---
## Different types of probes
- Kubernetes supports the following mechanisms:
- HTTP request
- `exec` (arbitrary program execution)
- specify URL of the request (and optional headers)
- `httpGet` (HTTP GET request)
- any status code between 200 and 399 indicates success
- `tcpSocket` (check if a TCP port is accepting connections)
- TCP connection
- `grpc` (standard [GRPC Health Checking Protocol][grpc])
- the probe succeeds if the TCP port is open
- All probes give binary results ("it works" or "it doesn't")
- arbitrary exec
- Let's see the specific details for each of them!
- a command is executed in the container
[grpc]: https://grpc.github.io/grpc/core/md_doc_health-checking.html
- exit status of zero indicates success
---
## `exec`
## Benefits of using probes
- Runs an arbitrary program *inside* the container
- Rolling updates proceed when containers are *actually ready*
(like with `kubectl exec` or `docker exec`)
(as opposed to merely started)
- The program must be available in the container image
- Containers in a broken state get killed and restarted
- Kubernetes uses the exit status of the program
(instead of serving errors or timeouts)
(standard UNIX convention: 0 = success, anything else = failure)
- Unavailable backends get removed from load balancer rotation
(thus improving response times across the board)
- If a probe is not defined, it's as if there was an "always successful" probe
---
## `exec` example
## Example: HTTP probe
When the worker is ready, it should create `/tmp/ready`.
<br/>
The following probe will give it 5 minutes to do so.
Here is a pod template for the `rng` web service of the DockerCoins app:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: queueworker
name: healthy-app
spec:
containers:
- name: worker
image: myregistry.../worker:v1.0
startupProbe:
exec:
command:
- test
- -f
- /tmp/ready
failureThreshold: 30
```
---
## Using shell constructs
- If we want to use pipes, conditionals, etc. we should invoke a shell
- Example:
```yaml
exec:
command:
- sh
- -c
- "curl http://localhost:5000/status | jq .ready | grep true"
```
---
## `httpGet`
- Make an HTTP GET request to the container
- The request will be made by Kubelet
(doesn't require extra binaries in the container image)
- `port` must be specified
- `path` and extra `httpHeaders` can be specified optionally
- Kubernetes uses HTTP status code of the response:
- 200-399 = success
- anything else = failure
---
## `httpGet` example
The following liveness probe restarts the container if it stops responding on `/healthz`:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: frontend
spec:
containers:
- name: frontend
image: myregistry.../frontend:v1.0
- name: myapp
image: myregistry.io/myapp:v1.0
livenessProbe:
httpGet:
path: /health
port: 80
path: /healthz
periodSeconds: 5
```
---
## `tcpSocket`
- Kubernetes checks if the indicated TCP port accepts connections
- There is no additional check
.warning[It's quite possible for a process to be broken, but still accept TCP connections!]
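A minimal `tcpSocket` readiness probe might look like this (the port is illustrative):
```yaml
readinessProbe:
  tcpSocket:
    port: 6379
  periodSeconds: 10
```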
If the backend serves an error, or takes longer than 1s, 3 times in a row, it gets killed.
---
## `grpc`
## Example: exec probe
<!-- ##VERSION## -->
Here is a pod template for a Redis server:
- Available in beta since Kubernetes 1.24
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: redis-with-liveness
spec:
  containers:
  - name: redis
    image: redis
    livenessProbe:
      exec:
        command: ["redis-cli", "ping"]
```
- Leverages standard [GRPC Health Checking Protocol][grpc]
[grpc]: https://grpc.github.io/grpc/core/md_doc_health-checking.html
If the Redis process becomes unresponsive, it will be killed.
---
## Best practices for healthchecks
## Questions to ask before adding healthchecks
- Readiness probes are almost always beneficial
- Do we want liveness, readiness, both?
- don't hesitate to add them early!
(sometimes, we can use the same check, but with different failure thresholds)
- we can even make them *mandatory*
- Do we have existing HTTP endpoints that we can use?
- Be more careful with liveness and startup probes
- Do we need to add new endpoints, or perhaps use something else?
- they aren't always necessary
- Are our healthchecks likely to use resources and/or slow down the app?
- they can even cause harm
- Do they depend on additional services?
(this can be particularly tricky, see next slide)
---
## Readiness probes
## Healthchecks and dependencies
- Almost always beneficial
- Liveness checks should not be influenced by the state of external services
- Exceptions:
- All checks should reply quickly (by default, less than 1 second)
- web service that doesn't have a dedicated "health" or "ping" route
- Otherwise, they are considered to fail
- ...and all requests are "expensive" (e.g. lots of external calls)
- This might require checking the health of dependencies asynchronously
---
## Liveness probes
- If we're not careful, we end up restarting containers for no reason
(which can cause additional load on the cluster, cascading failures, data loss, etc.)
- Suggestion:
- don't add liveness probes immediately
- wait until you have a bit of production experience with that code
- then add narrow-scoped healthchecks to detect specific failure modes
- Readiness and liveness probes should be different
(different check *or* different timeouts *or* different thresholds)
---
## Startup probes
- Only beneficial for containers that need a long time to start
(more than 30 seconds)
- If there is no liveness probe, it's simpler to just use a readiness probe
(since we probably want to have a readiness probe anyway)
- In other words, startup probes are useful in one situation:
*we have a liveness probe, AND the container needs a lot of time to start*
- Don't forget to change the `failureThreshold`
(otherwise the container will fail to start and be killed)
---
## Recap of the gotchas
- The default timeout is 1 second
- if a probe takes longer than 1 second to reply, Kubernetes considers that it fails
- this can be changed by setting the `timeoutSeconds` parameter
<br/>(or refactoring the probe)
- Liveness probes should not be influenced by the state of external services
- Liveness probes and readiness probes should have different parameters
- For startup probes, remember to increase the `failureThreshold`
(e.g. if a database or API might be healthy but still take more than
1 second to reply, we should check the status asynchronously and report
a cached status)
---
@@ -453,21 +300,21 @@ spec:
(In that context, worker = process that doesn't accept connections)
- A relatively easy solution is to use files
- Readiness is useful mostly for rolling updates
- For a startup or readiness probe:
(because workers aren't backends for a service)
- worker creates `/tmp/ready` when it's ready
- probe checks the existence of `/tmp/ready`
- Liveness may help us restart a broken worker, but how can we check it?
- For a liveness probe:
- Embedding an HTTP server is a (potentially expensive) option
- worker touches `/tmp/alive` regularly
<br/>(e.g. just before starting to work on a job)
- probe checks that the timestamp on `/tmp/alive` is recent
- if the timestamp is old, it means that the worker is stuck
- Using a "lease" file can be relatively easy:
- Sometimes it can also make sense to embed a web server in the worker
- touch a file during each iteration of the main loop
- check the timestamp of that file from an exec probe
- Writing logs (and checking them from the probe) also works
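An exec probe along those lines could be sketched like this (the path and the one-minute freshness window are assumptions):
```yaml
livenessProbe:
  exec:
    command:
    - sh
    - -c
    # succeed only if /tmp/alive was touched less than a minute ago
    - find /tmp/alive -mmin -1 | grep -q .
  periodSeconds: 30
```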
???


@@ -316,6 +316,7 @@ class: extra-details
## How to find charts, the new way
- Go to the [Artifact Hub](https://artifacthub.io/packages/search?kind=0) (https://artifacthub.io)
https://artifacthub.io/packages/helm/securecodebox/juice-shop
- Or use `helm search hub ...` from the CLI
@@ -343,7 +344,8 @@ class: extra-details
]
Then go to → https://artifacthub.io/packages/helm/seccurecodebox/juice-shop
Then go to → https://artifacthub.io/packages/helm/securecodebox/juice-shop
---


@@ -1,148 +0,0 @@
## Ingress and canary releases
- Let's see how to implement *canary releases*
- The example here will use Traefik v1
(which is obsolete)
- It won't work on your Kubernetes cluster!
(unless you're running an oooooold version of Kubernetes)
(and an equally oooooooold version of Traefik)
- We've left it here just as an example!
---
## Canary releases
- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload
- After deploying the canary, we compare its metrics to the normal release
- If the metrics look good, the canary will progressively receive more traffic
(until it gets 100% and becomes the new normal release)
- If the metrics aren't good, the canary is automatically removed
- When we deploy a bad release, only a tiny fraction of traffic is affected
---
## Various ways to implement canary
- Example 1: canary for a microservice
- 1% of all requests (sampled randomly) are sent to the canary
- the remaining 99% are sent to the normal release
- Example 2: canary for a web app
- 1% of users are sent to the canary web site
- the remaining 99% are sent to the normal release
- Example 3: canary for shipping physical goods
- 1% of orders are shipped with the canary process
- the remaining 99% are shipped with the normal process
- We're going to implement example 1 (per-request routing)
---
## Canary releases with Traefik v1
- We need to deploy the canary and expose it with a separate service
- Then, in the Ingress resource, we need:
- multiple `paths` entries (one for each service, canary and normal)
- an extra annotation indicating the weight of each service
- If we want, we can send requests to more than 2 services
---
## The Ingress resource
.small[
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: rgb
  annotations:
    traefik.ingress.kubernetes.io/service-weights: |
      red: 50%
      green: 25%
      blue: 25%
spec:
  rules:
  - host: rgb.`A.B.C.D`.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: red
          servicePort: 80
      - path: /
        backend:
          serviceName: green
          servicePort: 80
      - path: /
        backend:
          serviceName: blue
          servicePort: 80
```
]
---
class: extra-details
## Other ingress controllers
*Just to illustrate how different things are ...*
- With the NGINX ingress controller:
- define two ingress resources
<br/>
(specifying rules with the same host+path)
- add `nginx.ingress.kubernetes.io/canary` annotations on each
- With Linkerd2:
- define two services
- define an extra service for the weighted aggregate of the two
- define a TrafficSplit (this is a CRD introduced by the SMI spec)
---
class: extra-details
## We need more than that
What we saw is just one of the multiple building blocks that we need to achieve a canary release.
We also need:
- metrics (latency, performance ...) for our releases
- automation to alter canary weights
(increase canary weight if metrics look good; decrease otherwise)
- a mechanism to manage the lifecycle of the canary releases
(create them, promote them, delete them ...)
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).


@@ -1,36 +1,34 @@
# Exposing HTTP services with Ingress resources
- Service = layer 4 (TCP, UDP, SCTP)
- HTTP services are typically exposed on port 80
- works with every TCP/UDP/SCTP protocol
(and 443 for HTTPS)
- doesn't "see" or interpret HTTP
- `NodePort` services are great, but they are *not* on port 80
- Ingress = layer 7 (HTTP)
(by default, they use port range 30000-32767)
- only for HTTP
- can route requests depending on URI or host header
- can handle TLS
- How can we get *many* HTTP services on port 80? 🤔
---
## Why should we use Ingress resources?
## Various ways to expose something on port 80
A few use-cases:
- Service with `type: LoadBalancer`
- URI routing (e.g. for single page apps)
*costs a little bit of money; not always available*
`/api` → service `api:5000`
- Service with one (or multiple) `ExternalIP`
everything else → service `static:80`
*requires public nodes; limited by number of nodes*
- Cost optimization
- Service with `hostPort` or `hostNetwork`
(because individual `LoadBalancer` services typically cost money)
*same limitations as `ExternalIP`; even harder to manage*
- Automatic handling of TLS certificates
- Ingress resources
*addresses all these limitations, yay!*
---
@@ -183,70 +181,20 @@ class: extra-details
---
## Accepting connections on port 80 (and 443)
- Web site users don't want to specify port numbers
(e.g. "connect to https://blahblah.whatever:31550")
- Our ingress controller needs to actually be exposed on port 80
(and 443 if we want to handle HTTPS)
- Let's see how we can achieve that!
---
## Various ways to expose something on port 80
- Service with `type: LoadBalancer`
*costs a little bit of money; not always available*
- Service with one (or multiple) `ExternalIP`
*requires public nodes; limited by number of nodes*
- Service with `hostPort` or `hostNetwork`
*same limitations as `ExternalIP`; even harder to manage*
---
## Deploying pods listening on port 80
- We are going to run Traefik in Pods with `hostNetwork: true`
- We want our ingress load balancer to be available on port 80
(so that our load balancer can use the "real" port 80 of our nodes)
- The best way to do that would be with a `LoadBalancer` service
- Traefik Pods will be created by a DaemonSet
... but it requires support from the underlying infrastructure
(so that we get one instance of Traefik on every node of the cluster)
- Instead, we are going to use the `hostNetwork` mode on the Traefik pods
- This means that we will be able to connect to any node of the cluster on port 80
.warning[This is not typical of a production setup!]
- Let's see what this `hostNetwork` mode is about ...
---
## Doing it in production
- When running "on cloud", the easiest option is a `LoadBalancer` service
- When running "on prem", it depends:
- [MetalLB] is a good option if a pool of public IP addresses is available
- otherwise, using `externalIPs` on a few nodes (2-3 for redundancy)
- Many variations/optimizations are possible depending on our exact scenario!
[MetalLB]: https://metallb.org/
---
class: extra-details
## Without `hostNetwork`
- Normally, each pod gets its own *network namespace*
@@ -263,8 +211,6 @@ class: extra-details
---
class: extra-details
## With `hostNetwork: true`
- No network namespace gets created
@@ -283,6 +229,26 @@ class: extra-details
---
class: extra-details
## Other techniques to expose port 80
- We could use pods specifying `hostPort: 80`
... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
- We could use a `NodePort` service
... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
- We could create a service with an external IP
... this would work, but would require a few extra steps
(figuring out the IP address and adding it to the service)
---
## Running Traefik
- The [Traefik documentation][traefikdoc] recommends using a Helm chart
@@ -304,8 +270,6 @@ class: extra-details
---
class: extra-details
## Taints and tolerations
- A *taint* is an attribute added to a node
@@ -532,6 +496,10 @@ This is normal: we haven't provided any ingress rule yet.
## Creating ingress resources
- Before Kubernetes 1.19, we must use YAML manifests
(see example on next slide)
- Since Kubernetes 1.19, we can use `kubectl create ingress`
```bash
@@ -566,21 +534,7 @@ This is normal: we haven't provided any ingress rule yet.
---
## Before Kubernetes 1.19
- Before Kubernetes 1.19:
- `kubectl create ingress` wasn't available
- `apiVersion: networking.k8s.io/v1` wasn't supported
- It was necessary to use YAML, and `apiVersion: networking.k8s.io/v1beta1`
(see example on next slide)
---
## YAML for old ingress resources
## Ingress resources in YAML
Here is a minimal host-based ingress resource:
@@ -601,15 +555,23 @@ spec:
```
(It is in `k8s/ingress.yaml`.)
---
## YAML for new ingress resources
class: extra-details
## Ingress API version
- The YAML on the previous slide uses `apiVersion: networking.k8s.io/v1beta1`
- Starting with Kubernetes 1.19, `networking.k8s.io/v1` is available
- And we can use `kubectl create ingress` 🎉
- However, with Kubernetes 1.19 (and later), we can use `kubectl create ingress`
- We can see "modern" YAML with `-o yaml --dry-run=client`:
- We chose to keep an "old" (deprecated!) YAML example for folks still using older versions of Kubernetes
- If we want to see "modern" YAML, we can use `-o yaml --dry-run=client`:
```bash
kubectl create ingress red -o yaml --dry-run=client \
@@ -679,6 +641,157 @@ class: extra-details
- It is still in alpha stage
---
## Vendor-specific example
- Let's see how to implement *canary releases*
- The example here will use Traefik v1
(which is obsolete)
- It won't work on your Kubernetes cluster!
(unless you're running an oooooold version of Kubernetes)
(and an equally oooooooold version of Traefik)
- We've left it here just as an example!
---
## Canary releases
- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload
- After deploying the canary, we compare its metrics to the normal release
- If the metrics look good, the canary will progressively receive more traffic
(until it gets 100% and becomes the new normal release)
- If the metrics aren't good, the canary is automatically removed
- When we deploy a bad release, only a tiny fraction of traffic is affected
---
## Various ways to implement canary
- Example 1: canary for a microservice
- 1% of all requests (sampled randomly) are sent to the canary
- the remaining 99% are sent to the normal release
- Example 2: canary for a web app
- 1% of users are sent to the canary web site
- the remaining 99% are sent to the normal release
- Example 3: canary for shipping physical goods
- 1% of orders are shipped with the canary process
- the remaining 99% are shipped with the normal process
- We're going to implement example 1 (per-request routing)
---
## Canary releases with Traefik v1
- We need to deploy the canary and expose it with a separate service
- Then, in the Ingress resource, we need:
- multiple `paths` entries (one for each service, canary and normal)
- an extra annotation indicating the weight of each service
- If we want, we can send requests to more than 2 services
---
## The Ingress resource
.small[
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: rgb
  annotations:
    traefik.ingress.kubernetes.io/service-weights: |
      red: 50%
      green: 25%
      blue: 25%
spec:
  rules:
  - host: rgb.`A.B.C.D`.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: red
          servicePort: 80
      - path: /
        backend:
          serviceName: green
          servicePort: 80
      - path: /
        backend:
          serviceName: blue
          servicePort: 80
```
]
---
class: extra-details
## Other ingress controllers
*Just to illustrate how different things are ...*
- With the NGINX ingress controller:
- define two ingress resources
<br/>
(specifying rules with the same host+path)
- add `nginx.ingress.kubernetes.io/canary` annotations on each (see the sketch after this list)
- With Linkerd2:
- define two services
- define an extra service for the weighted aggregate of the two
- define a TrafficSplit (this is a CRD introduced by the SMI spec)
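For the NGINX case, the canary Ingress could carry annotations like these (the weight is illustrative; these annotations are specific to the ingress-nginx controller):
```yaml
metadata:
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-weight: "1"
```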
---
class: extra-details
## We need more than that
What we saw is just one of the multiple building blocks that we need to achieve a canary release.
We also need:
- metrics (latency, performance ...) for our releases
- automation to alter canary weights
(increase canary weight if metrics look good; decrease otherwise)
- a mechanism to manage the lifecycle of the canary releases
(create them, promote them, delete them ...)
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).
???
:EN:- The Ingress resource


@@ -0,0 +1,51 @@
## Basic Commands
run
create
get
delete
logs
explain
describe
exec
## Modifying Objects
apply (upsert)
set
edit
patch
label
annotate
https://blog.atomist.com/kubernetes-apply-replace-patch/
diff
replace
wait
## Network Commands
expose
port-forward
proxy
## Deploy Command
rollout
scale
autoscale
## Cluster Management Commands
certificate
cluster-info
cordon
uncordon
drain
taint
## Troubleshooting and Debugging Commands
top
attach
cp
auth
debug
## Settings Commands
completion
## Other Commands
alpha
api-resources
api-versions
config
plugin
version
Please Share this API Explorer

slides/k8s/kubectl-first.md Normal file

@@ -0,0 +1,269 @@
# First contact with `kubectl`
- `kubectl` is (almost) the only tool we'll need to talk to Kubernetes
- It is a rich CLI tool around the Kubernetes API
(Everything you can do with `kubectl`, you can do directly with the API)
- On our machines, there is a `~/.kube/config` file with:
- the Kubernetes API address
- the path to our TLS certificates used to authenticate
- You can also use the `--kubeconfig` flag to pass a config file
- Or directly `--server`, `--user`, etc.
- `kubectl` can be pronounced "Cube C T L", "Cube cuttle", "Cube cuddle"...
---
class: extra-details
## `kubectl` is the new SSH
- We often start managing servers with SSH
(installing packages, troubleshooting ...)
- At scale, it becomes tedious, repetitive, error-prone
- Instead, we use config management, central logging, etc.
- In many cases, we still need SSH:
- as the underlying access method (e.g. Ansible)
- to debug tricky scenarios
- to inspect and poke at things
---
class: extra-details
## The parallel with `kubectl`
- We often start managing Kubernetes clusters with `kubectl`
(deploying applications, troubleshooting ...)
- At scale (with many applications or clusters), it becomes tedious, repetitive, error-prone
- Instead, we use automated pipelines, observability tooling, etc.
- In many cases, we still need `kubectl`:
- to debug tricky scenarios
- to inspect and poke at things
- The Kubernetes API is always the underlying access method
---
## `kubectl get`
- Let's look at our `Node` resources with `kubectl get`!
.lab[
- Look at the composition of our cluster:
```bash
kubectl get node
```
- These commands are equivalent:
```bash
kubectl get no
kubectl get node
kubectl get nodes
```
]
---
## `kubectl` is an API server client
- kubectl verbose (-v)
- --v=6 Display requested resources.
- --v=7 Display HTTP request headers.
- --v=8 Display HTTP request contents.
- --v=9 Display HTTP request contents without truncation of contents.
```bash
kubectl get nodes --v=8
```
---
## Obtaining machine-readable output
- `kubectl get` can output JSON, YAML, or be directly formatted
.lab[
- Give us more info about the nodes:
```bash
kubectl get nodes -o wide
```
- Let's have some YAML:
```bash
kubectl get no -o yaml
```
See that `kind: List` at the end? It's the type of our result!
]
---
## (Ab)using `kubectl` and `jq`
- It's super easy to build custom reports
.lab[
- Show the capacity of all our nodes as a stream of JSON objects:
```bash
kubectl get nodes -o json |
jq ".items[] | {name:.metadata.name} + .status.capacity"
```
]
---
class: extra-details
## Exploring types and definitions
- We can list all available resource types by running `kubectl api-resources`
<br/>
(In Kubernetes 1.10 and prior, this command used to be `kubectl get`)
- We can view the definition for a resource type with:
```bash
kubectl explain type
```
- We can view the definition of a field in a resource, for instance:
```bash
kubectl explain node.spec
```
- Or get the full definition of all fields and sub-fields:
```bash
kubectl explain node --recursive
```
---
class: extra-details
## Introspection vs. documentation
- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference)
- The API documentation is usually easier to read, but:
- it won't show custom types (like Custom Resource Definitions)
- we need to make sure that we look at the correct version
- `kubectl api-resources` and `kubectl explain` perform *introspection*
(they communicate with the API server and obtain the exact type definitions)
---
## Type names
- The most common resource names have three forms:
- singular (e.g. `node`, `service`, `deployment`)
- plural (e.g. `nodes`, `services`, `deployments`)
- short (e.g. `no`, `svc`, `deploy`)
- Some resources do not have a short name
- `Endpoints` only have a plural form
(because even a single `Endpoints` resource is actually a list of endpoints)
---
## Viewing details
- We can use `kubectl get -o yaml` to see all available details
- However, YAML output is often simultaneously too much and not enough
- For instance, `kubectl get node node1 -o yaml` is:
- too much information (e.g.: list of images available on this node)
- not enough information (e.g.: doesn't show pods running on this node)
- difficult to read for a human operator
- For a comprehensive overview, we can use `kubectl describe` instead
---
## `kubectl describe`
- `kubectl describe` needs a resource type and (optionally) a resource name
- It is possible to provide a resource name *prefix*
(all matching objects will be displayed)
- `kubectl describe` will retrieve some extra information about the resource
.lab[
- Look at the information available for `node1` with one of the following commands:
```bash
kubectl describe node/node1
kubectl describe node node1
```
]
(We should notice a bunch of control plane pods.)
---
## Listing running containers
- Containers are manipulated through *pods*
- A pod is a group of containers:
- running together (on the same node)
- sharing resources (RAM, CPU; but also network, volumes)
.lab[
- List pods on our cluster:
```bash
kubectl get pods
```
]
--
*Where are the pods that we saw just a moment earlier?!?*

slides/k8s/kubectl-more.md Normal file

@@ -0,0 +1,340 @@
# More contact with `kubectl`
- Namespaces
- Clusters
- Proxy
---
## Namespaces
- Namespaces allow us to segregate resources
.lab[
- List the namespaces on our cluster with one of these commands:
```bash
kubectl get namespaces
kubectl get namespace
kubectl get ns
```
]
--
*You know what ... This `kube-system` thing looks suspicious.*
*In fact, I'm pretty sure it showed up earlier, when we did:*
`kubectl describe node node1`
---
## Accessing namespaces
- By default, `kubectl` uses the `default` namespace
- We can see resources in all namespaces with `--all-namespaces`
.lab[
- List the pods in all namespaces:
```bash
kubectl get pods --all-namespaces
```
- Since Kubernetes 1.14, we can also use `-A` as a shorter version:
```bash
kubectl get pods -A
```
]
*Here are our system pods!*
---
## What are all these control plane pods?
- `etcd` is our etcd server
- `kube-apiserver` is the API server
- `kube-controller-manager` and `kube-scheduler` are other control plane components
- `coredns` provides DNS-based service discovery ([replacing kube-dns as of 1.11](https://kubernetes.io/blog/2018/07/10/coredns-ga-for-kubernetes-cluster-dns/))
- `kube-proxy` is the (per-node) component managing port mappings and such
- `weave` is the (per-node) component managing the network overlay
- the `READY` column indicates the number of containers in each pod
(1 for most pods, but `weave` has 2, for instance)
---
## Scoping another namespace
- We can also look at a different namespace (other than `default`)
.lab[
- List only the pods in the `kube-system` namespace:
```bash
kubectl get pods --namespace=kube-system
kubectl get pods -n kube-system
```
]
---
## Namespaces and other `kubectl` commands
- We can use `-n`/`--namespace` with almost every `kubectl` command
- Example:
- `kubectl create --namespace=X` to create something in namespace X
- We can use `-A`/`--all-namespaces` with most commands that manipulate multiple objects
- Examples:
- `kubectl delete` can delete resources across multiple namespaces
- `kubectl label` can add/remove/update labels across multiple namespaces
---
class: extra-details
## What about `kube-public`?
.lab[
- List the pods in the `kube-public` namespace:
```bash
kubectl -n kube-public get pods
```
]
Nothing!
`kube-public` is created by kubeadm & [used for security bootstrapping](https://kubernetes.io/blog/2017/01/stronger-foundation-for-creating-and-managing-kubernetes-clusters).
---
class: extra-details
## Exploring `kube-public`
- The only interesting object in `kube-public` is a ConfigMap named `cluster-info`
.lab[
- List ConfigMap objects:
```bash
kubectl -n kube-public get configmaps
```
- Inspect `cluster-info`:
```bash
kubectl -n kube-public get configmap cluster-info -o yaml
```
]
Note the `selfLink` URI: `/api/v1/namespaces/kube-public/configmaps/cluster-info`
We can use that!
---
class: extra-details
## Accessing `cluster-info`
- Earlier, when trying to access the API server, we got a `Forbidden` message
- But `cluster-info` is readable by everyone (even without authentication)
.lab[
- Retrieve `cluster-info`:
```bash
curl -k https://10.96.0.1/api/v1/namespaces/kube-public/configmaps/cluster-info
```
]
- We were able to access `cluster-info` (without auth)
- It contains a `kubeconfig` file
---
class: extra-details
## Retrieving `kubeconfig`
- We can easily extract the `kubeconfig` file from this ConfigMap
.lab[
- Display the content of `kubeconfig`:
```bash
curl -sk https://10.96.0.1/api/v1/namespaces/kube-public/configmaps/cluster-info \
| jq -r .data.kubeconfig
```
]
- This file holds the canonical address of the API server, and the public key of the CA
- This file *does not* hold client keys or tokens
- This is not sensitive information, but allows us to establish trust
---
class: extra-details
## What about `kube-node-lease`?
- Starting with Kubernetes 1.14, there is a `kube-node-lease` namespace
(or in Kubernetes 1.13 if the NodeLease feature gate is enabled)
- That namespace contains one Lease object per node
- *Node leases* are a new way to implement node heartbeats
(i.e. node regularly pinging the control plane to say "I'm alive!")
- For more details, see [Efficient Node Heartbeats KEP] or the [node controller documentation]
[Efficient Node Heartbeats KEP]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/589-efficient-node-heartbeats/README.md
[node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller
---
## Services
- A *service* is a stable endpoint to connect to "something"
(In the initial proposal, they were called "portals")
.lab[
- List the services on our cluster with one of these commands:
```bash
kubectl get services
kubectl get svc
```
]
--
There is already one service on our cluster: the Kubernetes API itself.
---
## ClusterIP services
- A `ClusterIP` service is internal, available from the cluster only
- This is useful for introspection from within containers
.lab[
- Try to connect to the API:
```bash
curl -k https://`10.96.0.1`
```
- `-k` is used to skip certificate verification
- Make sure to replace 10.96.0.1 with the CLUSTER-IP shown by `kubectl get svc`
]
The command above should either time out, or show an authentication error. Why?
---
## Time out
- Connections to ClusterIP services only work *from within the cluster*
- If we are outside the cluster, the `curl` command will probably time out
(Because the IP address, e.g. 10.96.0.1, isn't routed properly outside the cluster)
- This is the case with most "real" Kubernetes clusters
- To try the connection from within the cluster, we can use [shpod](https://github.com/jpetazzo/shpod)
---
## Authentication error
This is what we should see when connecting from within the cluster:
```json
$ curl -k https://10.96.0.1
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {
},
"status": "Failure",
"message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
"reason": "Forbidden",
"details": {
},
"code": 403
}
```
---
## Explanations
- We can see `kind`, `apiVersion`, `metadata`
- These are typical of a Kubernetes API reply
- Because we *are* talking to the Kubernetes API
- The Kubernetes API tells us "Forbidden"
(because it requires authentication)
- The Kubernetes API is reachable from within the cluster
(many apps integrating with Kubernetes will use this)
---
## DNS integration
- Each service also gets a DNS record
- The Kubernetes DNS resolver is available *from within pods*
(and sometimes, from within nodes, depending on configuration)
- Code running in pods can connect to services using their name
(e.g. https://kubernetes/...)
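- A minimal way to check this, assuming we can run a throwaway pod (`dnstest` is an arbitrary name; `nslookup` ships with busybox):
```bash
kubectl run dnstest --rm -it --restart=Never --image=busybox -- \
        nslookup kubernetes.default.svc.cluster.local
```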
???
:EN:- Getting started with kubectl
:FR:- Se familiariser avec kubectl
View File
@@ -0,0 +1,399 @@
# Scaling our application
- `kubectl` gives us a simple command to scale a workload:
`kubectl scale TYPE NAME --replicas=HOWMANY`
- Let's try it on our Pod, so that we have more Pods!
.lab[
- Try to scale the Pod:
```bash
kubectl scale pod pingpong --replicas=3
```
]
🤔 We get the following error. What does it mean?
```
Error from server (NotFound): the server could not find the requested resource
```
---
## Scaling a Pod
- We cannot "scale a Pod"
(that's not completely true; we could give it more CPU/RAM)
- If we want more Pods, we need to create more Pods
(i.e. execute `kubectl run` multiple times)
- There must be a better way!
(spoiler alert: yes, there is a better way!)
---
class: extra-details
## `NotFound`
- What's the meaning of that error?
```
Error from server (NotFound): the server could not find the requested resource
```
- When we execute `kubectl scale THAT-RESOURCE --replicas=THAT-MANY`,
<br/>
it is like telling Kubernetes:
*go to THAT-RESOURCE and set the scaling button to position THAT-MANY*
- Pods do not have a "scaling button"
- Try to execute the `kubectl scale pod` command with `-v6`
- We see a `PATCH` request to `/scale`: that's the "scaling button"
(technically it's called a *subresource* of the Pod)
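- For instance (the `-v6` flag works with any kubectl command; the output will show the failing request):
```bash
kubectl scale pod pingpong --replicas=3 -v6
```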
---
## Creating more pods
- We are going to create a ReplicaSet
(= set of replicas = set of identical pods)
- In fact, we will create a Deployment, which itself will create a ReplicaSet
- Why so many layers? We'll explain that shortly, don't worry!
---
## Creating a Deployment running `ping`
- Let's create a Deployment instead of a single Pod
.lab[
- Create the Deployment; pay attention to the `--`:
```bash
kubectl create deployment pingpong --image=alpine -- ping 127.0.0.1
```
]
- The `--` is used to separate:
- "options/flags of `kubectl create`
- command to run in the container
---
## What has been created?
.lab[
<!-- ```hide kubectl wait pod --selector=app=pingpong --for condition=ready ``` -->
- Check the resources that were created:
```bash
kubectl get all
```
]
Note: `kubectl get all` is a lie. It doesn't show everything.
(But it shows a lot of "usual suspects", i.e. commonly used resources.)
---
## There's a lot going on here!
```
NAME READY STATUS RESTARTS AGE
pod/pingpong 1/1 Running 0 4m17s
pod/pingpong-6ccbc77f68-kmgfn 1/1 Running 0 11s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h45m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/pingpong 1/1 1 1 11s
NAME DESIRED CURRENT READY AGE
replicaset.apps/pingpong-6ccbc77f68 1 1 1 11s
```
Our new Pod is not named `pingpong`, but `pingpong-xxxxxxxxxx-yyyyy`.
We have a Deployment named `pingpong`, and an extra ReplicaSet, too. What's going on?
---
## From Deployment to Pod
We have the following resources:
- `deployment.apps/pingpong`
This is the Deployment that we just created.
- `replicaset.apps/pingpong-xxxxxxxxxx`
This is a Replica Set created by this Deployment.
- `pod/pingpong-xxxxxxxxxx-yyyyy`
This is a *pod* created by the Replica Set.
Let's explain what these things are.
---
## Pod
- Can have one or multiple containers
- Runs on a single node
(Pod cannot "straddle" multiple nodes)
- Pods cannot be moved
(e.g. in case of node outage)
- Pods cannot be scaled horizontally
(except by manually creating more Pods)
---
class: extra-details
## Pod details
- A Pod is not a process; it's an environment for containers
- it cannot be "restarted"
- it cannot "crash"
- The containers in a Pod can crash
- They may or may not get restarted
(depending on Pod's restart policy)
- If all containers exit successfully, the Pod ends in "Succeeded" phase
- If some containers fail and don't get restarted, the Pod ends in "Failed" phase
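- One way to check the phase of a pod:
```bash
kubectl get pod pingpong -o jsonpath='{.status.phase}'
```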
---
## Replica Set
- Set of identical (replicated) Pods
- Defined by a pod template + number of desired replicas
- If there are not enough Pods, the Replica Set creates more
(e.g. in case of node outage; or simply when scaling up)
- If there are too many Pods, the Replica Set deletes some
(e.g. if a node was disconnected and comes back; or when scaling down)
- We can scale up/down a Replica Set
- we update the manifest of the Replica Set
- as a consequence, the Replica Set controller creates/deletes Pods
---
## Deployment
- Replica Sets control *identical* Pods
- Deployments are used to roll out different Pods
(different image, command, environment variables, ...)
- When we update a Deployment with a new Pod definition:
- a new Replica Set is created with the new Pod definition
- that new Replica Set is progressively scaled up
- meanwhile, the old Replica Set(s) is(are) scaled down
- This is a *rolling update*, minimizing application downtime
- When we scale up/down a Deployment, it scales up/down its Replica Set
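- As a sketch of what that looks like (nothing to run yet; `alpine:3.16` is just a placeholder tag, and the container created by `kubectl create deployment` is named after its image):
```bash
kubectl set image deployment/pingpong alpine=alpine:3.16
kubectl rollout status deployment/pingpong
```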
---
## Can we scale now?
- Let's try `kubectl scale` again, but on the Deployment!
.lab[
- Scale our `pingpong` deployment:
```bash
kubectl scale deployment pingpong --replicas 3
```
- Note that we could also write it like this:
```bash
kubectl scale deployment/pingpong --replicas 3
```
- Check that we now have multiple pods:
```bash
kubectl get pods
```
]
---
class: extra-details
## Scaling a Replica Set
- What if we scale the Replica Set instead of the Deployment?
- The Deployment would notice it right away and scale back to the initial level
- The Replica Set makes sure that we have the right number of Pods
- The Deployment makes sure that the Replica Set has the right size
(conceptually, it delegates the management of the Pods to the Replica Set)
- This might seem weird (why this extra layer?) but will soon make sense
(when we will look at how rolling updates work!)
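- To see that reconciliation in action (replace the placeholder with the real name from `kubectl get replicasets`):
```bash
kubectl scale replicaset pingpong-xxxxxxxxxx --replicas=0
kubectl get pods
```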
---
## Checking Deployment logs
- `kubectl logs` needs a Pod name
- But it can also work with a *type/name*
(e.g. `deployment/pingpong`)
.lab[
- View the result of our `ping` command:
```bash
kubectl logs deploy/pingpong --tail 2
```
]
- It shows us the logs of the first Pod of the Deployment
- We'll see later how to get the logs of *all* the Pods!
---
## Resilience
- The *deployment* `pingpong` watches its *replica set*
- The *replica set* ensures that the right number of *pods* are running
- What happens if pods disappear?
.lab[
- In a separate window, watch the list of pods:
```bash
watch kubectl get pods
```
<!--
```wait Every 2.0s```
```tmux split-pane -v```
-->
- Destroy the pod currently shown by `kubectl logs`:
```
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
```
<!--
```tmux select-pane -t 0```
```copy pingpong-[^-]*-.....```
```tmux last-pane```
```keys kubectl delete pod ```
```paste```
```key ^J```
```check```
-->
]
---
## What happened?
- `kubectl delete pod` terminates the pod gracefully
(sending it the TERM signal and waiting for it to shut down)
- As soon as the pod is in "Terminating" state, the Replica Set replaces it
- But we can still see the output of the "Terminating" pod in `kubectl logs`
- Until 30 seconds later, when the grace period expires
- The pod is then killed, and `kubectl logs` exits
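- The grace period can be shortened if we're in a hurry (the pod name is a placeholder; `--force` skips the wait entirely):
```bash
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy --grace-period=5
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy --grace-period=0 --force
```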
---
## Deleting a standalone Pod
- What happens if we delete a standalone Pod?
(like the first `pingpong` Pod that we created)
.lab[
- Delete the Pod:
```bash
kubectl delete pod pingpong
```
<!--
```key ^D```
```key ^C```
-->
]
- No replacement Pod gets created because there is no *controller* watching it
- That's why we will rarely use standalone Pods in practice
(except e.g. for one-off debugging, or executing a short supervised task)
???
:EN:- Running pods and deployments
:FR:- Créer un pod et un déploiement
View File
@@ -0,0 +1,346 @@
# Running our first containers on Kubernetes
- First things first: we cannot run a container
--
- We are going to run a pod, and in that pod there will be a single container
--
- In that container in the pod, we are going to run a simple `ping` command
---
## Starting a simple pod with `kubectl run`
- `kubectl run` is convenient to start a single pod
- We need to specify at least a *name* and the image we want to use
- Optionally, we can specify the command to run in the pod
.lab[
- Let's ping the address of `localhost`, the loopback interface:
```bash
kubectl run pingpong --image alpine ping 127.0.0.1
```
<!-- ```hide kubectl wait pod --selector=run=pingpong --for condition=ready``` -->
]
The output tells us that a Pod was created:
```
pod/pingpong created
```
---
## Viewing container output
- Let's use the `kubectl logs` command
- It takes a Pod name as argument
- Unless specified otherwise, it will only show logs of the first container in the pod
(Good thing there's only one in ours!)
.lab[
- View the result of our `ping` command:
```bash
kubectl logs pingpong
```
]
---
## Streaming logs in real time
- Just like `docker logs`, `kubectl logs` supports convenient options:
- `-f`/`--follow` to stream logs in real time (à la `tail -f`)
- `--tail` to indicate how many lines you want to see (from the end)
- `--since` to get logs only after a given timestamp
.lab[
- View the latest logs of our `ping` command:
```bash
kubectl logs pingpong --tail 1 --follow
```
- Stop it with Ctrl-C
<!--
```wait seq=3```
```keys ^C```
-->
]
---
## Authoring YAML
- We have already generated YAML implicitly, with e.g.:
- `kubectl run`
- When and why do we need to write our own YAML?
- How do we write YAML from scratch?
---
## The limits of generated YAML
- Many advanced (and even not-so-advanced) features require writing YAML:
- pods with multiple containers
- resource limits
- healthchecks
- DaemonSets, StatefulSets
- and more!
- How do we access these features?
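- As a preview, here is a minimal sketch of a pod with resource limits (names and values are arbitrary; the client-side dry run only validates it):
```bash
kubectl apply --dry-run=client -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: limited
spec:
  containers:
  - name: main
    image: alpine
    command: ["sleep", "1000"]
    resources:
      limits:
        cpu: 250m
        memory: 64Mi
EOF
```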
---
## Various ways to write YAML
- Completely from scratch with our favorite editor
(yeah, right)
- Dump an existing resource with `kubectl get -o yaml ...`
(it is recommended to clean up the result)
- Ask `kubectl` to generate the YAML
(with `kubectl create --dry-run=client -o yaml`)
- Use The Docs, Luke
(the documentation almost always has YAML examples)
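- For example, to get a Deployment manifest without creating anything (`web` and `nginx` are arbitrary names):
```bash
kubectl create deployment web --image=nginx --dry-run=client -o yaml
```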
---
## Generating YAML from scratch
- Start with a namespace:
```yaml
kind: Namespace
apiVersion: v1
metadata:
name: hello
```
- We can use `kubectl explain` to see resource definitions:
```bash
kubectl explain -r pod.spec
```
- Not the easiest option!
---
## Dump the YAML for an existing resource
- `kubectl get -o yaml` works!
- A lot of fields in `metadata` are not necessary
(`managedFields`, `resourceVersion`, `uid`, `creationTimestamp` ...)
- Most objects will have a `status` field that is not necessary
- Default or empty values can also be removed for clarity
- This can be done manually or with the `kubectl-neat` plugin
`kubectl get -o yaml ... | kubectl neat`
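- Putting it together (assuming the `kubectl-neat` plugin is installed, e.g. with `kubectl krew install neat`):
```bash
kubectl get deployment pingpong -o yaml | kubectl neat > pingpong.yaml
```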
---
## Generating YAML without creating resources
- We can use the `--dry-run=client` option
.lab[
- Generate the YAML for a Pod without creating it:
```bash
kubectl run pingpong --image alpine --dry-run=client -o yaml -- ping 127.0.0.1
kubectl run pingpong --image alpine --dry-run=client -o yaml -- ping 127.0.0.1 >ping.yaml
```
```
- Optionally clean it up with `kubectl neat`; then apply it:
```bash
kubectl apply -f ping.yaml
```
]
---
class: extra-details
## Server-side dry run
- Server-side dry run will do all the work, but *not* persist to etcd
(all validation and mutation hooks will be executed)
.lab[
- Try the same YAML file as earlier, with server-side dry run:
```bash
kubectl run pingpong --image alpine --dry-run=server ping 127.0.0.1
```
]
---
class: extra-details
## Advantages of server-side dry run
- The YAML is verified much more extensively
- The only step that is skipped is "write to etcd"
- YAML that passes server-side dry run *should* apply successfully
(unless the cluster state changes by the time the YAML is actually applied)
- Validating or mutating hooks that have side effects can also be an issue
---
class: extra-details
## `kubectl diff`
- `kubectl diff` does a server-side dry run, *and* shows differences
.lab[
- Try `kubectl diff` on the YAML that we tweaked earlier:
```bash
kubectl diff -f web.yaml
```
<!-- ```wait status:``` -->
]
Note: we don't need to specify `--validate=false` here.
---
## Advantage of YAML
- Using YAML (instead of `kubectl create <kind>`) allows us to be *declarative*
- The YAML describes the desired state of our cluster and applications
- YAML can be stored, versioned, archived (e.g. in git repositories)
- To change resources, change the YAML files
(instead of using `kubectl edit`/`scale`/`label`/etc.)
- Changes can be reviewed before being applied
(with code reviews, pull requests ...)
- This workflow is sometimes called "GitOps"
(there are tools like Weave Flux or GitKube to facilitate it)
---
## YAML in practice
- Get started with `kubectl run ...`
(until you have something that sort of works)
- Then, run these commands again, but with `-o yaml --dry-run=client`
(to generate and save YAML manifests)
- Try to apply these manifests in a clean environment
(e.g. a new Namespace)
- Check that everything works; tweak and iterate if needed
- Commit the YAML to a repo 💯🏆️
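A sketch of that workflow from start to finish (all names here are placeholders):
```bash
kubectl create deployment web --image=nginx      # iterate until it sort of works
kubectl create deployment web --image=nginx \
        --dry-run=client -o yaml > web.yaml      # then generate the manifest
kubectl create namespace sandbox                 # a clean environment
kubectl apply --namespace sandbox -f web.yaml    # check that it still works
git add web.yaml && git commit -m 'add web deployment'
```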
---
## "Day 2" YAML
- Don't hesitate to remove unused fields
(e.g. `creationTimestamp: null`, most `{}` values...)
- Check your YAML with:
  - [kube-score](https://github.com/zegl/kube-score) (installable with krew)
  - [kube-linter](https://github.com/stackrox/kube-linter)
- Check live resources with tools like [popeye](https://popeyecli.io/)
- Remember that like all linters, they need to be configured for your needs!
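- Typical invocations look like this (using the `ping.yaml` generated earlier; adjust paths to taste):
```bash
kube-score score ping.yaml
kube-linter lint ping.yaml
```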
???
:EN:- Techniques to write YAML manifests
:FR:- Comment écrire des *manifests* YAML
---
## Multi-line command arguments
- `/bin/sh -c` takes a single string parameter
- A YAML *block scalar* (`|`) lets us write that string across multiple lines
.lab[
```yaml
- command:
  - /bin/sh
  - -c
  - |
    echo "running below scripts"
    i=0;
    while true;
    do
      echo "$i: $(date)";
      i=$((i+1));
      sleep 1;
    done
```
]
View File
@@ -554,28 +554,6 @@ Note: the `apiVersion` field appears to be optional.
---
class: extra-details
## Managing `ownerReferences`
- By default, the generated object and triggering object have independent lifecycles
(deleting the triggering object doesn't affect the generated object)
- It is possible to associate the generated object with the triggering object
(so that deleting the triggering object also deletes the generated object)
- This is done by adding the triggering object information to `ownerReferences`
(in the generated object `metadata`)
- See [Linking resources with ownerReferences][ownerref] for an example
[ownerref]: https://kyverno.io/docs/writing-policies/generate/#linking-resources-with-ownerreferences
---
## Asynchronous creation
- Kyverno creates resources asynchronously
View File
@@ -40,9 +40,7 @@
- Each person gets their own private set of VMs
- The connection information is on a shared spreadsheet
(URL to be shared through portal and chat)
- Each person should have a printed card with connection information
- We will connect to these VMs with SSH
View File
@@ -14,20 +14,32 @@
- CPU is a *compressible resource*
- it can be preempted immediately without adverse effect
- if we have N CPU and need 2N, we run at 50% speed
(it can be preempted immediately without adverse effect)
- Memory is an *incompressible resource*
- it needs to be swapped out to be reclaimed; and this is costly
- if we have N GB RAM and need 2N, we might run at... 0.1% speed!
(it needs to be swapped out to be reclaimed; and this is costly)
- As a result, exceeding limits will have different consequences for CPU and memory
---
## Exceeding CPU limits
- CPU can be reclaimed instantaneously
(in fact, it is preempted hundreds of times per second, at each context switch)
- If a container uses too much CPU, it can be throttled
(it will be scheduled less often)
- The processes in that container will run slower
(or rather: they will not run faster)
---
class: extra-details
## CPU limits implementation details
@@ -134,59 +146,39 @@ For more details, check [this blog post](https://erickhun.com/posts/kubernetes-f
---
## Running low on memory
## Exceeding memory limits
- When the system runs low on memory, it starts to reclaim used memory
- Memory needs to be swapped out before being reclaimed
(we talk about "memory pressure")
- "Swapping" means writing memory pages to disk, which is very slow
- Option 1: free up some buffers and caches
- On a classic system, a process that swaps can get 1000x slower
(fastest option; might affect performance if cache memory runs very low)
(because disk I/O is 1000x slower than memory I/O)
- Option 2: swap, i.e. write to disk some memory of one process to give it to another
- Exceeding the memory limit (even by a small amount) can reduce performance *a lot*
(can have a huge negative impact on performance because disks are slow)
- Kubernetes *does not support swap* (more on that later!)
- Option 3: terminate a process and reclaim all its memory
(OOM or Out Of Memory Killer on Linux)
- Exceeding the memory limit will cause the container to be killed
---
## Memory limits on Kubernetes
## Limits vs requests
- Kubernetes *does not support swap*
(but it may support it in the future, thanks to [KEP 2400])
- If a container exceeds its memory *limit*, it gets killed immediately
- If a node is overcommitted and under memory pressure, it will terminate some pods
(see next slide for some details about what "overcommit" means here!)
[KEP 2400]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2400-node-swap/README.md#implementation-history
---
## Overcommitting resources
- *Limits* are "hard limits" (a container *cannot* exceed its limits)
- Limits are "hard limits" (they can't be exceeded)
- a container exceeding its memory limit is killed
- a container exceeding its CPU limit is throttled
- On a given node, the sum of pod *limits* can be higher than the node size
- Requests are used for scheduling purposes
- *Requests* are used for scheduling purposes
- a container using *less* than what it requested will never be killed or throttled
- a container can use more than its requested CPU or RAM amounts
- the scheduler uses the requested sizes to determine placement
- a container using *less* than what it requested should never be killed or throttled
- On a given node, the sum of pod *requests* cannot be higher than the node size
- the resources requested by all pods on a node will never exceed the node size
---
@@ -230,31 +222,9 @@ Each pod is assigned a QoS class (visible in `status.qosClass`).
---
class: extra-details
## Where is my swap?
## CPU and RAM reservation
- Kubernetes passes resources requests and limits to the container engine
- The container engine applies these requests and limits with specific mechanisms
- Example: on Linux, this is typically done with control groups aka cgroups
- Most systems use cgroups v1, but cgroups v2 are slowly being rolled out
(e.g. available in Ubuntu 22.04 LTS)
- Cgroups v2 have new, interesting features for memory control:
- ability to set "minimum" memory amounts (to effectively reserve memory)
- better control on the amount of swap used by a container
---
class: extra-details
## What's the deal with swap?
- The semantics of memory and swap limits on Linux cgroups are complex
- With cgroups v1, it's not possible to disable swap for a cgroup
@@ -268,8 +238,6 @@ class: extra-details
- The simplest solution was to disable swap entirely
- Kubelet will refuse to start if it detects that swap is enabled!
---
## Alternative point of view
@@ -300,7 +268,7 @@ class: extra-details
- You will need to add the flag `--fail-swap-on=false` to kubelet
(remember: it won't otherwise start if it detects that swap is enabled)
(otherwise, it won't start!)
---
@@ -698,18 +666,6 @@ class: extra-details
---
## Underutilization
- Remember: when assigning a pod to a node, the scheduler looks at *requests*
(not at current utilization on the node)
- If pods request resources but don't use them, this can lead to underutilization
(because the scheduler will consider that the node is full and can't fit new pods)
---
## Viewing a namespace limits and quotas
- `kubectl describe namespace` will display resource limits and quotas
View File
@@ -1,18 +1,14 @@
# Rolling updates
- How should we update a running application?
- By default (without rolling updates), when a scaled resource is updated:
- Strategy 1: delete old version, then deploy new version
- new pods are created
(not great, because it obviously provokes downtime!)
- old pods are terminated
- Strategy 2: deploy new version, then delete old version
- ... all at the same time
(uses a lot of resources; also how do we shift traffic?)
- Strategy 3: replace running pods one at a time
(sounds interesting; and good news, Kubernetes does it for us!)
- if something goes wrong, ¯\\\_(ツ)\_/¯
---
View File
@@ -20,17 +20,13 @@
## Docker Desktop
- Available on Linux, Mac, and Windows
- Free for personal use and small businesses
(fewer than 250 employees and less than $10 million in annual revenue)
- Available on Mac and Windows
- Gives you one cluster with one node
- Streamlined installation and user experience
- Very easy to use if you are already using Docker Desktop:
- Great integration with various network stacks and e.g. corporate VPNs
go to Docker Desktop preferences and enable Kubernetes
- Ideal for Docker users who need good integration between both platforms
@@ -44,11 +40,13 @@
- Runs Kubernetes nodes in Docker containers
- Can deploy multiple clusters, with multiple nodes
- Can deploy multiple clusters, with multiple nodes, and multiple master nodes
- Runs the control plane on Kubernetes nodes
- As of June 2020, two versions co-exist: stable (1.7) and beta (3.0)
- Control plane can also run on multiple nodes
- They have different syntax and options, this can be confusing
(but don't let that stop you!)
---
@@ -86,7 +84,7 @@
- More advanced scenarios require writing a short [config file](https://kind.sigs.k8s.io/docs/user/quick-start#configuring-your-kind-cluster)
(to define multiple nodes, multiple control plane nodes, set Kubernetes versions ...)
(to define multiple nodes, multiple master nodes, set Kubernetes versions ...)
- Can deploy multiple clusters
@@ -126,9 +124,7 @@
## [Rancher Desktop](https://rancherdesktop.io/)
- Available on Linux, Mac, and Windows
- Free and open-source
- Available on Mac and Windows
- Runs a single cluster with a single node
@@ -138,7 +134,7 @@
- Emphasis on ease of use (like Docker Desktop)
- Relatively young product (first release in May 2021)
- Very young product (first release in May 2021)
- Based on k3s and other proven components
View File
@@ -0,0 +1,32 @@
# YAML in 5 minutes or less
- YAML originally stood for "Yet Another Markup Language"
  (it was later redefined as "YAML Ain't Markup Language")
- YAML is a superset of JSON
  - i.e. any valid JSON document is also valid YAML
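- For instance, this snippet is valid JSON *and* valid YAML:
```yaml
{ "name": "pingpong", "replicas": 3, "ready": true }
```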
---
## YAML Syntax and Types
- YAML syntax is based on indentation
- YAML Data Types
- Name/Value Maps
- Arrays
- String
- Number
- Boolean
- YAML supports multi-line strings
See samples:
- k8s/sampleYaml.yaml
- k8s/sampleYamlAsJson.json
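A minimal inline sample illustrating these types (same spirit as the files above):
```yaml
name: worker        # string
replicas: 3         # number
enabled: true       # boolean
tags:               # array
- web
- backend
resources:          # name/value map
  memory: 64Mi
script: |           # multi-line string
  echo hello
  echo world
```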
62
slides/kadm-fullday.yml Normal file
View File
@@ -0,0 +1,62 @@
title: |
Kubernetes
for Admins and Ops
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
- static-pods-exercise
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- k8s/prereqs-admin.md
- k8s/architecture.md
#- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
-
- k8s/multinode.md
- k8s/cni.md
- k8s/cni-internals.md
- k8s/interco.md
-
- k8s/apilb.md
#- k8s/setup-overview.md
#- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
- k8s/staticpods.md
-
#- k8s/cloud-controller-manager.md
#- k8s/bootstrap.md
- k8s/control-plane-auth.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
-
#- k8s/lastwords-admin.md
- k8s/links.md
- shared/thankyou.md
92
slides/kadm-twodays.yml Normal file
View File
@@ -0,0 +1,92 @@
title: |
Kubernetes
for administrators
and operators
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
# DAY 1
- - k8s/prereqs-admin.md
- k8s/architecture.md
- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
- - k8s/multinode.md
- k8s/cni.md
- k8s/cni-internals.md
- k8s/interco.md
- - k8s/apilb.md
- k8s/setup-overview.md
#- k8s/setup-devel.md
- k8s/setup-managed.md
- k8s/setup-selfhosted.md
- k8s/cluster-upgrade.md
- k8s/staticpods.md
- - k8s/cluster-backup.md
- k8s/cloud-controller-manager.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
# DAY 2
- - k8s/kubercoins.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/authn-authz.md
- k8s/user-cert.md
- k8s/csr-api.md
- - k8s/openid-connect.md
- k8s/control-plane-auth.md
###- k8s/bootstrap.md
- k8s/netpol.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- - k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
- - k8s/prometheus.md
#- k8s/prometheus-stack.md
- k8s/extending-api.md
- k8s/crd.md
- k8s/operators.md
- k8s/eck.md
###- k8s/operators-design.md
###- k8s/operators-example.md
# CONCLUSION
- - k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md
- |
# (All content after this slide is bonus material)
# EXTRA
- - k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
View File
@@ -2,11 +2,12 @@ title: |
Advanced
Kubernetes
chat: "[Slack](https://newrelic.slack.com/archives/C0438EFM97F)"
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-09-nr2.container.training/
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
@@ -26,84 +27,63 @@ content:
- #1
- k8s/prereqs-admin.md
- k8s/architecture.md
- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
- interlude-form.md
- #2
- k8s/multinode.md
- k8s/cni.md
- k8s/interco.md
- #3
- k8s/cni-internals.md
- k8s/apilb.md
- #2
- k8s/demo-apps.md
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- exercises/netpol-details.md
- exercises/rbac-details.md
- #3
- k8s/helm-intro.md
- k8s/ingress.md
- k8s/cert-manager.md
- k8s/ingress-tls.md
- k8s/ingress-advanced.md
- k8s/control-plane-auth.md
- |
# (Extra content)
- k8s/staticpods.md
- k8s/cluster-upgrade.md
- #4
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- |
# (Extra content)
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
- exercises/helm-generic-chart-details.md
- exercises/helm-umbrella-chart-details.md
- #4
- k8s/ytt.md
- #5
- k8s/extending-api.md
- k8s/admission.md
- k8s/cainjector.md
- k8s/kyverno.md
- k8s/crd.md
- k8s/operators.md
- k8s/sealed-secrets.md
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/owners-and-dependents.md
- k8s/events.md
- k8s/finalizers.md
- exercises/sealed-secrets-details.md
- #5
- k8s/resource-limits.md
- k8s/cluster-sizing.md
- k8s/cluster-autoscaler.md
- k8s/horizontal-pod-autoscaler.md
- k8s/crd.md
- #6
- k8s/ingress-tls.md
- k8s/ingress-advanced.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/eck.md
- #7
- k8s/admission.md
- k8s/kyverno.md
- #8
- k8s/aggregation-layer.md
- k8s/metrics-server.md
- k8s/prometheus.md
- k8s/prometheus-stack.md
- k8s/hpa-v2.md
- k8s/batch-jobs.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
- k8s/stateful-failover.md
- shared/thankyou.md
-
- |
# (Extra content I)
- k8s/setup-devel.md
- k8s/accessinternal.md
- k8s/kubectlproxy.md
- k8s/k9s.md
- k8s/tilt.md
- k8s/ytt.md
-
- |
# (Extra content II)
- k8s/internal-apis.md
- k8s/staticpods.md
- k8s/cluster-upgrade.md
- k8s/control-plane-auth.md
- #9
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/kubebuilder.md
- k8s/events.md
- k8s/finalizers.md
- |
# (Extra content)
- k8s/owners-and-dependents.md
- k8s/apiserver-deepdive.md
#- k8s/record.md
- shared/thankyou.md
134
slides/kube-fullday.yml Normal file
View File
@@ -0,0 +1,134 @@
title: |
Deploying and Scaling Microservices
with Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectl-run.md
#- k8s/batch-jobs.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
-
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/namespaces.md
- k8s/yamldeploy.md
- k8s/setup-overview.md
- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
-
- k8s/dashboard.md
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/ingress.md
#- k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/openebs.md
#- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
#- k8s/scalingdockercoins.md
#- shared/hastyconclusions.md
#- k8s/daemonset.md
#- k8s/authoring-yaml.md
#- k8s/exercise-yaml.md
#- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
#- k8s/accessinternal.md
#- k8s/kubectlproxy.md
#- k8s/healthchecks-more.md
#- k8s/record.md
#- k8s/ingress-tls.md
#- k8s/kustomize.md
#- k8s/helm-intro.md
#- k8s/helm-chart-format.md
#- k8s/helm-create-basic-chart.md
#- k8s/helm-create-better-chart.md
#- k8s/helm-dependencies.md
#- k8s/helm-values-schema-validation.md
#- k8s/helm-secrets.md
#- k8s/exercise-helm.md
#- k8s/ytt.md
#- k8s/gitlab.md
#- k8s/create-chart.md
#- k8s/create-more-charts.md
#- k8s/netpol.md
#- k8s/authn-authz.md
#- k8s/user-cert.md
#- k8s/csr-api.md
#- k8s/openid-connect.md
#- k8s/pod-security-intro.md
#- k8s/pod-security-policies.md
#- k8s/pod-security-admission.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
#- k8s/logs-centralized.md
#- k8s/prometheus.md
#- k8s/prometheus-stack.md
#- k8s/statefulsets.md
#- k8s/consul.md
#- k8s/pv-pvc-sc.md
#- k8s/volume-claim-templates.md
#- k8s/portworx.md
#- k8s/openebs.md
#- k8s/stateful-failover.md
#- k8s/extending-api.md
#- k8s/crd.md
#- k8s/admission.md
#- k8s/operators.md
#- k8s/operators-design.md
#- k8s/operators-example.md
#- k8s/staticpods.md
#- k8s/finalizers.md
#- k8s/owners-and-dependents.md
#- k8s/gitworkflows.md
-
#- k8s/whatsnext.md
- k8s/lastwords.md
#- k8s/links.md
- shared/thankyou.md
89
slides/kube-halfday.yml Normal file
View File
@@ -0,0 +1,89 @@
title: |
Kubernetes 101
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-overview.md
#- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- - k8s/kubectl-run.md
#- k8s/batch-jobs.md
#- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
#- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- - k8s/dashboard.md
#- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/rollout.md
#- k8s/record.md
- - k8s/logs-cli.md
# Bridget hasn't added EFK yet
#- k8s/logs-centralized.md
- k8s/namespaces.md
- k8s/helm-intro.md
#- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
#- k8s/helm-create-better-chart.md
#- k8s/helm-dependencies.md
#- k8s/helm-values-schema-validation.md
#- k8s/helm-secrets.md
#- k8s/kustomize.md
#- k8s/ytt.md
#- k8s/netpol.md
- k8s/whatsnext.md
# - k8s/links.md
# Bridget-specific
- k8s/links-bridget.md
- shared/thankyou.md
99
slides/kube-jerome.yml Normal file
View File
@@ -0,0 +1,99 @@
title: |
Kubernetes
Intermediate
Training
chat: "`#kubernetes-training-january-10-14`"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-01-nr.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- exercises/k8sfundamentals-brief.md
- exercises/localcluster-brief.md
- exercises/healthchecks-brief.md
- exercises/ingress-brief.md
- exercises/appconfig-brief.md
- # DAY 1
- shared/prereqs.md
- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
- k8s/kubectl-run.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- exercises/k8sfundamentals-details.md
- # DAY 2
- k8s/ourapponkube.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/namespaces.md
- k8s/yamldeploy.md
- k8s/setup-overview.md
- k8s/setup-devel.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- k8s/k9s.md
- k8s/tilt.md
- exercises/localcluster-details.md
- # DAY 3
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/rollout.md
- k8s/healthchecks.md
- exercises/healthchecks-details.md
- k8s/ingress.md
#- k8s/ingress-tls.md
- exercises/ingress-details.md
- # DAY 4
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- # DAY 5
- k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- exercises/appconfig-details.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/portworx.md
#- k8s/openebs.md
#- k8s/stateful-failover.md
- # Extra
- |
# (Extra content)
- k8s/horizontal-pod-autoscaler.md
- k8s/dashboard.md
- k8s/batch-jobs.md
165
slides/kube-selfpaced.yml Normal file
View File
@@ -0,0 +1,165 @@
title: |
Deploying and Scaling Microservices
with Docker and Kubernetes
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
content:
- shared/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
-
- k8s/kubectlget.md
- k8s/kubectl-run.md
- k8s/batch-jobs.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
-
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
- k8s/yamldeploy.md
-
- k8s/setup-overview.md
- k8s/setup-devel.md
- k8s/setup-managed.md
- k8s/setup-selfhosted.md
- k8s/dashboard.md
- k8s/k9s.md
- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/authoring-yaml.md
#- k8s/exercise-yaml.md
-
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
- k8s/record.md
-
- k8s/namespaces.md
- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
- k8s/accessinternal.md
- k8s/kubectlproxy.md
-
- k8s/ingress.md
- k8s/ingress-advanced.md
- k8s/ingress-tls.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
#- k8s/exercise-helm.md
- k8s/gitlab.md
- k8s/ytt.md
-
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/control-plane-auth.md
-
- k8s/volumes.md
#- k8s/exercise-configmap.md
- k8s/build-with-docker.md
- k8s/build-with-kaniko.md
-
- k8s/configuration.md
- k8s/secrets.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
-
- k8s/logs-centralized.md
- k8s/prometheus.md
- k8s/prometheus-stack.md
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/cluster-autoscaler.md
- k8s/horizontal-pod-autoscaler.md
- k8s/hpa-v2.md
-
- k8s/extending-api.md
- k8s/apiserver-deepdive.md
- k8s/crd.md
- k8s/aggregation-layer.md
- k8s/admission.md
- k8s/operators.md
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/kubebuilder.md
- k8s/sealed-secrets.md
- k8s/kyverno.md
- k8s/eck.md
- k8s/finalizers.md
- k8s/owners-and-dependents.md
- k8s/events.md
-
- k8s/dmuc.md
- k8s/multinode.md
- k8s/cni.md
- k8s/cni-internals.md
- k8s/apilb.md
- k8s/staticpods.md
-
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
- k8s/cloud-controller-manager.md
- k8s/gitworkflows.md
-
- k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md
133
slides/kube-twodays.yml Normal file
View File
@@ -0,0 +1,133 @@
title: |
Deploying and Scaling Microservices
with Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectl-run.md
- k8s/batch-jobs.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
-
- k8s/yamldeploy.md
- k8s/setup-overview.md
- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/dashboard.md
- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/authoring-yaml.md
#- k8s/exercise-yaml.md
-
- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- k8s/rollout.md
- k8s/healthchecks.md
#- k8s/healthchecks-more.md
- k8s/record.md
-
- k8s/namespaces.md
- k8s/ingress.md
#- k8s/ingress-advanced.md
#- k8s/ingress-tls.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
#- k8s/exercise-helm.md
#- k8s/ytt.md
- k8s/gitlab.md
-
- k8s/netpol.md
- k8s/authn-authz.md
#- k8s/csr-api.md
#- k8s/openid-connect.md
#- k8s/pod-security-intro.md
#- k8s/pod-security-policies.md
#- k8s/pod-security-admission.md
-
- k8s/volumes.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/logs-centralized.md
#- k8s/prometheus.md
#- k8s/prometheus-stack.md
-
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
#- k8s/extending-api.md
#- k8s/admission.md
#- k8s/operators.md
#- k8s/operators-design.md
#- k8s/operators-example.md
#- k8s/staticpods.md
#- k8s/owners-and-dependents.md
#- k8s/gitworkflows.md
-
- k8s/whatsnext.md
- k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md
120
slides/kube.aug.yaml Normal file
View File
@@ -0,0 +1,120 @@
title: |
Kubernetes
Intermediate
Training
chat: "`Zoom Chat`"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-08-nr.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics-gerry.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
# - shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
- shared/chat-room-zoom-webinar.md
- shared/prereqs.md
- shared/webssh.md
- shared/connecting.md
- shared/toc.md
- exercises/k8sfundamentals-brief.md
- exercises/localcluster-brief.md
- exercises/healthchecks-brief.md
- exercises/ingress-brief.md
- exercises/appconfig-brief.md
-
# DAY 1
- containers/Macro_View.md
#- shared/webssh.md
#- k8s/versions-k8s.md
#- shared/composescale.md
- k8s/concepts-k8s.md
- k8s/deploymentslideshow.md
- shared/declarative.md
- k8s/declarative.md
#- shared/hastyconclusions.md
# - k8s/shippingimages.md
- k8s/kubectl-first.md
- k8s/authoring-yaml.md
#- k8s/buildshiprun-selfhosted.md
- k8s/alias-and-references.md
- # DAY 2
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/kubectl-run.md
- k8s/kubectl-more.md
- k8s/kubectlexpose.md
- k8s/rollout.md
- k8s/yamldeploy.md
- # DAY 3 (Started with 2 hour's lab and discussion)
- k8s/daemonset.md
- k8s/namespaces.md
- k8s/healthchecks.md
- k8s/kubenet.md
- exercises/healthchecks-details.md
- # DAY 4
- k8s/netpol.md
- k8s/accessinternal.md
- k8s/ingress.md
- containers/software-deployment.md
- k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/volume-claim-templates.md
- exercises/ingress-details.md
- exercises/appconfig-details.md
- # DAY 5
# - k8s/kubectlproxy.md
- k8s/consul.md
- k8s/statefulsets.md
- k8s/pv-pvc-sc.md
- k8s/authn-authz.md
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/setup-overview.md
- k8s/setup-devel.md
- k8s/cluster-sizing.md
- k8s/localkubeconfig.md
- k8s/k9s.md
- k8s/tilt.md
- exercises/localcluster-details.md
- shared/thankyou.md
- # DockerCoins
- |
# (Docker Coins Example)
- shared/sampleapp.md
- shared/composedown.md
- k8s/buildshiprun-dockerhub.md
- k8s/shippingimages.md
- exercises/k8sfundamentals-details.md
- k8s/ourapponkube.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- # Extra
- |
# (Extra content)
- k8s/horizontal-pod-autoscaler.md
- k8s/dashboard.md
- k8s/batch-jobs.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/helm-create-better-chart.md
99
slides/kube.orig.yml Normal file
View File
@@ -0,0 +1,99 @@
title: |
Kubernetes
Intermediate
Training
chat: "`Zoom Chat`"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-08-nr.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics-gerry.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- exercises/k8sfundamentals-brief.md
- exercises/localcluster-brief.md
- exercises/healthchecks-brief.md
- exercises/ingress-brief.md
- exercises/appconfig-brief.md
- # DAY 1
- shared/prereqs.md
- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
- k8s/kubectl-run.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- exercises/k8sfundamentals-details.md
- # DAY 2
- k8s/ourapponkube.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/namespaces.md
- k8s/yamldeploy.md
- k8s/setup-overview.md
- k8s/setup-devel.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- k8s/k9s.md
- k8s/tilt.md
- exercises/localcluster-details.md
- # DAY 3
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/rollout.md
- k8s/healthchecks.md
- exercises/healthchecks-details.md
- k8s/ingress.md
#- k8s/ingress-tls.md
- exercises/ingress-details.md
- # DAY 4
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- # DAY 5
- k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- exercises/appconfig-details.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/portworx.md
#- k8s/openebs.md
#- k8s/stateful-failover.md
- # Extra
- |
# (Extra content)
- k8s/horizontal-pod-autoscaler.md
- k8s/dashboard.md
- k8s/batch-jobs.md
124
slides/kube.yml Normal file
View File
@@ -0,0 +1,124 @@
title: |
Kubernetes
Intermediate
Training
chat: "`Zoom Chat`"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-09-nr1.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics-gerry.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
# - shared/chat-room-slack.md
- shared/chat-room-zoom-meeting.md
# - shared/chat-room-zoom-webinar.md
- shared/toc.md
- exercises/k8sfundamentals-brief.md
- exercises/localcluster-brief.md
- exercises/healthchecks-brief.md
- exercises/ingress-brief.md
- exercises/appconfig-brief.md
-
# DAY 1
- shared/prereqs.md
- shared/webssh.md
- shared/connecting.md
- containers/Macro_View.md
#- shared/webssh.md
#- k8s/versions-k8s.md
#- shared/composescale.md
- k8s/concepts-k8s-intro.md
- shared/declarative.md
- k8s/declarative.md
- k8s/yaml-in-5-min.md
- k8s/concepts-k8s-arch.md
- k8s/deploymentslideshow.md
#- shared/hastyconclusions.md
# - k8s/shippingimages.md
- k8s/kubectl-first.md
- k8s/kubectl-run-pod.md
# - k8s/authoring-yaml.md
#- k8s/buildshiprun-selfhosted.md
- k8s/alias-and-references.md
- containers/Cmd_And_Entrypoint.md
- # DAY 2
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/kubectl-run-deployment.md
- k8s/deploymentslideshow.md
- k8s/kubectl-more.md
- k8s/kubectlexpose.md
- # DAY 3
- k8s/rollout.md
- k8s/yamldeploy.md
- k8s/daemonset.md
- k8s/namespaces.md
- k8s/healthchecks.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/volumes.md
- # DAY 4
- k8s/volume-claim-templates.md
- k8s/statefulsets.md
- k8s/kubenet.md
- k8s/netpol.md
- k8s/accessinternal.md
- k8s/ingress.md
- exercises/healthchecks-details.md
- exercises/ingress-details.md
- exercises/appconfig-details.md
- # DAY 5
# - k8s/kubectlproxy.md
- k8s/pv-pvc-sc.md
- k8s/authn-authz.md
- k8s/consul.md
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/setup-overview.md
- k8s/setup-devel.md
- k8s/cluster-sizing.md
- k8s/localkubeconfig.md
- k8s/k9s.md
- k8s/tilt.md
- exercises/localcluster-details.md
- shared/thankyou.md
- # DockerCoins
- |
# (Docker Coins Example)
- shared/sampleapp.md
- shared/composedown.md
- k8s/buildshiprun-dockerhub.md
- k8s/shippingimages.md
- exercises/k8sfundamentals-details.md
- k8s/ourapponkube.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- # Extra
- |
# (Extra content)
- containers/software-deployment.md
- k8s/horizontal-pod-autoscaler.md
- k8s/dashboard.md
- k8s/batch-jobs.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/helm-create-better-chart.md
62
slides/logistics-gerry.md Normal file
View File
@@ -0,0 +1,62 @@
## While We are Waiting To Get Started ...
This slide deck is at [https://2022-09-nr1.container.training/#2](https://2022-09-nr1.container.training/#2)
- If you have not already done so, please complete this survey: [https://docs.google.com/forms/d/1TEQylRtwZ7M_6fx0Zo9ErBYDeJlASAxSSgExHgafHKM](https://docs.google.com/forms/d/1TEQylRtwZ7M_6fx0Zo9ErBYDeJlASAxSSgExHgafHKM)
- Your lab computers are assigned on this Google Sheet: [https://docs.google.com/spreadsheets/d/1s8CboVoTOg9mWPZaRkLXJwRwEqNhceoPuTIKq6K3r7Q](https://docs.google.com/spreadsheets/d/1s8CboVoTOg9mWPZaRkLXJwRwEqNhceoPuTIKq6K3r7Q)
- Enter your name in column 1 in one of the unclaimed rows
- Your lab computer's IP address is in the **node1** column
- Log into your lab computer: **ssh -l k8s node1**
- The password is: **relics**
- Verify that everything works with: **kubectl version --short**
- Class starts at 8AM PST / 11AM EST each day
---
## Introductions
- Hello! I'm [Gerry Seidman](https://www.linkedin.com/in/gerryseidman), Ardan Labs (gerry.seidman@ardanlabs.com)
- The training will run Monday to Friday, from 8:00AM to 12:00PM PST (11AM-3PM EST)
- There will be breaks!
- Feel free to interrupt for questions at any time
- *Especially when you see full screen container pictures!*
- Live feedback, questions, help: @@CHAT@@
<!-- -->
[@alexbuisine]: https://twitter.com/alexbuisine
[EphemeraSearch]: https://ephemerasearch.com/
[@jpetazzo]: https://twitter.com/jpetazzo
[@s0ulshake]: https://twitter.com/s0ulshake
[Quantgene]: https://www.quantgene.com/
---
## Exercises
- At the end of each day, there is a series of exercises
- To make the most out of the training, please try the exercises!
(it will help to practice and memorize the content of the day)
- We'll try to finish a bit earlier each day so that you can have lab time
(and if you can put in some extra time, that's even better ♥)
- Each day will start with a quick review of the exercises of the previous day
- If you have any questions, blockers, curiosities ... Ping me in the zoom chat
View File
@@ -1,27 +1,18 @@
## Intros
## Introductions
- Hello! We are:
- Hello! I'm FIXME
- Jérôme Petazzoni ([@jpetazzo])
- The training will run Monday to Friday:
- Dana Engebretson ([@bigdana])
- 11am-3pm (New-York)
- The training will run from 8am to noon, Monday to Friday
- 4pm-8pm (London)
- There will be breaks every hour
- 5pm-9pm (Barcelona, Paris, Berlin)
- Feel free to interrupt for questions at any time
- *Especially when you see full screen container pictures!*
(I will watch them in awkward silence while I wait for your questions)
- Live feedback, questions, help: @@CHAT@@
<!-- -->
- There will be a short break (\~10 min) every hour
[@alexbuisine]: https://twitter.com/alexbuisine
[@bigdana]: https://twitter.com/bigdana
[EphemeraSearch]: https://ephemerasearch.com/
[@jpetazzo]: https://twitter.com/jpetazzo
[@s0ulshake]: https://twitter.com/s0ulshake
@@ -29,12 +20,18 @@
---
## Dynamic content
## Exercises
- The content of this course will be adapted to suit your needs!
- At the end of each day, there is a series of exercises
- We'll share a link to a form so you can tell us what we should work on
- To make the most out of the training, please try the exercises!
- Expect the content of the deck to change between Monday and Tuesday
(it will help to practice and memorize the content of the day)
(we will reorder/reorganize the content accordingly)
- We'll try to finish a bit earlier each day so that you can have lab time
(and if you can put in some extra time, that's even better ♥)
- Each day will start with a quick review of the exercises of the previous day
- Ping us on @@CHAT@@!
20564
slides/out.html Normal file
File diff suppressed because it is too large
View File
@@ -1,4 +1,4 @@
# Pre-requirements
## Pre-requirements
- Be comfortable with the UNIX command line
71
slides/swarm-fullday.yml Normal file
View File
@@ -0,0 +1,71 @@
title: |
Container Orchestration
with Docker and Swarm
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: https://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
- snap
- btp-auto
- benchmarking
- elk-manual
- prom-manual
content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/ipsec.md
- swarm/swarmtools.md
- swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- - swarm/logging.md
- swarm/metrics.md
- swarm/gui.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
Some files were not shown because too many files have changed in this diff