mirror of https://github.com/jpetazzo/container.training.git
synced 2026-02-28 00:13:51 +00:00

Compare commits: 2023-01-en...2022-08-th (1 commit)

Commit ea3178327a
@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,8 +253,8 @@ spec:
         app.kubernetes.io/instance: kubernetes-dashboard
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/name: kubernetes-dashboard
-        app.kubernetes.io/version: 2.7.0
-        helm.sh/chart: kubernetes-dashboard-6.0.0
+        app.kubernetes.io/version: 2.5.0
+        helm.sh/chart: kubernetes-dashboard-5.2.0
     spec:
       containers:
         - args:
@@ -262,7 +262,7 @@ spec:
             - --sidecar-host=http://127.0.0.1:8000
             - --enable-skip-login
            - --enable-insecure-login
-          image: kubernetesui/dashboard:v2.7.0
+          image: kubernetesui/dashboard:v2.5.0
           imagePullPolicy: IfNotPresent
           livenessProbe:
             httpGet:
@@ -293,7 +293,7 @@ spec:
               name: kubernetes-dashboard-certs
             - mountPath: /tmp
               name: tmp-volume
-        - image: kubernetesui/metrics-scraper:v1.0.8
+        - image: kubernetesui/metrics-scraper:v1.0.7
           imagePullPolicy: IfNotPresent
           livenessProbe:
             httpGet:
@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,15 +253,15 @@ spec:
         app.kubernetes.io/instance: kubernetes-dashboard
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/name: kubernetes-dashboard
-        app.kubernetes.io/version: 2.7.0
-        helm.sh/chart: kubernetes-dashboard-6.0.0
+        app.kubernetes.io/version: 2.5.0
+        helm.sh/chart: kubernetes-dashboard-5.2.0
     spec:
       containers:
         - args:
            - --namespace=kubernetes-dashboard
            - --auto-generate-certificates
            - --sidecar-host=http://127.0.0.1:8000
-          image: kubernetesui/dashboard:v2.7.0
+          image: kubernetesui/dashboard:v2.5.0
           imagePullPolicy: IfNotPresent
           livenessProbe:
             httpGet:
@@ -292,7 +292,7 @@ spec:
               name: kubernetes-dashboard-certs
             - mountPath: /tmp
               name: tmp-volume
-        - image: kubernetesui/metrics-scraper:v1.0.8
+        - image: kubernetesui/metrics-scraper:v1.0.7
          imagePullPolicy: IfNotPresent
          livenessProbe:
            httpGet:
@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,15 +253,15 @@ spec:
         app.kubernetes.io/instance: kubernetes-dashboard
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/name: kubernetes-dashboard
-        app.kubernetes.io/version: 2.7.0
-        helm.sh/chart: kubernetes-dashboard-6.0.0
+        app.kubernetes.io/version: 2.5.0
+        helm.sh/chart: kubernetes-dashboard-5.2.0
     spec:
       containers:
         - args:
            - --namespace=kubernetes-dashboard
            - --auto-generate-certificates
            - --sidecar-host=http://127.0.0.1:8000
-          image: kubernetesui/dashboard:v2.7.0
+          image: kubernetesui/dashboard:v2.5.0
           imagePullPolicy: IfNotPresent
           livenessProbe:
             httpGet:
@@ -292,7 +292,7 @@ spec:
               name: kubernetes-dashboard-certs
             - mountPath: /tmp
               name: tmp-volume
-        - image: kubernetesui/metrics-scraper:v1.0.8
+        - image: kubernetesui/metrics-scraper:v1.0.7
           imagePullPolicy: IfNotPresent
           livenessProbe:
             httpGet:
@@ -344,12 +344,3 @@ metadata:
   creationTimestamp: null
   name: cluster-admin
   namespace: kubernetes-dashboard
----
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/service-account-token
-metadata:
-  name: cluster-admin-token
-  namespace: kubernetes-dashboard
-  annotations:
-    kubernetes.io/service-account.name: cluster-admin
@@ -1,5 +1,5 @@
 kind: HorizontalPodAutoscaler
-apiVersion: autoscaling/v2
+apiVersion: autoscaling/v2beta2
 metadata:
   name: rng
 spec:
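The only change here is the apiVersion: `autoscaling/v2` only graduated in recent Kubernetes releases, so the older branch falls back to `autoscaling/v2beta2`. A quick way to see which autoscaling APIs a given cluster actually serves (assuming `kubectl` already points at it):

```bash
# List the API group/versions the apiserver advertises for autoscaling.
kubectl api-versions | grep ^autoscaling
# autoscaling/v1
# autoscaling/v2        <- newer clusters
# autoscaling/v2beta2   <- still served on older clusters
```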
@@ -15,10 +15,10 @@ spec:
     - key: "{{ request.operation }}"
       operator: Equals
       value: UPDATE
-    - key: "{{ request.oldObject.metadata.labels.color || '' }}"
+    - key: "{{ request.oldObject.metadata.labels.color }}"
       operator: NotEquals
       value: ""
-    - key: "{{ request.object.metadata.labels.color || '' }}"
+    - key: "{{ request.object.metadata.labels.color }}"
       operator: NotEquals
       value: ""
   validate:
@@ -15,10 +15,10 @@ spec:
     - key: "{{ request.operation }}"
       operator: Equals
       value: UPDATE
-    - key: "{{ request.oldObject.metadata.labels.color || '' }}"
+    - key: "{{ request.oldObject.metadata.labels.color }}"
       operator: NotEquals
       value: ""
-    - key: "{{ request.object.metadata.labels.color || '' }}"
+    - key: "{{ request.object.metadata.labels.color }}"
       operator: Equals
       value: ""
   validate:
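In both Kyverno policies, the newer branch appends `|| ''` to the JMESPath lookups: when the `color` label is absent, the bare lookup yields null rather than an empty string, and the `NotEquals ""` / `Equals ""` preconditions don't behave as intended. A rough way to exercise the UPDATE preconditions by hand (hypothetical pod name; any resource matched by the policy would do):

```bash
kubectl run testpod --image=nginx                    # CREATE: preconditions don't match
kubectl label pod testpod color=blue                 # UPDATE adding the label
kubectl label pod testpod color=green --overwrite    # UPDATE changing an existing color
kubectl label pod testpod color-                     # UPDATE removing the color label
```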
@@ -70,15 +70,4 @@ add_namespace() {
   kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
           -o yaml --dry-run=client \
           #
-  echo ---
-  cat <<EOF
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/service-account-token
-metadata:
-  name: cluster-admin-token
-  namespace: kubernetes-dashboard
-  annotations:
-    kubernetes.io/service-account.name: cluster-admin
-EOF
 ) > dashboard-with-token.yaml
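The heredoc removed here emits a long-lived token Secret for the `cluster-admin` ServiceAccount (recent Kubernetes versions no longer auto-create ServiceAccount token Secrets). Once the generated manifest is applied, the token can be read back like this (a sketch, assuming the manifest was applied as-is):

```bash
# Decode the service-account token stored in the Secret created above.
kubectl --namespace=kubernetes-dashboard get secret cluster-admin-token \
        -o jsonpath={.data.token} | base64 -d
```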
@@ -1,10 +1,11 @@
 ---
 - hosts: nodes
-  become: yes
+  sudo: true
   vars_files:
     - vagrant.yml

   tasks:

   - name: clean up the home folder
     file:
       path: /home/vagrant/{{ item }}
@@ -23,23 +24,25 @@
 
   - name: installing dependencies
     apt:
-      name: apt-transport-https,ca-certificates,python3-pip,tmux
+      name: apt-transport-https,ca-certificates,python-pip,tmux
       state: present
       update_cache: true

   - name: fetching docker repo key
     apt_key:
-      url: https://download.docker.com/linux/ubuntu/gpg
       state: present
+      keyserver: hkp://p80.pool.sks-keyservers.net:80
+      id: 58118E89F3A912897C070ADBF76221572C52609D

-  - name: adding docker repo
+  - name: adding package repos
     apt_repository:
-      repo: deb https://download.docker.com/linux/ubuntu focal stable
+      repo: "{{ item }}"
       state: present
+    with_items:
+      - deb https://apt.dockerproject.org/repo ubuntu-trusty main

   - name: installing docker
     apt:
-      name: docker-ce,docker-ce-cli,containerd.io,docker-compose-plugin
+      name: docker-engine
       state: present
       update_cache: true
@@ -53,7 +56,7 @@
     lineinfile:
       dest: /etc/default/docker
       line: DOCKER_OPTS="--host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:55555"
-      regexp: "^#?DOCKER_OPTS=.*$"
+      regexp: '^#?DOCKER_OPTS=.*$'
       state: present
     register: docker_opts
@@ -63,14 +66,22 @@
       state: restarted
     when: docker_opts is defined and docker_opts.changed

-  - name: install docker-compose from official github repo
-    get_url:
-      url: https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64
-      dest: /usr/local/bin/docker-compose
-      mode: "u+x,g+x"
+  - name: performing pip autoupgrade
+    pip:
+      name: pip
+      state: latest
+
+  - name: installing virtualenv
+    pip:
+      name: virtualenv
+      state: latest
+
+  - name: Install Docker Compose via PIP
+    pip: name=docker-compose

   - name:
-    file: path="/usr/local/bin/docker-compose"
+    file:
+      path="/usr/local/bin/docker-compose"
       state=file
       mode=0755
       owner=vagrant
@@ -117,3 +128,5 @@
       line: "127.0.0.1 localhost {{ inventory_hostname }}"
     - regexp: '^127\.0\.1\.1'
       line: "127.0.1.1 {{ inventory_hostname }}"
+
+
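Before running either version of this playbook against real VMs, a syntax pass catches most of the YAML-level differences shown above; `site.yml` here is a hypothetical playbook name:

```bash
ansible-playbook --syntax-check site.yml   # parse the playbook without contacting hosts
ansible-playbook --check site.yml          # dry run against the inventory's hosts
```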
@@ -1,12 +1,13 @@
 ---
 vagrant:
-  default_box: ubuntu/focal64
+  default_box: ubuntu/trusty64
   default_box_check_update: true
+  ssh_insert_key: false
   min_memory: 256
   min_cores: 1

 instances:

 - hostname: node1
   private_ip: 10.10.10.10
   memory: 1512
@@ -36,3 +37,6 @@ instances:
   private_ip: 10.10.10.50
   memory: 512
   cores: 1
+
+
+
@@ -34,15 +34,28 @@ to that directory, then create the clusters using that configuration.
 
    - Scaleway: run `scw init`

-2. Run!
+2. Optional: set number of clusters, cluster size, and region.
+
+   By default, 1 cluster will be configured, with 2 nodes, and auto-scaling up to 5 nodes.
+
+   If you want, you can override these parameters, with the following variables.

    ```bash
-   ./run.sh <providername> <location> [number of clusters] [min nodes] [max nodes]
+   export TF_VAR_how_many_clusters=5
+   export TF_VAR_min_nodes_per_pool=2
+   export TF_VAR_max_nodes_per_pool=4
+   export TF_VAR_location=xxx
    ```

-   If you don't specify a provider name, it will list available providers.
+   The `location` variable is optional. Each provider should have a default value.
+   The value of the `location` variable is provider-specific. Examples:

-   If you don't specify a location, it will list locations available for this provider.
+   | Provider      | Example value     | How to see possible values
+   |---------------|-------------------|---------------------------
+   | Digital Ocean | `ams3`            | `doctl compute region list`
+   | Google Cloud  | `europe-north1-a` | `gcloud compute zones list`
+   | Linode        | `eu-central`      | `linode-cli regions list`
+   | Oracle Cloud  | `eu-stockholm-1`  | `oci iam region list`

    You can also specify multiple locations, and then they will be
    used in round-robin fashion.
@@ -53,15 +66,22 @@ my requests to increase that quota were denied) you can do the
 following:

 ```bash
-LOCATIONS=$(gcloud compute zones list --format=json | jq -r .[].name | grep ^europe)
-./run.sh googlecloud "$LOCATIONS"
+export TF_VAR_location=$(gcloud compute zones list --format=json | jq -r .[].name | grep ^europe)
 ```

 Then when you apply, clusters will be created across all available
 zones in Europe. (When I write this, there are 20+ zones in Europe,
 so even with my quota, I can create 40 clusters.)

-3. Shutting down
+3. Run!
+
+   ```bash
+   ./run.sh <providername>
+   ```
+
+   (If you don't specify a provider name, it will list available providers.)
+
+4. Shutting down

    Go to the directory that was created by the previous step (`tag-YYYY-MM...`)
    and run `terraform destroy`.
@@ -92,7 +112,7 @@ terraform init
 
 See steps above, and add the following extra steps:

-- Digital Ocean:
+- Digital Coean:
   ```bash
   export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
   ```
@@ -140,30 +160,3 @@ terraform destroy
 ```bash
 rm stage2/terraform.tfstate*
 ```
-
-10. Clean up leftovers.
-
-    Some providers don't clean up properly the resources created by the CCM.
-    For instance, when you create a Kubernetes `Service` of type
-    `LoadBalancer`, it generally provisions a cloud load balancer.
-    On Linode (and possibly other providers, too!) these cloud load balancers
-    aren't deleted when the cluster gets deleted, and they keep incurring
-    charges. You should check for those, to make sure that you don't
-    get charged for resources that you don't use anymore. As I write this
-    paragraph, there is:
-
-    - `linode-delete-ccm-loadbalancers.sh` to delete the Linode
-      nodebalancers; but be careful: it deletes **all** the nodebalancers
-      whose name starts with `ccm-`, which means that if you still have
-      Kubernetes clusters, their load balancers will be deleted as well!
-
-    - `linode-delete-pvc-volumes.sh` to delete Linode persistent disks
-      that have been created to satisfy Persistent Volume Claims
-      (these need to be removed manually because the default Storage Class
-      on Linode has a RETAIN policy). Again, be careful, this will wipe
-      out any volume whose label starts with `pvc`. (I don't know if it
-      will remove volumes that are still attached.)
-
-    Eventually, I hope to add more scripts for other providers, and make
-    them more selective and more robust, but for now, that's better than
-    nothing.
@@ -1,4 +0,0 @@
-#!/bin/sh
-linode-cli nodebalancers list --json |
-  jq '.[] | select(.label | startswith("ccm-")) | .id' |
-    xargs -n1 -P10 linode-cli nodebalancers delete
@@ -1,4 +0,0 @@
-#!/bin/sh
-linode-cli volumes list --json |
-  jq '.[] | select(.label | startswith("pvc")) | .id' |
-    xargs -n1 -P10 linode-cli volumes delete
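Both deleted scripts follow the same list-filter-delete pipeline. Before letting them loose, the destructive step can be dropped to preview what would be removed (the same commands as above, minus the `xargs ... delete`):

```bash
# Dry run: print the IDs of nodebalancers that the script would delete.
linode-cli nodebalancers list --json |
  jq '.[] | select(.label | startswith("ccm-")) | .id'
```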
@@ -3,37 +3,11 @@ set -e
 
 TIME=$(which time)

-if [ -f ~/.config/doctl/config.yaml ]; then
-  export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
-fi
-
-if [ -f ~/.config/linode-cli ]; then
-  export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
-fi
-
-[ "$1" ] || {
-  echo "Syntax:"
-  echo ""
-  echo "$0 <provider> <region> [how-many-clusters] [min-nodes] [max-nodes]"
-  echo ""
+PROVIDER=$1
+[ "$PROVIDER" ] || {
+  echo "Please specify a provider as first argument, or 'ALL' for parallel mode."
   echo "Available providers:"
   ls -1 source/modules
   echo ""
-  echo "Leave the region empty to show available regions for this provider."
-  echo "You can also specify ALL as a provider to simultaneously provision"
-  echo "many clusters on *each* provider for benchmarking purposes."
-  echo ""
   exit 1
 }
-
-PROVIDER="$1"
-export TF_VAR_location="$2"
-export TF_VAR_how_many_clusters="${3-1}"
-export TF_VAR_min_nodes_per_pool="${4-2}"
-export TF_VAR_max_nodes_per_pool="${5-4}"
-
-[ "$TF_VAR_location" ] || {
-  "./source/modules/$PROVIDER/list_locations.sh"
-  exit 1
-}
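With the argument-based interface that this compare removes, a full invocation spelled out per the syntax line above might look like this (the values are illustrative; providers come from `source/modules`, regions from the provider's `list_locations.sh`):

```bash
# provider, region, how many clusters, min nodes per pool, max nodes per pool
./run.sh linode eu-central 3 2 4
```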
@@ -62,11 +62,9 @@ resource "null_resource" "wait_for_nodes" {
       KUBECONFIG = local_file.kubeconfig[each.key].filename
     }
     command = <<-EOT
-      while sleep 1; do
-        kubectl get nodes --watch | grep --silent --line-buffered . &&
-        kubectl wait node --for=condition=Ready --all --timeout=10m &&
-        break
-      done
+      set -e
+      kubectl get nodes --watch | grep --silent --line-buffered .
+      kubectl wait node --for=condition=Ready --all --timeout=10m
     EOT
   }
 }
@@ -1,2 +0,0 @@
-#!/bin/sh
-doctl compute region list
@@ -1,2 +0,0 @@
-#!/bin/sh
-gcloud compute zones list
@@ -1,2 +0,0 @@
-#!/bin/sh
-linode-cli regions list
@@ -1,2 +0,0 @@
-#!/bin/sh
-oci iam region list
@@ -1,6 +0,0 @@
-#!/bin/sh
-echo "# Note that this is hard-coded in $0.
-# I don't know if there is a way to list regions through the Scaleway API.
-fr-par
-nl-ams
-pl-waw"
@@ -56,5 +56,5 @@ variable "location" {
 # scw k8s version list -o json | jq -r .[].name
 variable "k8s_version" {
   type    = string
-  default = "1.24.7"
+  default = "1.23.6"
 }
@@ -2,7 +2,7 @@ terraform {
   required_providers {
     kubernetes = {
       source  = "hashicorp/kubernetes"
-      version = "2.16.1"
+      version = "2.7.1"
     }
   }
 }
@@ -1,3 +1,3 @@
 INFRACLASS=scaleway
 #SCW_INSTANCE_TYPE=DEV1-L
-SCW_ZONE=fr-par-2
+#SCW_ZONE=fr-par-2
@@ -131,8 +131,6 @@ set nowrap
 SQRL
 
 pssh -I "sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.tmux.conf" <<SQRL
-set -g status-style bg=yellow,bold
-
 bind h select-pane -L
 bind j select-pane -D
 bind k select-pane -U
@@ -159,9 +157,6 @@ _cmd_clusterize() {
     TAG=$1
     need_tag

-    # Disable unattended upgrades so that they don't mess up with the subsequent steps
-    pssh sudo rm -f /etc/apt/apt.conf.d/50unattended-upgrades
-
     # Special case for scaleway since it doesn't come with sudo
     if [ "$INFRACLASS" = "scaleway" ]; then
         pssh -l root "
@@ -187,23 +182,9 @@ _cmd_clusterize() {
     pssh "
     if [ -f /etc/iptables/rules.v4 ]; then
         sudo sed -i 's/-A INPUT -j REJECT --reject-with icmp-host-prohibited//' /etc/iptables/rules.v4
-        sudo netfilter-persistent flush
-        sudo netfilter-persistent start
-    fi"
-
-    # oracle-cloud-agent upgrades pacakges in the background.
-    # This breaks our deployment scripts, because when we invoke apt-get, it complains
-    # that the lock already exists (symptom: random "Exited with error code 100").
-    # Workaround: if we detect oracle-cloud-agent, remove it.
-    # But this agent seems to also take care of installing/upgrading
-    # the unified-monitoring-agent package, so when we stop the snap,
-    # it can leave dpkg in a broken state. We "fix" it with the 2nd command.
-    pssh "
-    if [ -d /snap/oracle-cloud-agent ]; then
-        sudo snap remove oracle-cloud-agent
-        sudo dpkg --remove --force-remove-reinstreq unified-monitoring-agent
     fi"

     # Copy settings and install Python YAML parser
     pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
     pssh "
@@ -258,6 +239,14 @@ _cmd_docker() {
         sudo ln -sfn /mnt/docker /var/lib/docker
     fi

+    # containerd 1.6 breaks Weave.
+    # See https://github.com/containerd/containerd/issues/6921
+    sudo tee /etc/apt/preferences.d/containerd <<EOF
+Package: containerd.io
+Pin: version 1.5.*
+Pin-Priority: 1000
+EOF
+
     # This will install the latest Docker.
     sudo apt-get -qy install apt-transport-https ca-certificates curl software-properties-common
     curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
@@ -267,21 +256,19 @@ _cmd_docker() {
 
     # Add registry mirror configuration.
     if ! [ -f /etc/docker/daemon.json ]; then
         sudo mkdir -p /etc/docker
         echo '{\"registry-mirrors\": [\"https://mirror.gcr.io\"]}' | sudo tee /etc/docker/daemon.json
         sudo systemctl restart docker
     fi
     "

     ##VERSION## https://github.com/docker/compose/releases
-    COMPOSE_VERSION=v2.11.1
-    COMPOSE_PLATFORM='linux-$(uname -m)'
-
-    # Just in case you need Compose 1.X, you can use the following lines.
-    # (But it will probably only work for x86_64 machines.)
-    #COMPOSE_VERSION=1.29.2
-    #COMPOSE_PLATFORM='Linux-$(uname -m)'
+    if [ "$ARCHITECTURE" ]; then
+        COMPOSE_VERSION=v2.2.3
+        COMPOSE_PLATFORM='linux-$(uname -m)'
+    else
+        COMPOSE_VERSION=1.29.2
+        COMPOSE_PLATFORM='Linux-$(uname -m)'
+    fi
     pssh "
     set -e
     ### Install docker-compose.
@@ -359,8 +346,7 @@ EOF"
     pssh --timeout 200 "
     sudo apt-get update -q &&
     sudo apt-get install -qy kubelet kubeadm kubectl &&
-    sudo apt-mark hold kubelet kubeadm kubectl &&
     kubeadm completion bash | sudo tee /etc/bash_completion.d/kubeadm &&
+    sudo apt-mark hold kubelet kubeadm kubectl
     kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
     echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
     echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
@@ -433,9 +419,8 @@ EOF
     # Install weave as the pod network
     pssh "
     if i_am_first_node; then
-      #kubever=\$(kubectl version | base64 | tr -d '\n') &&
-      #kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=\$kubever
-      kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml
+      kubever=\$(kubectl version | base64 | tr -d '\n') &&
+      kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=\$kubever
     fi"

     # Join the other nodes to the cluster
@@ -493,13 +478,12 @@ _cmd_kubetools() {
     # Install kube-ps1
     pssh "
     set -e
-    if ! [ -d /opt/kube-ps1 ]; then
+    if ! [ -f /etc/profile.d/kube-ps1.sh ]; then
       cd /tmp
       git clone https://github.com/jonmosco/kube-ps1
-      sudo mv kube-ps1 /opt/kube-ps1
+      sudo cp kube-ps1/kube-ps1.sh /etc/profile.d/kube-ps1.sh
     sudo -u $USER_LOGIN sed -i s/docker-prompt/kube_ps1/ /home/$USER_LOGIN/.bashrc &&
     sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc <<EOF
-. /opt/kube-ps1/kube-ps1.sh
 KUBE_PS1_PREFIX=""
 KUBE_PS1_SUFFIX=""
 KUBE_PS1_SYMBOL_ENABLE="false"
@@ -510,13 +494,13 @@ EOF
 
     # Install stern
     ##VERSION## https://github.com/stern/stern/releases
-    STERN_VERSION=1.22.0
+    STERN_VERSION=1.20.1
     FILENAME=stern_${STERN_VERSION}_linux_${ARCH}
     URL=https://github.com/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
     pssh "
     if [ ! -x /usr/local/bin/stern ]; then
       curl -fsSL $URL |
-      sudo tar -C /usr/local/bin -zx stern
+      sudo tar -C /usr/local/bin -zx --strip-components=1 $FILENAME/stern
       sudo chmod +x /usr/local/bin/stern
       stern --completion bash | sudo tee /etc/bash_completion.d/stern
       stern --version
@@ -532,7 +516,7 @@ EOF
 
     # Install kustomize
     ##VERSION## https://github.com/kubernetes-sigs/kustomize/releases
-    KUSTOMIZE_VERSION=v4.5.7
+    KUSTOMIZE_VERSION=v4.4.0
     URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
     pssh "
     if [ ! -x /usr/local/bin/kustomize ]; then
@@ -551,7 +535,7 @@ EOF
     if [ ! -x /usr/local/bin/ship ]; then
       ##VERSION##
       curl -fsSL https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_$ARCH.tar.gz |
-      sudo tar -C /usr/local/bin -zx ship
+        sudo tar -C /usr/local/bin -zx ship
     fi"

     # Install the AWS IAM authenticator
@@ -559,8 +543,8 @@ EOF
     if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
       ##VERSION##
       sudo curl -fsSLo /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/$ARCH/aws-iam-authenticator
-      sudo chmod +x /usr/local/bin/aws-iam-authenticator
-      aws-iam-authenticator version
+        sudo chmod +x /usr/local/bin/aws-iam-authenticator
+        aws-iam-authenticator version
     fi"

     # Install the krew package manager
@@ -577,7 +561,7 @@ EOF
     # Install k9s
     pssh "
     if [ ! -x /usr/local/bin/k9s ]; then
-      FILENAME=k9s_Linux_$ARCH.tar.gz &&
+      FILENAME=k9s_Linux_$HERP_DERP_ARCH.tar.gz &&
      curl -fsSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
      sudo tar -zxvf- -C /usr/local/bin k9s
      k9s version
@@ -602,7 +586,6 @@ EOF
      FILENAME=tilt.\$TILT_VERSION.linux.$TILT_ARCH.tar.gz
      curl -fsSL https://github.com/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
      sudo tar -zxvf- -C /usr/local/bin tilt
-     tilt completion bash | sudo tee /etc/bash_completion.d/tilt
      tilt version
    fi"

@@ -611,7 +594,6 @@ EOF
    if [ ! -x /usr/local/bin/skaffold ]; then
      curl -fsSLo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-$ARCH &&
      sudo install skaffold /usr/local/bin/
-     skaffold completion bash | sudo tee /etc/bash_completion.d/skaffold
      skaffold version
    fi"

@@ -620,28 +602,9 @@ EOF
    if [ ! -x /usr/local/bin/kompose ]; then
      curl -fsSLo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-$ARCH &&
      sudo install kompose /usr/local/bin
-     kompose completion bash | sudo tee /etc/bash_completion.d/kompose
      kompose version
    fi"

-   # Install KinD
-   pssh "
-   if [ ! -x /usr/local/bin/kind ]; then
-     curl -fsSLo kind https://github.com/kubernetes-sigs/kind/releases/latest/download/kind-linux-$ARCH &&
-     sudo install kind /usr/local/bin
-     kind completion bash | sudo tee /etc/bash_completion.d/kind
-     kind version
-   fi"
-
-   # Install YTT
-   pssh "
-   if [ ! -x /usr/local/bin/ytt ]; then
-     curl -fsSLo ytt https://github.com/vmware-tanzu/carvel-ytt/releases/latest/download/ytt-linux-$ARCH &&
-     sudo install ytt /usr/local/bin
-     ytt completion bash | sudo tee /etc/bash_completion.d/ytt
-     ytt version
-   fi"
-
    ##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
    KUBESEAL_VERSION=0.17.4
    #case $ARCH in
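The containerd apt pin added on the 2022-08 side can be verified on a node after the fact; `apt-cache policy` shows whether the pin from `/etc/apt/preferences.d/containerd` is taking effect:

```bash
# The 1.5.* candidate should be reported with priority 1000 from the pin file.
apt-cache policy containerd.io
```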
@@ -36,7 +36,7 @@ if os.path.isfile(domain_or_domain_file):
     clusters = [line.split() for line in lines]
 else:
     ips = open(f"tags/{ips_file_or_tag}/ips.txt").read().split()
-    settings_file = f"tags/{ips_file_or_tag}/settings.yaml"
+    settings_file = f"tags/{tag}/settings.yaml"
     clustersize = yaml.safe_load(open(settings_file))["clustersize"]
     clusters = []
     while ips:
@@ -17,17 +17,8 @@
     exit 1
 }

-NETLIFY_CONFIG_FILE=~/.config/netlify/config.json
-
-if ! [ -f "$NETLIFY_CONFIG_FILE" ]; then
-  echo "Could not find Netlify configuration file ($NETLIFY_CONFIG_FILE)."
-  echo "Try to run the following command, and try again:"
-  echo "npx netlify-cli login"
-  exit 1
-fi
-
-NETLIFY_USERID=$(jq .userId < "$NETLIFY_CONFIG_FILE")
-NETLIFY_TOKEN=$(jq -r .users[$NETLIFY_USERID].auth.token < "$NETLIFY_CONFIG_FILE")
+NETLIFY_USERID=$(jq .userId < ~/.config/netlify/config.json)
+NETLIFY_TOKEN=$(jq -r .users[$NETLIFY_USERID].auth.token < ~/.config/netlify/config.json)

 netlify() {
   URI=$1
@@ -1,71 +0,0 @@
-resource "azurerm_resource_group" "_" {
-  name     = var.prefix
-  location = var.location
-}
-
-resource "azurerm_public_ip" "_" {
-  count               = var.how_many_nodes
-  name                = format("%s-%04d", var.prefix, count.index + 1)
-  location            = azurerm_resource_group._.location
-  resource_group_name = azurerm_resource_group._.name
-  allocation_method   = "Dynamic"
-}
-
-resource "azurerm_network_interface" "_" {
-  count               = var.how_many_nodes
-  name                = format("%s-%04d", var.prefix, count.index + 1)
-  location            = azurerm_resource_group._.location
-  resource_group_name = azurerm_resource_group._.name
-
-  ip_configuration {
-    name                          = "internal"
-    subnet_id                     = azurerm_subnet._.id
-    private_ip_address_allocation = "Dynamic"
-    public_ip_address_id          = azurerm_public_ip._[count.index].id
-  }
-}
-
-resource "azurerm_linux_virtual_machine" "_" {
-  count               = var.how_many_nodes
-  name                = format("%s-%04d", var.prefix, count.index + 1)
-  resource_group_name = azurerm_resource_group._.name
-  location            = azurerm_resource_group._.location
-  size                = var.size
-  admin_username      = "ubuntu"
-  network_interface_ids = [
-    azurerm_network_interface._[count.index].id,
-  ]
-
-  admin_ssh_key {
-    username   = "ubuntu"
-    public_key = local.authorized_keys
-  }
-
-  os_disk {
-    caching              = "ReadWrite"
-    storage_account_type = "Standard_LRS"
-  }
-
-  source_image_reference {
-    publisher = "Canonical"
-    offer     = "UbuntuServer"
-    sku       = "18.04-LTS" # FIXME
-    version   = "latest"
-  }
-}
-
-# The public IP address only gets allocated when the address actually gets
-# attached to the virtual machine. So we need to do this extra indrection
-# to retrieve the IP addresses. Otherwise the IP addresses show up as blank.
-# See: https://github.com/hashicorp/terraform-provider-azurerm/issues/310#issuecomment-335479735
-
-data "azurerm_public_ip" "_" {
-  count               = var.how_many_nodes
-  name                = format("%s-%04d", var.prefix, count.index + 1)
-  resource_group_name = azurerm_resource_group._.name
-  depends_on          = [azurerm_linux_virtual_machine._]
-}
-
-output "ip_addresses" {
-  value = join("", formatlist("%s\n", data.azurerm_public_ip._.*.ip_address))
-}
@@ -1,13 +0,0 @@
-resource "azurerm_virtual_network" "_" {
-  name                = "tf-vnet"
-  address_space       = ["10.10.0.0/16"]
-  location            = azurerm_resource_group._.location
-  resource_group_name = azurerm_resource_group._.name
-}
-
-resource "azurerm_subnet" "_" {
-  name                 = "tf-subnet"
-  resource_group_name  = azurerm_resource_group._.name
-  virtual_network_name = azurerm_virtual_network._.name
-  address_prefixes     = ["10.10.0.0/20"]
-}
@@ -1,13 +0,0 @@
-terraform {
-  required_version = ">= 1"
-  required_providers {
-    azurerm = {
-      source  = "hashicorp/azurerm"
-      version = "=3.33.0"
-    }
-  }
-}
-
-provider "azurerm" {
-  features {}
-}
@@ -1,32 +0,0 @@
-variable "prefix" {
-  type    = string
-  default = "provisioned-with-terraform"
-}
-
-variable "how_many_nodes" {
-  type    = number
-  default = 2
-}
-
-locals {
-  authorized_keys = file("~/.ssh/id_rsa.pub")
-}
-
-/*
-Available sizes:
-"Standard_D11_v2" # CPU=2 RAM=14
-"Standard_F4s_v2" # CPU=4 RAM=8
-"Standard_D1_v2"  # CPU=1 RAM=3.5
-"Standard_B1ms"   # CPU=1 RAM=2
-"Standard_B2s"    # CPU=2 RAM=4
-*/
-
-variable "size" {
-  type    = string
-  default = "Standard_F4s_v2"
-}
-
-variable "location" {
-  type    = string
-  default = "South Africa North"
-}
slides/1.yml
@@ -1,68 +0,0 @@
-title: |
-  Docker Intensif
-
-chat: "[Mattermost](https://highfive.container.training/mattermost)"
-
-gitrepo: github.com/jpetazzo/container.training
-
-slides: https://2023-01-enix.container.training/
-
-#slidenumberprefix: "#SomeHashTag — "
-
-exclude:
-- self-paced
-
-content:
-- shared/title.md
-- logistics.md
-- containers/intro.md
-- shared/about-slides.md
-- shared/chat-room-im.md
-#- shared/chat-room-zoom-meeting.md
-#- shared/chat-room-zoom-webinar.md
-- shared/toc.md
-- # DAY 1
-  #- containers/Docker_Overview.md
-  #- containers/Docker_History.md
-  - containers/Training_Environment.md
-  #- containers/Installing_Docker.md
-  - containers/First_Containers.md
-  - containers/Background_Containers.md
-  - containers/Initial_Images.md
-  - containers/Building_Images_Interactively.md
-  - containers/Building_Images_With_Dockerfiles.md
-  - containers/Cmd_And_Entrypoint.md
-  - containers/Copying_Files_During_Build.md
-  - containers/Exercise_Dockerfile_Basic.md
-- # DAY 2
-  - containers/Container_Networking_Basics.md
-  - containers/Local_Development_Workflow.md
-  - containers/Container_Network_Model.md
-  - containers/Compose_For_Dev_Stacks.md
-  - containers/Exercise_Composefile.md
-- # DAY 3
-  - containers/Start_And_Attach.md
-  - containers/Naming_And_Inspecting.md
-  - containers/Labels.md
-  - containers/Getting_Inside.md
-  - containers/Dockerfile_Tips.md
-  - containers/Advanced_Dockerfiles.md
-  - containers/Multi_Stage_Builds.md
-  - containers/Publishing_To_Docker_Hub.md
-  - containers/Exercise_Dockerfile_Advanced.md
-- # DAY 4
-  - containers/Buildkit.md
-  - containers/Network_Drivers.md
-  - containers/Namespaces_Cgroups.md
-  #- containers/Copy_On_Write.md
-  - containers/Orchestration_Overview.md
-  #- containers/Docker_Machine.md
-  #- containers/Init_Systems.md
-  #- containers/Application_Configuration.md
-  #- containers/Logging.md
-  #- containers/Containers_From_Scratch.md
-  #- containers/Container_Engines.md
-  #- containers/Pods_Anatomy.md
-  #- containers/Ecosystem.md
-- shared/thankyou.md
-#- containers/links.md
slides/2.yml
@@ -1,89 +0,0 @@
-title: |
-  Fondamentaux Kubernetes
-
-chat: "[Mattermost](https://highfive.container.training/mattermost)"
-
-gitrepo: github.com/jpetazzo/container.training
-
-slides: https://2023-01-enix.container.training/
-
-#slidenumberprefix: "#SomeHashTag — "
-
-exclude:
-- self-paced
-
-content:
-- shared/title.md
-- logistics.md
-- k8s/intro.md
-- shared/about-slides.md
-- shared/chat-room-im.md
-#- shared/chat-room-zoom-meeting.md
-#- shared/chat-room-zoom-webinar.md
-- shared/prereqs.md
-#- shared/webssh.md
-- shared/connecting.md
-- exercises/k8sfundamentals-brief.md
-- exercises/localcluster-brief.md
-- exercises/healthchecks-brief.md
-- shared/toc.md
-- # 1
-  #- k8s/versions-k8s.md
-  - shared/sampleapp.md
-  #- shared/composescale.md
-  #- shared/hastyconclusions.md
-  - shared/composedown.md
-  - k8s/concepts-k8s.md
-  - k8s/kubectlget.md
-  - k8s/kubectl-run.md
-  - k8s/kubectlexpose.md
-  - k8s/service-types.md
-  - k8s/kubenet.md
-  - k8s/shippingimages.md
-  #- k8s/buildshiprun-selfhosted.md
-  - k8s/buildshiprun-dockerhub.md
-  - exercises/k8sfundamentals-details.md
-  - k8s/ourapponkube.md
-  #- k8s/exercise-wordsmith.md
-- # 2
-  - k8s/labels-annotations.md
-  - k8s/kubectl-logs.md
-  - k8s/logs-cli.md
-  - k8s/namespaces.md
-  - k8s/yamldeploy.md
-  - shared/declarative.md
-  - k8s/declarative.md
-  - k8s/deploymentslideshow.md
-  - k8s/authoring-yaml.md
-  - k8s/setup-overview.md
-  - k8s/setup-devel.md
-  #- k8s/setup-managed.md
-  #- k8s/setup-selfhosted.md
-  - k8s/localkubeconfig.md
-  - k8s/accessinternal.md
-  - k8s/kubectlproxy.md
-  - exercises/localcluster-details.md
-- # 3
-  #- k8s/kubectlscale.md
-  - k8s/scalingdockercoins.md
-  - shared/hastyconclusions.md
-  - k8s/daemonset.md
-  - k8s/rollout.md
-  - k8s/healthchecks.md
-  #- k8s/healthchecks-more.md
-  - k8s/dashboard.md
-  - k8s/k9s.md
-  - k8s/tilt.md
-  - exercises/healthchecks-details.md
-- # 4
-  - k8s/ingress.md
-  #- k8s/ingress-tls.md
-  #- k8s/ingress-advanced.md
-  - k8s/volumes.md
-  #- k8s/exercise-configmap.md
-  #- k8s/build-with-docker.md
-  #- k8s/build-with-kaniko.md
-  - k8s/configuration.md
-  - k8s/secrets.md
-  - k8s/batch-jobs.md
-- shared/thankyou.md
slides/3.yml
@@ -1,42 +0,0 @@
-title: |
-  Packaging d'applications
-  pour Kubernetes
-
-chat: "[Mattermost](https://highfive.container.training/mattermost)"
-
-gitrepo: github.com/jpetazzo/container.training
-
-slides: https://2023-01-enix.container.training/
-
-#slidenumberprefix: "#SomeHashTag — "
-
-exclude:
-- self-paced
-
-content:
-- shared/title.md
-- logistics.md
-- k8s/intro.md
-- shared/about-slides.md
-- shared/prereqs.md
-- shared/webssh.md
-- shared/connecting.md
-#- shared/chat-room-im.md
-#- shared/chat-room-zoom.md
-- shared/toc.md
--
-  - k8s/demo-apps.md
-  - k8s/kustomize.md
-  - k8s/helm-intro.md
-  - k8s/helm-chart-format.md
-  - k8s/helm-create-basic-chart.md
-  - exercises/helm-generic-chart-details.md
--
-  - k8s/helm-create-better-chart.md
-  - k8s/helm-dependencies.md
-  - k8s/helm-values-schema-validation.md
-  - k8s/helm-secrets.md
-  - exercises/helm-umbrella-chart-details.md
--
-  - k8s/ytt.md
-- shared/thankyou.md
slides/4.yml
@@ -1,69 +0,0 @@
-title: |
-  Kubernetes Avancé
-
-chat: "[Mattermost](https://highfive.container.training/mattermost)"
-
-gitrepo: github.com/jpetazzo/container.training
-
-slides: https://2023-01-enix.container.training/
-
-#slidenumberprefix: "#SomeHashTag — "
-
-exclude:
-- self-paced
-
-content:
-- shared/title.md
-- logistics.md
-- k8s/intro.md
-- shared/about-slides.md
-- shared/chat-room-im.md
-#- shared/chat-room-zoom.md
-- shared/prereqs.md
-- shared/webssh.md
-- shared/connecting.md
-- shared/toc.md
-- exercises/netpol-brief.md
-- exercises/sealed-secrets-brief.md
-- exercises/kyverno-ingress-domain-name-brief.md
-- #1
-  - k8s/demo-apps.md
-  - k8s/netpol.md
-  - k8s/authn-authz.md
-  - k8s/sealed-secrets.md
-  - k8s/cert-manager.md
-  - k8s/cainjector.md
-  - k8s/ingress-tls.md
-  - exercises/netpol-details.md
-  - exercises/sealed-secrets-details.md
-- #2
-  - k8s/extending-api.md
-  - k8s/crd.md
-  - k8s/operators.md
-  - k8s/admission.md
-  - k8s/cainjector.md
-  - k8s/kyverno.md
-  - exercises/kyverno-ingress-domain-name-details.md
-- #3
-  - k8s/resource-limits.md
-  - k8s/metrics-server.md
-  - k8s/cluster-sizing.md
-  - k8s/horizontal-pod-autoscaler.md
-  - k8s/apiserver-deepdive.md
-  - k8s/aggregation-layer.md
-  - k8s/hpa-v2.md
-- #4
-  - k8s/statefulsets.md
-  - k8s/consul.md
-  - k8s/pv-pvc-sc.md
-  - k8s/volume-claim-templates.md
-  #- k8s/eck.md
-  #- k8s/portworx.md
-  - k8s/openebs.md
-  - k8s/stateful-failover.md
-  - k8s/operators-design.md
-  - k8s/operators-example.md
-  - k8s/owners-and-dependents.md
-  - k8s/events.md
-  - k8s/finalizers.md
-- shared/thankyou.md
slides/5.yml
@@ -1,58 +0,0 @@
-title: |
-  Opérer Kubernetes
-
-chat: "[Mattermost](https://highfive.container.training/mattermost)"
-
-gitrepo: github.com/jpetazzo/container.training
-
-slides: https://2023-01-enix.container.training/
-
-#slidenumberprefix: "#SomeHashTag — "
-
-exclude:
-- self-paced
-
-content:
-- shared/title.md
-- logistics.md
-- k8s/intro.md
-- shared/about-slides.md
-- shared/chat-room-im.md
-#- shared/chat-room-zoom-meeting.md
-#- shared/chat-room-zoom-webinar.md
-- shared/toc.md
-# DAY 1
--
-  - k8s/prereqs-admin.md
-  - k8s/architecture.md
-  - k8s/deploymentslideshow.md
-  - k8s/dmuc.md
--
-  - k8s/multinode.md
-  - k8s/cni.md
-  - k8s/interco.md
--
-  - k8s/cni-internals.md
-  - k8s/apilb.md
-  - k8s/internal-apis.md
-  - k8s/staticpods.md
-  - k8s/cluster-upgrade.md
-  - k8s/cluster-backup.md
-  #- k8s/cloud-controller-manager.md
--
-  - k8s/control-plane-auth.md
-  - k8s/user-cert.md
-  - k8s/csr-api.md
-  - k8s/openid-connect.md
-  - k8s/pod-security-intro.md
-  - k8s/pod-security-policies.md
-  - k8s/pod-security-admission.md
-- shared/thankyou.md
-#-
-#  |
-#  # (Extra content)
-#  - k8s/apiserver-deepdive.md
-#  - k8s/setup-overview.md
-#  - k8s/setup-devel.md
-#  - k8s/setup-managed.md
-#  - k8s/setup-selfhosted.md
@@ -1,7 +1,7 @@
 # Uncomment and/or edit one of the the following lines if necessary.
 #/ /kube-halfday.yml.html 200!
 #/ /kube-fullday.yml.html 200!
-#/ /kube-twodays.yml.html 200!
+/ /kube.yml.html 200!

 # And this allows to do "git clone https://container.training".
 /info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
@@ -23,5 +23,3 @@
 
 # Survey form
 /please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform
-
-/ /highfive.html 200!
36
slides/autopilot/package-lock.json
generated
36
slides/autopilot/package-lock.json
generated
@@ -194,9 +194,9 @@
}
},
"node_modules/engine.io": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.2.1.tgz",
"integrity": "sha512-ECceEFcAaNRybd3lsGQKas3ZlMVjN3cyWwMP25D2i0zWfyiytVbTpRPa34qrr+FHddtpBVOmq4H/DCv1O0lZRA==",
"version": "6.2.0",
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.2.0.tgz",
"integrity": "sha512-4KzwW3F3bk+KlzSOY57fj/Jx6LyRQ1nbcyIadehl+AnXjKT7gDO0ORdRi/84ixvMKTym6ZKuxvbzN62HDDU1Lg==",
"dependencies": {
"@types/cookie": "^0.4.1",
"@types/cors": "^2.8.12",
@@ -742,9 +742,9 @@
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"node_modules/socket.io-client/node_modules/socket.io-parser": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.1.tgz",
"integrity": "sha512-V4GrkLy+HeF1F/en3SpUaM+7XxYXpuMUWLGde1kSSh5nQMN4hLrbPIkD+otwh6q9R6NOQBN4AMaOZ2zVjui82g==",
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.0.tgz",
"integrity": "sha512-tLfmEwcEwnlQTxFB7jibL/q2+q8dlVQzj4JdRLJ/W/G1+Fu9VSxCx1Lo+n1HvXxKnM//dUuD0xgiA7tQf57Vng==",
"dependencies": {
"@socket.io/component-emitter": "~3.1.0",
"debug": "~4.3.1"
@@ -754,9 +754,9 @@
}
},
"node_modules/socket.io-parser": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.0.5.tgz",
"integrity": "sha512-sNjbT9dX63nqUFIOv95tTVm6elyIU4RvB1m8dOeZt+IgWwcWklFDOdmGcfo3zSiRsnR/3pJkjY5lfoGqEe4Eig==",
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.0.4.tgz",
"integrity": "sha512-t+b0SS+IxG7Rxzda2EVvyBZbvFPBCjJoyHuE0P//7OAsN23GItzDRdWa6ALxZI/8R5ygK7jAR6t028/z+7295g==",
"dependencies": {
"@types/component-emitter": "^1.2.10",
"component-emitter": "~1.3.0",
@@ -1033,9 +1033,9 @@
"integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w=="
},
"engine.io": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.2.1.tgz",
"integrity": "sha512-ECceEFcAaNRybd3lsGQKas3ZlMVjN3cyWwMP25D2i0zWfyiytVbTpRPa34qrr+FHddtpBVOmq4H/DCv1O0lZRA==",
"version": "6.2.0",
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.2.0.tgz",
"integrity": "sha512-4KzwW3F3bk+KlzSOY57fj/Jx6LyRQ1nbcyIadehl+AnXjKT7gDO0ORdRi/84ixvMKTym6ZKuxvbzN62HDDU1Lg==",
"requires": {
"@types/cookie": "^0.4.1",
"@types/cors": "^2.8.12",
@@ -1456,9 +1456,9 @@
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"socket.io-parser": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.1.tgz",
"integrity": "sha512-V4GrkLy+HeF1F/en3SpUaM+7XxYXpuMUWLGde1kSSh5nQMN4hLrbPIkD+otwh6q9R6NOQBN4AMaOZ2zVjui82g==",
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.0.tgz",
"integrity": "sha512-tLfmEwcEwnlQTxFB7jibL/q2+q8dlVQzj4JdRLJ/W/G1+Fu9VSxCx1Lo+n1HvXxKnM//dUuD0xgiA7tQf57Vng==",
"requires": {
"@socket.io/component-emitter": "~3.1.0",
"debug": "~4.3.1"
@@ -1467,9 +1467,9 @@
}
},
"socket.io-parser": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.0.5.tgz",
"integrity": "sha512-sNjbT9dX63nqUFIOv95tTVm6elyIU4RvB1m8dOeZt+IgWwcWklFDOdmGcfo3zSiRsnR/3pJkjY5lfoGqEe4Eig==",
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.0.4.tgz",
"integrity": "sha512-t+b0SS+IxG7Rxzda2EVvyBZbvFPBCjJoyHuE0P//7OAsN23GItzDRdWa6ALxZI/8R5ygK7jAR6t028/z+7295g==",
"requires": {
"@types/component-emitter": "^1.2.10",
"component-emitter": "~1.3.0",

@@ -19,7 +19,7 @@ They abstract the connection details for these services, and can help with:

* fail over (how do I know to which instance of a replicated service I should connect?)

* load balancing (how do I spread my requests across multiple instances of a service?)
* load balancing (how to I spread my requests across multiple instances of a service?)

* authentication (what if my service requires credentials, certificates, or otherwise?)


@@ -35,7 +35,7 @@ At the end of this section, you will be able to:

---

## Running an NGINX server
## Runing an NGINX server

```bash
$ docker run -d -P nginx

@@ -1,75 +1,57 @@
#!/usr/bin/env python
import re
import sys
import yaml

FIRST_SLIDE_MARKER = "name: toc-"
PART_PREFIX = "part-"

filename = sys.argv[1]
if filename.endswith(".html"):
    html_file = filename
    yaml_file = filename[: -len(".html")]
else:
    html_file = filename + ".html"
    yaml_file = filename
excluded_classes = yaml.safe_load(open(yaml_file))["exclude"]

PREFIX = "name: toc-"
EXCLUDED = ["in-person"]

class State(object):
    def __init__(self):
        self.current_slide = -1
        self.parts = {}

    def end_section(self):
        if state.section_title:
            print(
                "{0.section_start}\t{0.section_slides}\t{0.section_title}".format(self)
            )
        if self.section_part:
            if self.section_part not in self.parts:
                self.parts[self.section_part] = 0
            self.parts[self.section_part] += self.section_slides

    def new_section(self, slide):
        # Normally, the title should be prefixed by a space
        # (because section titles are first-level titles in markdown,
        # e.g. "# Introduction", and markmaker removes the # but leaves
        # the leading space).
        self.current_slide = 1
        self.section_title = None
        if "\n " in slide:
            self.section_title = slide.split("\n ")[1].split("\n")[0]
        toc_links = re.findall("\(#toc-(.*)\)", slide)
        self.section_part = None
        for toc_link in toc_links:
            if toc_link.startswith(PART_PREFIX):
                self.section_part = toc_link
        self.section_start = self.current_slide
        self.section_start = 0
        self.section_slides = 0

        self.parts = {}
        self.sections = {}
    def show(self):
        if self.section_title.startswith("part-"):
            return
        print("{0.section_title}\t{0.section_start}\t{0.section_slides}".format(self))
        self.sections[self.section_title] = self.section_slides

state = State()
state.new_section("")
print("{}\t{}\t{}".format("index", "size", "title"))

for slide in open(html_file).read().split("\n---\n"):
    excluded = False
    for line in slide.split("\n"):
        if line.startswith("class:"):
            for klass in excluded_classes:
                if klass in line.split():
                    excluded = True
    if excluded:
        continue
    if FIRST_SLIDE_MARKER in slide:
        # A new section starts. Show info about the part that just ended.
        state.end_section()
        state.new_section(slide)
    state.section_slides += 1
    for sub_slide in slide.split("\n--\n"):
        title = None
for line in open(sys.argv[1]):
    line = line.rstrip()
    if line.startswith(PREFIX):
        if state.section_title is None:
            print("{}\t{}\t{}".format("title", "index", "size"))
        else:
            state.show()
        state.section_title = line[len(PREFIX):].strip()
        state.section_start = state.current_slide
        state.section_slides = 0
    if line == "---":
        state.current_slide += 1
    else:
        state.end_section()
    state.section_slides += 1
    if line == "--":
        state.current_slide += 1
    toc_links = re.findall("\(#toc-(.*)\)", line)
    if toc_links and state.section_title.startswith("part-"):
        if state.section_title not in state.parts:
            state.parts[state.section_title] = []
        state.parts[state.section_title].append(toc_links[0])
    # This is really hackish
    if line.startswith("class:"):
        for klass in EXCLUDED:
            if klass in line:
                state.section_slides -= 1
                state.current_slide -= 1

state.show()

for part in sorted(state.parts, key=lambda f: int(f.split("-")[1])):
    print("{}\t{}\t{}".format(0, state.parts[part], "total size for " + part))
    part_size = sum(state.sections[s] for s in state.parts[part])
    print("{}\t{}\t{}".format("total size for", part, part_size))

@@ -4,6 +4,6 @@

(we will use the `rng` service in the dockercoins app)

- See what happens when the load increases
- See what happens when the load increses

(spoiler alert: it involves timeouts!)

@@ -2,7 +2,7 @@

- Add an ingress controller to a Kubernetes cluster

- Create an ingress resource for a couple of web apps on that cluster
- Create an ingress resource for a web app on that cluster

- Challenge: accessing/exposing port 80

@@ -1,131 +1,49 @@
# Exercise — Ingress

- We want to expose a couple of web apps through an ingress controller
- We want to expose a web app through an ingress controller

- This will require:

- the web apps (e.g. two instances of `jpetazzo/color`)
- the web app itself (dockercoins, NGINX, whatever we want)

- an ingress controller

- a domain name (use `\*.nip.io` or `\*.localdev.me`)

- an ingress resource

---

## Different scenarios
## Goal

We will use a different deployment mechanism depending on the cluster that we have:
- We want to be able to access the web app using a URL like:

- Managed cluster with working `LoadBalancer` Services
http://webapp.localdev.me

- Local development cluster
*or*

- Cluster without `LoadBalancer` Services (e.g. deployed with `kubeadm`)
http://webapp.A.B.C.D.nip.io

---

## The apps

- The web apps will be deployed similarly, regardless of the scenario

- Let's start by deploying two web apps, e.g.:

a Deployment called `blue` and another called `green`, using image `jpetazzo/color`

- Expose them with two `ClusterIP` Services

---

## Scenario "classic cloud Kubernetes"

*Difficulty: easy*

For this scenario, we need a cluster with working `LoadBalancer` Services.

(For instance, a managed Kubernetes cluster from a cloud provider.)

We suggest to use "Ingress NGINX" with its default settings.

It can be installed with `kubectl apply` or with `helm`.

Both methods are described in [the documentation][ingress-nginx-deploy].

We want our apps to be available on e.g. http://X.X.X.X/blue and http://X.X.X.X/green
<br/>
(where X.X.X.X is the IP address of the `LoadBalancer` allocated by Ingress NGINX).

[ingress-nginx-deploy]: https://kubernetes.github.io/ingress-nginx/deploy/

---

## Scenario "local development cluster"

*Difficulty: easy-hard (depends on the type of cluster!)*

For this scenario, we want to use a local cluster like KinD, minikube, etc.

We suggest to use "Ingress NGINX" again, like for the previous scenario.

Furthermore, we want to use `localdev.me`.

We want our apps to be available on e.g. `blue.localdev.me` and `green.localdev.me`.

The difficulty is to ensure that `localhost:80` will map to the ingress controller.

(See next slide for hints!)
(where A.B.C.D is the IP address of one of our nodes)

---

## Hints

- With clusters like Docker Desktop, the first `LoadBalancer` service uses `localhost`
- For the ingress controller, we can use:

(if the ingress controller is the first `LoadBalancer` service, we're all set!)
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/index.md)

- With clusters like K3D and KinD, it is possible to define extra port mappings
- the [Traefik Helm chart](https://doc.traefik.io/traefik/getting-started/install-traefik/#use-the-helm-chart)

(and map e.g. `localhost:80` to port 30080 on the node; then use that as a `NodePort`)
- the container.training [Traefik DaemonSet](https://raw.githubusercontent.com/jpetazzo/container.training/main/k8s/traefik-v2.yaml)

---
- If our cluster supports LoadBalancer Services: easy

## Scenario "on premises cluster", take 1
(nothing special to do)

*Difficulty: easy*
- For local clusters, things can be more difficult; two options:

For this scenario, we need a cluster with nodes that are publicly accessible.
- map localhost:80 to e.g. a NodePort service, and use `\*.localdev.me`

We want to deploy the ingress controller so that it listens on port 80 on all nodes.

This can be done e.g. with the manifests in @@LINK[k8s/traefik.yaml].

We want our apps to be available on e.g. http://X.X.X.X/blue and http://X.X.X.X/green
<br/>
(where X.X.X.X is the IP address of any of our nodes).

---

## Scenario "on premises cluster", take 2

*Difficulty: medium*

We want to deploy the ingress controller so that it listens on port 80 on all nodes.

But this time, we want to use a Helm chart to install the ingress controller.

We can use either the Ingress NGINX Helm chart, or the Traefik Helm chart.

Test with an untainted node first.

Feel free to make it work on tainted nodes (e.g. control plane nodes) later.

---

## Scenario "on premises cluster", take 3

*Difficulty: hard*

This is similar to the previous scenario, but with two significant changes:

1. We only want to run the ingress controller on nodes that have the role `ingress`.

2. We don't want to use `hostNetwork`, but a list of `externalIPs` instead.
- use hostNetwork, or ExternalIP, and use `\*.nip.io`

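For reference, here is a minimal sketch of an ingress resource matching the exercise above. The names and hosts are placeholders (it assumes `blue` and `green` Deployments already exposed by `ClusterIP` Services of the same names on port 80, and `A.B.C.D` stands for a node or load balancer address):

```yaml
# Hypothetical solution sketch for the exercise above.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: blue-green
spec:
  rules:
  - host: blue.A.B.C.D.nip.io        # or blue.localdev.me
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: blue
            port:
              number: 80
  - host: green.A.B.C.D.nip.io       # or green.localdev.me
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: green
            port:
              number: 80
```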
@@ -1,141 +0,0 @@
<?xml version="1.0"?>
<html>
  <head>
    <style>
      td {
        background: #ccc;
        padding: 1em;
      }
    </style>
  </head>
  <body>
    <table>
      <tr>
        <td>Mardi 17 janvier 2023</td>
        <td><a href="1.yml.html">Docker Intensif</a></td>
      </tr>
      <tr>
        <td>Mercredi 18 janvier 2023</td>
        <td><a href="1.yml.html">Docker Intensif</a></td>
      </tr>
      <tr>
        <td>Jeudi 19 janvier 2023</td>
        <td><a href="1.yml.html">Docker Intensif</a></td>
      </tr>
      <tr>
        <td>Vendredi 20 janvier 2023</td>
        <td><a href="1.yml.html">Docker Intensif</a></td>
      </tr>
      <tr>
        <td>Mardi 24 janvier 2023</td>
        <td><a href="2.yml.html">Fondamentaux Kubernetes (groupe A)</a></td>
      </tr>
      <tr>
        <td>Mercredi 25 janvier 2023</td>
        <td><a href="2.yml.html">Fondamentaux Kubernetes (groupe A)</a></td>
      </tr>
      <tr>
        <td>Jeudi 26 janvier 2023</td>
        <td><a href="2.yml.html">Fondamentaux Kubernetes (groupe A)</a></td>
      </tr>
      <tr>
        <td>Vendredi 27 janvier 2023</td>
        <td><a href="2.yml.html">Fondamentaux Kubernetes (groupe A)</a></td>
      </tr>
      <tr>
        <td>Mardi 7 février 2023</td>
        <td><a href="2.yml.html">Fondamentaux Kubernetes (groupe B)</a></td>
      </tr>
      <tr>
        <td>Mercredi 8 février 2023</td>
        <td><a href="2.yml.html">Fondamentaux Kubernetes (groupe B)</a></td>
      </tr>
      <tr>
        <td>Jeudi 9 février 2023</td>
        <td><a href="2.yml.html">Fondamentaux Kubernetes (groupe B)</a></td>
      </tr>
      <tr>
        <td>Vendredi 10 février 2023</td>
        <td><a href="2.yml.html">Fondamentaux Kubernetes (groupe B)</a></td>
      </tr>
      <tr>
        <td>Mardi 6 février 2023</td>
        <td><a href="4.yml.html">Kubernetes Avancé</a></td>
      </tr>
      <tr>
        <td>Mercredi 7 février 2023</td>
        <td><a href="4.yml.html">Kubernetes Avancé</a></td>
      </tr>
      <tr>
        <td>Jeudi 8 février 2023</td>
        <td><a href="4.yml.html">Kubernetes Avancé</a></td>
      </tr>
      <tr>
        <td>Vendredi 9 février 2023</td>
        <td><a href="4.yml.html">Kubernetes Avancé</a></td>
      </tr>
      <tr>
        <td>Lundi 13 février 2023</td>
        <td><a href="3.yml.html">Packaging d'applications pour Kubernetes</a></td>
      </tr>
      <tr>
        <td>Mardi 14 février 2023</td>
        <td><a href="3.yml.html">Packaging d'applications pour Kubernetes</a></td>
      </tr>
      <tr>
        <td>Mercredi 15 février 2023</td>
        <td><a href="3.yml.html">Packaging d'applications pour Kubernetes</a></td>
      </tr>
      <tr>
        <td>Jeudi 16 février 2023</td>
        <td><a href="5.yml.html">Opérer Kubernetes</a></td>
      </tr>
      <tr>
        <td>Vendredi 17 février 2023</td>
        <td><a href="5.yml.html">Opérer Kubernetes</a></td>
      </tr>
    </table>
  </body>
</html>

Binary file not shown. (Image removed; before: 394 KiB.)
@@ -13,4 +13,3 @@ https://gallant-turing-d0d520.netlify.com/containers/train-of-containers-1.jpg
https://gallant-turing-d0d520.netlify.com/containers/train-of-containers-2.jpg
https://gallant-turing-d0d520.netlify.com/containers/two-containers-on-a-truck.jpg
https://gallant-turing-d0d520.netlify.com/containers/wall-of-containers.jpeg
https://gallant-turing-d0d520.netlify.com/containers/catene-de-conteneurs.jpg

@@ -1,71 +0,0 @@
title: |
  Introduction
  to Containers

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
  #- containers/Docker_Overview.md
  #- containers/Docker_History.md
  - containers/Training_Environment.md
  #- containers/Installing_Docker.md
  - containers/First_Containers.md
  - containers/Background_Containers.md
  #- containers/Start_And_Attach.md
  - containers/Naming_And_Inspecting.md
  #- containers/Labels.md
  - containers/Getting_Inside.md
  - containers/Initial_Images.md
-
  - containers/Building_Images_Interactively.md
  - containers/Building_Images_With_Dockerfiles.md
  - containers/Cmd_And_Entrypoint.md
  - containers/Copying_Files_During_Build.md
  - containers/Exercise_Dockerfile_Basic.md
-
  - containers/Container_Networking_Basics.md
  #- containers/Network_Drivers.md
  - containers/Local_Development_Workflow.md
  - containers/Container_Network_Model.md
  - containers/Compose_For_Dev_Stacks.md
  - containers/Exercise_Composefile.md
-
  - containers/Multi_Stage_Builds.md
  #- containers/Publishing_To_Docker_Hub.md
  - containers/Dockerfile_Tips.md
  - containers/Exercise_Dockerfile_Advanced.md
  #- containers/Docker_Machine.md
  #- containers/Advanced_Dockerfiles.md
  #- containers/Buildkit.md
  #- containers/Init_Systems.md
  #- containers/Application_Configuration.md
  #- containers/Logging.md
  #- containers/Namespaces_Cgroups.md
  #- containers/Copy_On_Write.md
  #- containers/Containers_From_Scratch.md
  #- containers/Container_Engines.md
  #- containers/Pods_Anatomy.md
  #- containers/Ecosystem.md
  #- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
@@ -1,72 +0,0 @@
title: |
  Introduction
  to Containers

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- in-person

content:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - containers/Docker_Overview.md
  - containers/Docker_History.md
  - containers/Training_Environment.md
  - containers/Installing_Docker.md
  - containers/First_Containers.md
  - containers/Background_Containers.md
  - containers/Start_And_Attach.md
- - containers/Initial_Images.md
  - containers/Building_Images_Interactively.md
  - containers/Building_Images_With_Dockerfiles.md
  - containers/Cmd_And_Entrypoint.md
  - containers/Copying_Files_During_Build.md
  - containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
  - containers/Publishing_To_Docker_Hub.md
  - containers/Dockerfile_Tips.md
  - containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
  - containers/Labels.md
  - containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
  - containers/Network_Drivers.md
  - containers/Container_Network_Model.md
  #- containers/Connecting_Containers_With_Links.md
  - containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
  - containers/Windows_Containers.md
  - containers/Working_With_Volumes.md
  - containers/Compose_For_Dev_Stacks.md
  - containers/Exercise_Composefile.md
  - containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
  - containers/Buildkit.md
  - containers/Init_Systems.md
  - containers/Application_Configuration.md
  - containers/Logging.md
  - containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
  - containers/Copy_On_Write.md
  #- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
  - containers/Pods_Anatomy.md
  - containers/Ecosystem.md
  - containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
@@ -1,80 +0,0 @@
title: |
  Introduction
  to Containers

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
  - containers/Docker_Overview.md
  #- containers/Docker_History.md
  - containers/Training_Environment.md
  - containers/First_Containers.md
  - containers/Background_Containers.md
  - containers/Initial_Images.md
-
  - containers/Building_Images_Interactively.md
  - containers/Building_Images_With_Dockerfiles.md
  - containers/Cmd_And_Entrypoint.md
  - containers/Copying_Files_During_Build.md
  - containers/Exercise_Dockerfile_Basic.md
-
  - containers/Dockerfile_Tips.md
  - containers/Multi_Stage_Builds.md
  - containers/Publishing_To_Docker_Hub.md
  - containers/Exercise_Dockerfile_Advanced.md
-
  - containers/Naming_And_Inspecting.md
  - containers/Labels.md
  - containers/Start_And_Attach.md
  - containers/Getting_Inside.md
  - containers/Resource_Limits.md
- # DAY 2
  - containers/Container_Networking_Basics.md
  - containers/Network_Drivers.md
  - containers/Container_Network_Model.md
-
  - containers/Local_Development_Workflow.md
  - containers/Working_With_Volumes.md
  - containers/Compose_For_Dev_Stacks.md
  - containers/Exercise_Composefile.md
-
  - containers/Installing_Docker.md
  - containers/Container_Engines.md
  - containers/Init_Systems.md
  - containers/Advanced_Dockerfiles.md
  - containers/Buildkit.md
-
  - containers/Application_Configuration.md
  - containers/Logging.md
  - containers/Orchestration_Overview.md
-
  - shared/thankyou.md
  - containers/links.md
#-
#  - containers/Docker_Machine.md
#  - containers/Ambassadors.md
#  - containers/Namespaces_Cgroups.md
#  - containers/Copy_On_Write.md
#  - containers/Containers_From_Scratch.md
#  - containers/Pods_Anatomy.md
#  - containers/Ecosystem.md
@@ -274,7 +274,7 @@ class: extra-details

- ...or with a Secret with the right [type and annotation][create-token]

[create-token]: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#create-token
[create-token]: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#to-create-additional-api-tokens

---

@@ -202,9 +202,9 @@ class: extra-details

- These are JWS signatures using HMAC-SHA256

(see [the reference documentation][configmap-signing] for more details)

[configmap-signing]: https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/#configmap-signing
(see [here](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/#configmap-signing) for more details)

---

@@ -48,7 +48,7 @@

- We must run nodes on a supported infrastructure

- Check the [GitHub repo][autoscaler-providers] for a non-exhaustive list of supported providers
- See [here] for a non-exhaustive list of supported providers

- Sometimes, the cluster autoscaler is installed automatically

@@ -58,7 +58,7 @@

(which is often non-trivial and highly provider-specific)

[autoscaler-providers]: https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider
[here]: https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider

---

@@ -138,7 +138,7 @@ class: extra-details

- The Cluster Autoscaler only supports a few cloud infrastructures

(see the [kubernetes/autoscaler repo][kubernetes-autoscaler-repo] for a list)
(see [here](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider) for a list)

- The Cluster Autoscaler cannot scale down nodes that have pods using:

@@ -148,8 +148,6 @@ class: extra-details

- a restrictive PodDisruptionBudget

[kubernetes-autoscaler-repo]: https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider

---

## Other way to do capacity planning

@@ -24,11 +24,11 @@

- Interface parameters (MTU, sysctls) could be tweaked by the `tuning` plugin

The reference plugins are available [here][cni-reference-plugins].
The reference plugins are available [here].

Look in each plugin's directory for its documentation.

[cni-reference-plugins]: https://github.com/containernetworking/plugins/tree/master/plugins
[here]: https://github.com/containernetworking/plugins/tree/master/plugins

---

@@ -79,20 +79,6 @@

(blue/green deployment, canary deployment)

--

.footnote[
On the next page: canary cage with an oxygen bottle, designed to keep the canary alive.
<br/>
(See https://post.lurk.org/@zilog/109632335293371919 for details.)
]

---

class: pic

![Canary cage with oxygen bottle](images/canary-cage.jpg)

---

## More things that Kubernetes can do for us
@@ -301,9 +287,7 @@ No!

--

- The Docker Engine used to be the default option to run containers with Kubernetes

- Support for Docker (specifically: dockershim) was removed in Kubernetes 1.24
- By default, Kubernetes uses the Docker Engine to run containers

- We can leverage other pluggable runtimes through the *Container Runtime Interface*

@@ -345,26 +329,32 @@ Yes!

- We can do these things without Docker
<br/>
(but with some languages/frameworks, it might be much harder)
(and get diagnosed with NIH¹ syndrome)

- Docker is still the most stable container engine today
<br/>
(but other options are maturing very quickly)

.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)]

---

class: extra-details

## Do we need to run Docker at all?

- On our Kubernetes clusters:

*Not anymore*

- On our development environments, CI pipelines ... :

*Yes, almost certainly*

- On our production servers:

*Yes (today)*

*Probably not (in the future)*

.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)]

---

## Interacting with Kubernetes

@@ -1,62 +1,46 @@
# Healthchecks

- Containers can have *healthchecks* (also called "probes")
- Containers can have *healthchecks*

- There are three kinds of healthchecks, corresponding to different use-cases:
- There are three kinds of healthchecks, corresponding to very different use-cases:

`startupProbe`, `readinessProbe`, `livenessProbe`
- liveness = detect when a container is "dead" and needs to be restarted

- readiness = detect when a container is ready to serve traffic

- startup = detect if a container has finished to boot

- These healthchecks are optional (we can use none, all, or some of them)

- Different probes are available:
- Different probes are available (HTTP request, TCP connection, program execution)

HTTP GET, TCP connection, arbitrary program execution, GRPC

- All these probes have a binary result (success/failure)

- Probes that aren't defined will default to a "success" result
- Let's see the difference and how to use them!

---

## Use-cases in brief

*My container takes a long time to boot before being able to serve traffic.*

→ use a `startupProbe` (but often a `readinessProbe` can also do the job)

*Sometimes, my container is unavailable or overloaded, and needs to e.g. be taken temporarily out of load balancer rotation.*

→ use a `readinessProbe`

*Sometimes, my container enters a broken state which can only be fixed by a restart.*

→ use a `livenessProbe`

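To make the three use-cases concrete, here is a sketch of a Pod combining all three probes. The image name, port, and `/healthz` endpoint are placeholders, not part of the original slides:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: probes-demo
spec:
  containers:
  - name: app
    image: myregistry.../app:v1.0    # placeholder image
    startupProbe:                    # gives the app up to 5 minutes to boot
      httpGet:
        path: /healthz
        port: 8080
      periodSeconds: 10
      failureThreshold: 30
    readinessProbe:                  # takes the Pod out of rotation when not ready
      httpGet:
        path: /healthz
        port: 8080
    livenessProbe:                   # restarts the container if it's wedged
      httpGet:
        path: /healthz
        port: 8080
      timeoutSeconds: 3
```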
---

## Liveness probes
## Liveness probe

*This container is dead, we don't know how to fix it, other than restarting it.*

- Check if the container is dead or alive
- Indicates if the container is dead or alive

- If Kubernetes determines that the container is dead:
- A dead container cannot come back to life

- it terminates the container gracefully
- If the liveness probe fails, the container is killed (destroyed)

- it restarts the container (unless the Pod's `restartPolicy` is `Never`)
(to make really sure that it's really dead; no zombies or undeads!)

- With the default parameters, it takes:
- What happens next depends on the pod's `restartPolicy`:

- up to 30 seconds to determine that the container is dead
- `Never`: the container is not restarted

- up to 30 seconds to terminate it
- `OnFailure` or `Always`: the container is restarted

---

## When to use a liveness probe

- To detect failures that can't be recovered
- To indicate failures that can't be recovered

- deadlocks (causing all requests to time out)

@@ -64,45 +48,47 @@

- Anything where our incident response would be "just restart/reboot it"

---

## Liveness probes gotchas

.warning[**Do not** use liveness probes for problems that can't be fixed by a restart]

- Otherwise we just restart our pods for no reason, creating useless load

.warning[**Do not** depend on other services within a liveness probe]
---

- Otherwise we can experience cascading failures
## Readiness probe (1)

(example: web server liveness probe that makes a request to a database)
*Make sure that a container is ready before continuing a rolling update.*

.warning[**Make sure** that liveness probes respond quickly]
- Indicates if the container is ready to handle traffic

- The default probe timeout is 1 second (this can be tuned!)
- When doing a rolling update, the Deployment controller waits for Pods to be ready

- If the probe takes longer than that, it will eventually cause a restart
(a Pod is ready when all the containers in the Pod are ready)

- Improves reliability and safety of rolling updates:

- don't roll out a broken version (that doesn't pass readiness checks)

- don't lose processing capacity during a rolling update

---

## Readiness probes
## Readiness probe (2)

*Sometimes, my container "needs a break".*
*Temporarily remove a container (overloaded or otherwise) from a Service load balancer.*

- Check if the container is ready or not
- A container can mark itself "not ready" temporarily

- If the container is not ready, its Pod is not ready
(e.g. if it's overloaded or needs to reload/restart/garbage collect...)

- If the Pod belongs to a Service, it is removed from its Endpoints
- If a container becomes "unready" it might be ready again soon

(it stops receiving new connections but existing ones are not affected)
- If the readiness probe fails:

- If there is a rolling update in progress, it might pause
- the container is *not* killed

(Kubernetes will try to respect the MaxUnavailable parameter)
- if the pod is a member of a service, it is temporarily removed

- As soon as the readiness probe succeeds again, everything goes back to normal
- it is re-added as soon as the readiness probe passes again

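One hedged sketch of a container marking itself "not ready" temporarily: a readiness probe checking a flag file that the application itself creates and deletes. The `/tmp/ready` convention here is hypothetical, not something the slides prescribe:

```yaml
# Minimal sketch: the app deletes /tmp/ready when it needs a break,
# and re-creates it when it can accept traffic again.
readinessProbe:
  exec:
    command: ["test", "-f", "/tmp/ready"]
  periodSeconds: 5
  failureThreshold: 1    # drop out of the Service quickly
```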
---

@@ -116,31 +102,67 @@

- To indicate temporary failure or unavailability

- runtime is busy doing garbage collection or (re)loading data

- application can only service *N* parallel connections

- new connections will be directed to other Pods
- runtime is busy doing garbage collection or initial data load

- To redirect new connections to other Pods

(e.g. fail the readiness probe when the Pod's load is too high)

---

## Startup probes
## Dependencies

*My container takes a long time to boot before being able to serve traffic.*
- If a web server depends on a database to function, and the database is down:

- After creating a container, Kubernetes runs its startup probe
- the web server's liveness probe should succeed

- The container will be considered "unhealthy" until the probe succeeds
- the web server's readiness probe should fail

- As long as the container is "unhealthy", its Pod...:
- Same thing for any hard dependency (without which the container can't work)

- is not added to Services' endpoints
.warning[**Do not** fail liveness probes for problems that are external to the container]

- is not considered as "available" for rolling update purposes
---

- Readiness and liveness probes are enabled *after* startup probe reports success
## Timing and thresholds

(if there is no startup probe, readiness and liveness probes are enabled right away)
- Probes are executed at intervals of `periodSeconds` (default: 10)

- The timeout for a probe is set with `timeoutSeconds` (default: 1)

.warning[If a probe takes longer than that, it is considered as a FAIL]

- A probe is considered successful after `successThreshold` successes (default: 1)

- A probe is considered failing after `failureThreshold` failures (default: 3)

- A probe can have an `initialDelaySeconds` parameter (default: 0)

- Kubernetes will wait that amount of time before running the probe for the first time

(this is important to avoid killing services that take a long time to start)

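A sketch of how these parameters combine in practice (the endpoint and values are illustrative, not recommendations):

```yaml
livenessProbe:
  httpGet:
    path: /healthz         # placeholder endpoint
    port: 8080
  periodSeconds: 10        # run every 10 seconds (the default)
  timeoutSeconds: 3        # allow slower replies than the 1s default
  successThreshold: 1      # one success marks the probe healthy again
  failureThreshold: 3      # three consecutive failures → restart
  initialDelaySeconds: 15  # wait 15s before the first check
```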
---

## Startup probe

*The container takes too long to start, and is killed by the liveness probe!*

- By default, probes (including liveness) start immediately

- With the default probe interval and failure threshold:

*a container must respond in less than 30 seconds, or it will be killed!*

- There are two ways to avoid that:

- set `initialDelaySeconds` (a fixed, rigid delay)

- use a `startupProbe`

- Kubernetes will run only the startup probe, and when it succeeds, run the other probes

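A minimal sketch of the `startupProbe` option, assuming the same placeholder `/healthz` endpoint as above:

```yaml
# Allow up to 30×10 = 300 seconds for a slow-booting app,
# then hand over to a tight liveness probe.
startupProbe:
  httpGet:
    path: /healthz
    port: 8080
  periodSeconds: 10
  failureThreshold: 30
livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
  periodSeconds: 10
  failureThreshold: 3
```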
---

@@ -156,296 +178,121 @@

---

## Startup probes gotchas

- When defining a `startupProbe`, we almost always want to adjust its parameters

(specifically, its `failureThreshold` - this is explained in next slide)

- Otherwise, if the container fails to start within 30 seconds...

*Kubernetes terminates the container and restarts it!*

- Sometimes, it's easier/simpler to use a `readinessProbe` instead

(except when also using a `livenessProbe`)

---

## Timing and thresholds

- Probes are executed at intervals of `periodSeconds` (default: 10)

- The timeout for a probe is set with `timeoutSeconds` (default: 1)

.warning[If a probe takes longer than that, it is considered as a FAIL]

.warning[For liveness probes **and startup probes** this terminates and restarts the container]

- A probe is considered successful after `successThreshold` successes (default: 1)

- A probe is considered failing after `failureThreshold` failures (default: 3)

- All these parameters can be set independently for each probe

---

class: extra-details

## `initialDelaySeconds`

- A probe can have an `initialDelaySeconds` parameter (default: 0)

- Kubernetes will wait that amount of time before running the probe for the first time

- It is generally better to use a `startupProbe` instead

(but this parameter did exist before startup probes were implemented)

---

class: extra-details

## `readinessProbe` vs `startupProbe`

- A lot of blog posts / documentations / tutorials recommend readiness probes...

- ...even in scenarios where a startup probe would seem more appropriate!

- This is because startup probes are relatively recent

(they reached GA status in Kubernetes 1.20)

- When there is no `livenessProbe`, using a `readinessProbe` is simpler:

- a `startupProbe` generally requires to change the `failureThreshold`

- a `startupProbe` generally also requires a `readinessProbe`

- a single `readinessProbe` can fulfill both roles

---

## Different types of probes

- Kubernetes supports the following mechanisms:
- HTTP request

- `exec` (arbitrary program execution)
- specify URL of the request (and optional headers)

- `httpGet` (HTTP GET request)
- any status code between 200 and 399 indicates success

- `tcpSocket` (check if a TCP port is accepting connections)
- TCP connection

- `grpc` (standard [GRPC Health Checking Protocol][grpc])
- the probe succeeds if the TCP port is open

- All probes give binary results ("it works" or "it doesn't")
- arbitrary exec

- Let's see the specific details for each of them!
- a command is executed in the container

[grpc]: https://grpc.github.io/grpc/core/md_doc_health-checking.html
- exit status of zero indicates success

---

## `exec`
## Benefits of using probes

- Runs an arbitrary program *inside* the container
- Rolling updates proceed when containers are *actually ready*

(like with `kubectl exec` or `docker exec`)
(as opposed to merely started)

- The program must be available in the container image
- Containers in a broken state get killed and restarted

- Kubernetes uses the exit status of the program
(instead of serving errors or timeouts)

(standard UNIX convention: 0 = success, anything else = failure)
- Unavailable backends get removed from load balancer rotation

(thus improving response times across the board)

- If a probe is not defined, it's as if there was an "always successful" probe

---

## `exec` example
## Example: HTTP probe

When the worker is ready, it should create `/tmp/ready`.
<br/>
The following probe will give it 5 minutes to do so.
Here is a pod template for the `rng` web service of the DockerCoins app:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: queueworker
  name: healthy-app
spec:
  containers:
  - name: worker
    image: myregistry.../worker:v1.0
    startupProbe:
      exec:
        command:
        - test
        - -f
        - /tmp/ready
      failureThreshold: 30
```

---

## Using shell constructs

- If we want to use pipes, conditionals, etc. we should invoke a shell

- Example:
  ```yaml
  exec:
    command:
    - sh
    - -c
    - "curl http://localhost:5000/status | jq .ready | grep true"
  ```

---

## `httpGet`

- Make an HTTP GET request to the container

- The request will be made by Kubelet

(doesn't require extra binaries in the container image)

- `port` must be specified

- `path` and extra `httpHeaders` can be specified optionally

- Kubernetes uses HTTP status code of the response:

- 200-399 = success

- anything else = failure

---

## `httpGet` example

The following liveness probe restarts the container if it stops responding on `/healthz`:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: frontend
    image: myregistry.../frontend:v1.0
  - name: myapp
    image: myregistry.io/myapp:v1.0
    livenessProbe:
      httpGet:
        path: /health
        port: 80
        path: /healthz
      periodSeconds: 5
```

---

## `tcpSocket`

- Kubernetes checks if the indicated TCP port accepts connections

- There is no additional check

.warning[It's quite possible for a process to be broken, but still accept TCP connections!]
If the backend serves an error, or takes longer than 1s, 3 times in a row, it gets killed.

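A short sketch of a `tcpSocket` probe (the port number is illustrative, e.g. for a database-like container):

```yaml
readinessProbe:
  tcpSocket:
    port: 5432    # succeeds as soon as the port accepts connections
  periodSeconds: 10
```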
---

## `grpc`
## Example: exec probe

<!-- ##VERSION## -->
Here is a pod template for a Redis server:

- Available in beta since Kubernetes 1.24
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: redis-with-liveness
spec:
  containers:
  - name: redis
    image: redis
    livenessProbe:
      exec:
        command: ["redis-cli", "ping"]
```

- Leverages standard [GRPC Health Checking Protocol][grpc]

[grpc]: https://grpc.github.io/grpc/core/md_doc_health-checking.html
If the Redis process becomes unresponsive, it will be killed.

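For completeness, a sketch of the `grpc` mechanism (the port is illustrative; it assumes the container exposes the standard gRPC health service, and a cluster recent enough to support gRPC probes):

```yaml
livenessProbe:
  grpc:
    port: 9090    # the gRPC health service must listen on this port
  periodSeconds: 10
```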
---

## Best practices for healthchecks
## Questions to ask before adding healthchecks

- Readiness probes are almost always beneficial
- Do we want liveness, readiness, both?

- don't hesitate to add them early!
(sometimes, we can use the same check, but with different failure thresholds)

- we can even make them *mandatory*
- Do we have existing HTTP endpoints that we can use?

- Be more careful with liveness and startup probes
- Do we need to add new endpoints, or perhaps use something else?

- they aren't always necessary
- Are our healthchecks likely to use resources and/or slow down the app?

- they can even cause harm
- Do they depend on additional services?

(this can be particularly tricky, see next slide)

---

## Readiness probes
## Healthchecks and dependencies

- Almost always beneficial
- Liveness checks should not be influenced by the state of external services

- Exceptions:
- All checks should reply quickly (by default, less than 1 second)

- web service that doesn't have a dedicated "health" or "ping" route
- Otherwise, they are considered to fail

- ...and all requests are "expensive" (e.g. lots of external calls)
- This might require to check the health of dependencies asynchronously

---

## Liveness probes

- If we're not careful, we end up restarting containers for no reason

(which can cause additional load on the cluster, cascading failures, data loss, etc.)

- Suggestion:

- don't add liveness probes immediately

- wait until you have a bit of production experience with that code

- then add narrow-scoped healthchecks to detect specific failure modes

- Readiness and liveness probes should be different

(different check *or* different timeouts *or* different thresholds)

---

## Startup probes

- Only beneficial for containers that need a long time to start

(more than 30 seconds)

- If there is no liveness probe, it's simpler to just use a readiness probe

(since we probably want to have a readiness probe anyway)

- In other words, startup probes are useful in one situation:

*we have a liveness probe, AND the container needs a lot of time to start*

- Don't forget to change the `failureThreshold`

(otherwise the container will fail to start and be killed)

---

## Recap of the gotchas

- The default timeout is 1 second

- if a probe takes longer than 1 second to reply, Kubernetes considers that it fails

- this can be changed by setting the `timeoutSeconds` parameter
<br/>(or refactoring the probe)

- Liveness probes should not be influenced by the state of external services

- Liveness probes and readiness probes should have different parameters

- For startup probes, remember to increase the `failureThreshold`
(e.g. if a database or API might be healthy but still take more than
1 second to reply, we should check the status asynchronously and report
a cached status)

---

@@ -453,21 +300,21 @@ spec:

(In that context, worker = process that doesn't accept connections)

- A relatively easy solution is to use files
- Readiness is useful mostly for rolling updates

- For a startup or readiness probe:
(because workers aren't backends for a service)

- worker creates `/tmp/ready` when it's ready
- probe checks the existence of `/tmp/ready`
- Liveness may help us restart a broken worker, but how can we check it?

- For a liveness probe:
- Embedding an HTTP server is a (potentially expensive) option

- worker touches `/tmp/alive` regularly
<br/>(e.g. just before starting to work on a job)
- probe checks that the timestamp on `/tmp/alive` is recent
- if the timestamp is old, it means that the worker is stuck
- Using a "lease" file can be relatively easy:

- Sometimes it can also make sense to embed a web server in the worker
- touch a file during each iteration of the main loop

- check the timestamp of that file from an exec probe

- Writing logs (and checking them from the probe) also works

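One possible sketch of the "lease file" pattern described above (the file name and the 10-minute staleness threshold are assumptions, not part of the original material):

```yaml
# The worker touches /tmp/alive in its main loop; the probe fails
# if the file is missing or older than 10 minutes.
livenessProbe:
  exec:
    command:
    - sh
    - -c
    - test "$(find /tmp/alive -mmin -10)"    # empty output → failure
  periodSeconds: 60
```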
???

@@ -317,22 +317,6 @@ class: extra-details

class: extra-details

## Determining if we're in a subchart

- `.Chart.IsRoot` indicates if we're in the top-level chart or in a sub-chart

- Useful in charts that are designed to be used standalone or as dependencies

- Example: generic chart

- when used standalone (`.Chart.IsRoot` is `true`), use `.Release.Name`

- when used as a subchart e.g. with multiple aliases, use `.Chart.Name`

---

class: extra-details

## Compatibility with Helm 2

- Chart `apiVersion: v1` is the only version supported by Helm 2

@@ -504,7 +504,7 @@ The `readme` may or may not have (accurate) explanations for the values.

- Update `my-juice-shop`:
  ```bash
  helm upgrade my-juice-shop juice/juice-shop \
  helm upgrade my-juice-shop juice/my-juice-shop \
       --set service.type=NodePort
  ```

@@ -1,148 +0,0 @@
## Ingress and canary releases

- Let's see how to implement *canary releases*

- The example here will use Traefik v1

(which is obsolete)

- It won't work on your Kubernetes cluster!

(unless you're running an oooooold version of Kubernetes)

(and an equally oooooooold version of Traefik)

- We've left it here just as an example!

---

## Canary releases

- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload

- After deploying the canary, we compare its metrics to the normal release

- If the metrics look good, the canary will progressively receive more traffic

(until it gets 100% and becomes the new normal release)

- If the metrics aren't good, the canary is automatically removed

- When we deploy a bad release, only a tiny fraction of traffic is affected

---

## Various ways to implement canary

- Example 1: canary for a microservice

- 1% of all requests (sampled randomly) are sent to the canary
- the remaining 99% are sent to the normal release

- Example 2: canary for a web app

- 1% of users are sent to the canary web site
- the remaining 99% are sent to the normal release

- Example 3: canary for shipping physical goods

- 1% of orders are shipped with the canary process
- the remaining 99% are shipped with the normal process

- We're going to implement example 1 (per-request routing)

---

## Canary releases with Traefik v1

- We need to deploy the canary and expose it with a separate service

- Then, in the Ingress resource, we need:

- multiple `paths` entries (one for each service, canary and normal)

- an extra annotation indicating the weight of each service

- If we want, we can send requests to more than 2 services

---

## The Ingress resource

.small[
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: rgb
  annotations:
    traefik.ingress.kubernetes.io/service-weights: |
      red: 50%
      green: 25%
      blue: 25%
spec:
  rules:
  - host: rgb.`A.B.C.D`.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: red
          servicePort: 80
      - path: /
        backend:
          serviceName: green
          servicePort: 80
      - path: /
        backend:
          serviceName: blue
          servicePort: 80
```
]

---

class: extra-details

## Other ingress controllers

*Just to illustrate how different things are ...*

- With the NGINX ingress controller:

- define two ingress resources
<br/>
(specifying rules with the same host+path)

- add `nginx.ingress.kubernetes.io/canary` annotations on each

- With Linkerd2:

- define two services

- define an extra service for the weighted aggregate of the two

- define a TrafficSplit (this is a CRD introduced by the SMI spec)

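As a hedged sketch of the NGINX approach mentioned above (the names, host, and 1% weight are illustrative, and it assumes a `myapp-canary` Service already exists alongside the main one):

```yaml
# Second Ingress with the same host+path, flagged as a canary.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp-canary
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-weight: "1"   # 1% of requests
spec:
  rules:
  - host: myapp.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: myapp-canary
            port:
              number: 80
```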
---

class: extra-details

## We need more than that

What we saw is just one of the multiple building blocks that we need to achieve a canary release.

We also need:

- metrics (latency, performance ...) for our releases

- automation to alter canary weights

(increase canary weight if metrics look good; decrease otherwise)

- a mechanism to manage the lifecycle of the canary releases

(create them, promote them, delete them ...)

For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).

@@ -1,36 +1,34 @@
|
||||
# Exposing HTTP services with Ingress resources
|
||||
|
||||
- Service = layer 4 (TCP, UDP, SCTP)

  - works with every TCP/UDP/SCTP protocol

  - doesn't "see" or interpret HTTP

- Ingress = layer 7 (HTTP)

  - only for HTTP

  - can route requests depending on URI or host header

  - can handle TLS
|
||||
|
||||
---
|
||||
|
||||
## Why should we use Ingress resources?

A few use-cases:

- URI routing (e.g. for single page apps)

  `/api` → service `api:5000`

  everything else → service `static:80`

- Cost optimization

  (because individual `LoadBalancer` services typically cost money)

- Automatic handling of TLS certificates
|
||||
|
||||
---
|
||||
|
||||
@@ -183,70 +181,20 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Accepting connections on port 80 (and 443)
|
||||
|
||||
- Web site users don't want to specify port numbers
|
||||
|
||||
(e.g. "connect to https://blahblah.whatever:31550")
|
||||
|
||||
- Our ingress controller needs to actually be exposed on port 80
|
||||
|
||||
(and 443 if we want to handle HTTPS)
|
||||
|
||||
- Let's see how we can achieve that!
|
||||
|
||||
---
|
||||
|
||||
## Various ways to expose something on port 80
|
||||
|
||||
- Service with `type: LoadBalancer`
|
||||
|
||||
*costs a little bit of money; not always available*
|
||||
|
||||
- Service with one (or multiple) `ExternalIP`
|
||||
|
||||
*requires public nodes; limited by number of nodes*
|
||||
|
||||
- Service with `hostPort` or `hostNetwork`
|
||||
|
||||
*same limitations as `ExternalIP`; even harder to manage*
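---

class: extra-details

## Sketch: a `LoadBalancer` Service

As a reference, here is a minimal sketch of a `LoadBalancer` Service for an ingress controller (the name and the `app: traefik` selector are assumptions, to be adapted to the actual deployment):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: traefik
spec:
  type: LoadBalancer
  selector:
    app: traefik
  ports:
  - name: http
    port: 80
  - name: https
    port: 443
```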
|
||||
|
||||
---
|
||||
|
||||
## Deploying pods listening on port 80
|
||||
|
||||
- We are going to run Traefik in Pods with `hostNetwork: true`

  (so that our load balancer can use the "real" port 80 of our nodes)

- Traefik Pods will be created by a DaemonSet

  (so that we get one instance of Traefik on every node of the cluster)

- This means that we will be able to connect to any node of the cluster on port 80

.warning[This is not typical of a production setup!]
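---

class: extra-details

## Sketch: Traefik DaemonSet with `hostNetwork`

This is a minimal sketch (not the exact manifest used in this training) of a DaemonSet running one Traefik Pod per node, directly on the node's real port 80; the labels and image tag are assumptions:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      labels:
        app: traefik
    spec:
      hostNetwork: true  # use the node's network namespace
      containers:
      - name: traefik
        image: traefik:v2.8  # assumed tag, for illustration
        ports:
        - containerPort: 80
```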
|
||||
|
||||
---
|
||||
|
||||
## Doing it in production
|
||||
|
||||
- When running "on cloud", the easiest option is a `LoadBalancer` service
|
||||
|
||||
- When running "on prem", it depends:
|
||||
|
||||
- [MetalLB] is a good option if a pool of public IP addresses is available
|
||||
|
||||
- otherwise, using `externalIPs` on a few nodes (2-3 for redundancy)
|
||||
|
||||
- Many variations/optimizations are possible depending on our exact scenario!
|
||||
|
||||
[MetalLB]: https://metallb.org/
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Without `hostNetwork`
|
||||
|
||||
- Normally, each pod gets its own *network namespace*
|
||||
@@ -263,8 +211,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## With `hostNetwork: true`
|
||||
|
||||
- No network namespace gets created
|
||||
@@ -283,6 +229,26 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Other techniques to expose port 80
|
||||
|
||||
- We could use pods specifying `hostPort: 80`
|
||||
|
||||
... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
|
||||
|
||||
- We could use a `NodePort` service
|
||||
|
||||
... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
|
||||
|
||||
- We could create a service with an external IP
|
||||
|
||||
... this would work, but would require a few extra steps
|
||||
|
||||
(figuring out the IP address and adding it to the service)
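---

class: extra-details

## Sketch: Service with an external IP

For illustration, this is what the "external IP" technique could look like, assuming that `A.B.C.D` is the address of one of our nodes and that the ingress controller Pods carry the label `app: traefik`:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: traefik
spec:
  selector:
    app: traefik
  ports:
  - port: 80
  externalIPs:
  - A.B.C.D  # replace with an actual node address
```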
|
||||
|
||||
---
|
||||
|
||||
## Running Traefik
|
||||
|
||||
- The [Traefik documentation][traefikdoc] recommends using a Helm chart
|
||||
@@ -304,8 +270,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Taints and tolerations
|
||||
|
||||
- A *taint* is an attribute added to a node
|
||||
@@ -532,6 +496,10 @@ This is normal: we haven't provided any ingress rule yet.
|
||||
|
||||
## Creating ingress resources
|
||||
|
||||
- Before Kubernetes 1.19, we must use YAML manifests
|
||||
|
||||
(see example on next slide)
|
||||
|
||||
- Since Kubernetes 1.19, we can use `kubectl create ingress`
|
||||
|
||||
```bash
kubectl create ingress blue \
    --rule=blue.`A.B.C.D`.nip.io/*=blue:80
```
|
||||
@@ -566,21 +534,7 @@ This is normal: we haven't provided any ingress rule yet.
|
||||
|
||||
---
|
||||
|
||||
## Before Kubernetes 1.19
|
||||
|
||||
- Before Kubernetes 1.19:
|
||||
|
||||
- `kubectl create ingress` wasn't available
|
||||
|
||||
- `apiVersion: networking.k8s.io/v1` wasn't supported
|
||||
|
||||
- It was necessary to use YAML, and `apiVersion: networking.k8s.io/v1beta1`
|
||||
|
||||
(see example on next slide)
|
||||
|
||||
---
|
||||
|
||||
## YAML for old ingress resources
|
||||
|
||||
Here is a minimal host-based ingress resource:
|
||||
|
||||
@@ -601,15 +555,23 @@ spec:
|
||||
|
||||
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: blue
spec:
  rules:
  - host: blue.`A.B.C.D`.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: blue
          servicePort: 80
```
|
||||
|
||||
(It is in `k8s/ingress.yaml`.)
|
||||
|
||||
---
|
||||
|
||||
## YAML for new ingress resources

- The YAML on the previous slide uses `apiVersion: networking.k8s.io/v1beta1`

- Starting with Kubernetes 1.19, `networking.k8s.io/v1` is available

- And we can use `kubectl create ingress` 🎉

- We can see "modern" YAML with `-o yaml --dry-run=client`:
|
||||
|
||||
```bash
|
||||
kubectl create ingress red -o yaml --dry-run=client \
    --rule=red.`A.B.C.D`.nip.io/*=red:80
```
@@ -679,6 +641,157 @@ class: extra-details
|
||||
|
||||
- It is still in alpha stage
|
||||
|
||||
---
|
||||
|
||||
## Vendor-specific example
|
||||
|
||||
- Let's see how to implement *canary releases*
|
||||
|
||||
- The example here will use Traefik v1
|
||||
|
||||
(which is obsolete)
|
||||
|
||||
- It won't work on your Kubernetes cluster!
|
||||
|
||||
(unless you're running an oooooold version of Kubernetes)
|
||||
|
||||
(and an equally oooooooold version of Traefik)
|
||||
|
||||
- We've left it here just as an example!
|
||||
|
||||
---
|
||||
|
||||
## Canary releases
|
||||
|
||||
- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload
|
||||
|
||||
- After deploying the canary, we compare its metrics to the normal release
|
||||
|
||||
- If the metrics look good, the canary will progressively receive more traffic
|
||||
|
||||
(until it gets 100% and becomes the new normal release)
|
||||
|
||||
- If the metrics aren't good, the canary is automatically removed
|
||||
|
||||
- When we deploy a bad release, only a tiny fraction of traffic is affected
|
||||
|
||||
---
|
||||
|
||||
## Various ways to implement canary
|
||||
|
||||
- Example 1: canary for a microservice
|
||||
|
||||
- 1% of all requests (sampled randomly) are sent to the canary
|
||||
- the remaining 99% are sent to the normal release
|
||||
|
||||
- Example 2: canary for a web app
|
||||
|
||||
- 1% of users are sent to the canary web site
|
||||
- the remaining 99% are sent to the normal release
|
||||
|
||||
- Example 3: canary for shipping physical goods
|
||||
|
||||
- 1% of orders are shipped with the canary process
|
||||
- the remaining 99% are shipped with the normal process
|
||||
|
||||
- We're going to implement example 1 (per-request routing)
|
||||
|
||||
---
|
||||
|
||||
## Canary releases with Traefik v1
|
||||
|
||||
- We need to deploy the canary and expose it with a separate service
|
||||
|
||||
- Then, in the Ingress resource, we need:
|
||||
|
||||
- multiple `paths` entries (one for each service, canary and normal)
|
||||
|
||||
- an extra annotation indicating the weight of each service
|
||||
|
||||
- If we want, we can send requests to more than 2 services
|
||||
|
||||
---
|
||||
|
||||
## The Ingress resource
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: rgb
|
||||
annotations:
|
||||
traefik.ingress.kubernetes.io/service-weights: |
|
||||
red: 50%
|
||||
green: 25%
|
||||
blue: 25%
|
||||
spec:
|
||||
rules:
|
||||
- host: rgb.`A.B.C.D`.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: red
|
||||
servicePort: 80
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: green
|
||||
servicePort: 80
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: blue
|
||||
servicePort: 80
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Other ingress controllers
|
||||
|
||||
*Just to illustrate how different things are ...*
|
||||
|
||||
- With the NGINX ingress controller:
|
||||
|
||||
- define two ingress resources
|
||||
<br/>
|
||||
(specifying rules with the same host+path)
|
||||
|
||||
- add `nginx.ingress.kubernetes.io/canary` annotations on each
|
||||
|
||||
|
||||
- With Linkerd2:
|
||||
|
||||
- define two services
|
||||
|
||||
- define an extra service for the weighted aggregate of the two
|
||||
|
||||
- define a TrafficSplit (this is a CRD introduced by the SMI spec)
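---

class: extra-details

## Sketch: NGINX canary annotations

For comparison, here is roughly what the canary ingress could look like with the NGINX ingress controller (a sketch; the host and service names are made up):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rgb-canary
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-weight: "25"
spec:
  rules:
  - host: rgb.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: green
            port:
              number: 80
```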
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## We need more than that
|
||||
|
||||
What we saw is just one of the multiple building blocks that we need to achieve a canary release.
|
||||
|
||||
We also need:
|
||||
|
||||
- metrics (latency, performance ...) for our releases
|
||||
|
||||
- automation to alter canary weights
|
||||
|
||||
(increase canary weight if metrics look good; decrease otherwise)
|
||||
|
||||
- a mechanism to manage the lifecycle of the canary releases
|
||||
|
||||
(create them, promote them, delete them ...)
|
||||
|
||||
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).
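---

class: extra-details

## Sketch: a Flagger canary

To give an idea, this is approximately what a Flagger `Canary` resource looks like (a sketch based on Flagger's documented API; the values are made up and fields may vary between versions):

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: rgb
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: rgb
  service:
    port: 80
  analysis:
    interval: 1m    # how often to evaluate metrics
    stepWeight: 10  # traffic increase at each step
    maxWeight: 50   # canary traffic ceiling before promotion
    threshold: 5    # failed checks tolerated before rollback
```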
|
||||
|
||||
???
|
||||
|
||||
:EN:- The Ingress resource
|
||||
|
||||
@@ -86,8 +86,8 @@
|
||||
|
||||
(This is inspired by the
|
||||
[uselessoperator](https://github.com/tilt-dev/uselessoperator)
|
||||
written by
|
||||
[V Körbes](https://twitter.com/veekorbes).
|
||||
|
||||
Highly recommend!💯)
|
||||
|
||||
---
|
||||
@@ -160,31 +160,34 @@ type MachineSpec struct {
|
||||
|
||||
We can use Go *marker comments* to give `controller-gen` extra details about how to handle our type, for instance:
|
||||
|
||||
```go
|
||||
//+kubebuilder:object:root=true
|
||||
```
|
||||
|
||||
→ top-level type exposed through API (as opposed to "member field of another type")
|
||||
|
||||
```go
|
||||
//+kubebuilder:subresource:status
|
||||
```
|
||||
|
||||
→ automatically generate a `status` subresource (very common with many types)
|
||||
|
||||
```go
//+kubebuilder:printcolumn:JSONPath=".spec.switchPosition",name=Position,type=string
```

→ add a custom column when displaying the resource with `kubectl get`
|
||||
|
||||
(See
|
||||
[marker syntax](https://book.kubebuilder.io/reference/markers.html),
|
||||
[CRD generation](https://book.kubebuilder.io/reference/markers/crd.html),
|
||||
[CRD validation](https://book.kubebuilder.io/reference/markers/crd-validation.html),
|
||||
[Object/DeepCopy](https://master.book.kubebuilder.io/reference/markers/object.html)
|
||||
)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using CRD v1
|
||||
|
||||
- By default, kubebuilder generates v1beta1 CRDs
|
||||
|
||||
- If we want to generate v1 CRDs:
|
||||
|
||||
- edit `Makefile`
|
||||
|
||||
- update `crd:crdVersions=v1`
|
||||
|
||||
---
|
||||
|
||||
## Installing the CRD
|
||||
|
||||
After making these changes, we can run `make install`.
|
||||
@@ -205,7 +208,6 @@ Edit `config/samples/useless_v1alpha1_machine.yaml`:
|
||||
kind: Machine
|
||||
apiVersion: useless.container.training/v1alpha1
|
||||
metadata:
|
||||
labels: # ...
|
||||
name: machine-1
|
||||
spec:
|
||||
# Our useless operator will change that to "down"
|
||||
@@ -250,23 +252,20 @@ spec:
|
||||
|
||||
## Loading an object
|
||||
|
||||
Open `controllers/machine_controller.go`.

Add that code in the `Reconcile` method, at the `TODO(user)` location:

```go
var machine uselessv1alpha1.Machine
logger := log.FromContext(ctx)

if err := r.Get(ctx, req.NamespacedName, &machine); err != nil {
  logger.Info("error getting object")
  return ctrl.Result{}, err
}

logger.Info(
  "reconciling",
  "machine", req.NamespacedName,
  "switchPosition", machine.Spec.SwitchPosition,
)
```
|
||||
|
||||
@@ -289,7 +288,7 @@ Then:
|
||||
|
||||
--
|
||||
|
||||
We get a bunch of errors and go stack traces! 🤔
|
||||
|
||||
---
|
||||
|
||||
@@ -325,7 +324,7 @@ Let's try to update the machine like this:
|
||||
if machine.Spec.SwitchPosition != "down" {
|
||||
machine.Spec.SwitchPosition = "down"
|
||||
if err := r.Update(ctx, &machine); err != nil {
|
||||
logger.Info("error updating switch position")
|
||||
log.Info("error updating switch position")
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
}
|
||||
@@ -345,9 +344,9 @@ Again - update, `make run`, test.
|
||||
|
||||
(maybe with degraded behavior in the meantime)
|
||||
|
||||
- Status will almost always be a sub-resource, so that it can be updated separately

  (and potentially with different permissions)
|
||||
|
||||
---
|
||||
|
||||
@@ -400,8 +399,8 @@ class: extra-details
|
||||
|
||||
## To requeue ...
|
||||
|
||||
`return ctrl.Result{RequeueAfter: 1 * time.Second}, nil`
|
||||
|
||||
- That means: "try again in 1 second, and I will check if progress was made"
|
||||
|
||||
- This *does not* guarantee that we will be called exactly 1 second later:
|
||||
@@ -410,9 +409,7 @@ class: extra-details
|
||||
|
||||
- we might be called after (if the controller is busy with other objects)
|
||||
|
||||
- If we are waiting for another Kubernetes resource to change, there is a better way

  (explained on next slide)
|
||||
|
||||
---
|
||||
|
||||
@@ -420,41 +417,23 @@ class: extra-details
|
||||
|
||||
`return ctrl.Result{}, nil`
|
||||
|
||||
- That means: "we're done here!"
|
||||
- That means: "no need to set an alarm; we'll be notified some other way"
|
||||
|
||||
- This is also what we should use if we are waiting for another resource
|
||||
- Use this if we are waiting for another resource to update
|
||||
|
||||
(e.g. a LoadBalancer to be provisioned, a Pod to be ready...)
|
||||
|
||||
- In that case, we will need to set a *watch* (more on that later)
|
||||
|
||||
---
|
||||
|
||||
## Keeping track of state
|
||||
|
||||
- If we simply requeue the object to examine it 1 second later...
|
||||
|
||||
- ...We'll keep examining/requeuing it forever!
|
||||
|
||||
- We need to "remember" that we saw it (and when)
|
||||
|
||||
- Option 1: keep state in controller
|
||||
|
||||
(e.g. an internal `map`)
|
||||
|
||||
- Option 2: keep state in the object
|
||||
|
||||
(typically in its status field)
|
||||
|
||||
- Tradeoffs: concurrency / failover / control plane overhead...
|
||||
|
||||
|
||||
---
|
||||
|
||||
## "Improving" our controller, take 2
|
||||
|
||||
Let's store in the machine status the moment when we saw it:
|
||||
|
||||
```go
|
||||
type MachineStatus struct {
  // Time at which the machine was noticed by our controller.
  SeenAt *metav1.Time ``json:"seenAt,omitempty"``
}
```
|
||||
@@ -467,12 +446,6 @@ Note: `date` fields don't display timestamps in the future.
|
||||
|
||||
(That's why for this example it's simpler to use `seenAt` rather than `changeAt`.)
|
||||
|
||||
And for better visibility, add this along with the other `printcolumn` comments:
|
||||
|
||||
```go
|
||||
//+kubebuilder:printcolumn:JSONPath=".status.seenAt",name=Seen,type=date
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Set `seenAt`
|
||||
@@ -484,7 +457,7 @@ if machine.Status.SeenAt == nil {
|
||||
now := metav1.Now()
|
||||
machine.Status.SeenAt = &now
|
||||
if err := r.Status().Update(ctx, &machine); err != nil {
|
||||
logger.Info("error updating status.seenAt")
|
||||
log.Info("error updating status.seenAt")
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
|
||||
@@ -505,9 +478,8 @@ if machine.Spec.SwitchPosition != "down" {
|
||||
changeAt := machine.Status.SeenAt.Time.Add(5 * time.Second)
|
||||
if now.Time.After(changeAt) {
|
||||
machine.Spec.SwitchPosition = "down"
|
||||
machine.Status.SeenAt = nil
|
||||
if err := r.Update(ctx, &machine); err != nil {
|
||||
logger.Info("error updating switch position")
|
||||
log.Info("error updating switch position")
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
}
|
||||
@@ -524,33 +496,15 @@ if machine.Spec.SwitchPosition != "down" {
|
||||
|
||||
- We will now have two kinds of objects: machines, and switches
|
||||
|
||||
- Machines will store the number of switches in their spec
|
||||
|
||||
- Machines should have *at least* one switch, possibly *multiple ones*
|
||||
|
||||
- Our controller will automatically create switches if needed
|
||||
|
||||
(a bit like the ReplicaSet controller automatically creates Pods)
|
||||
|
||||
- The switches will be tied to their machine through a label
|
||||
|
||||
(let's pick `machine=name-of-the-machine`)
|
||||
|
||||
---
|
||||
|
||||
## Switch state
|
||||
|
||||
- The position of a switch will now be stored in the switch

  (not in the machine like in the first scenario)

- The machine will also expose the combined state of the switches

  (through its status)

- The machine's status will be automatically updated by the controller

  (each time a switch is added/changed/removed)

(See next slide for an example)
|
||||
|
||||
---
|
||||
|
||||
@@ -562,7 +516,7 @@ NAME SWITCHES POSITIONS
|
||||
machine-cz2vl 3 ddd
|
||||
machine-vf4xk 1 d
|
||||
|
||||
[jp@hex ~]$ kubectl get switches --show-labels
|
||||
NAME POSITION SEEN LABELS
|
||||
switch-6wmjw down machine=machine-cz2vl
|
||||
switch-b8csg down machine=machine-cz2vl
|
||||
@@ -576,95 +530,39 @@ switch-rc59l down machine=machine-vf4xk
|
||||
|
||||
## Tasks
|
||||
|
||||
1. Create the new resource type (but don't create a controller)
|
||||
|
||||
2. Update `machine_types.go` and `switch_types.go`
|
||||
|
||||
3. Implement logic to display machine status (status of its switches)
|
||||
|
||||
4. Implement logic to automatically create switches
|
||||
|
||||
5. Implement logic to flip all switches down immediately
|
||||
|
||||
6. Then tweak it so that a given machine doesn't flip more than one switch every 5 seconds
|
||||
|
||||
*See next slides for detailed steps!*
|
||||
|
||||
---
|
||||
|
||||
## Creating the new type
|
||||
Create the new resource type (but don't create a controller):
|
||||
|
||||
```bash
|
||||
kubebuilder create api --group useless --version v1alpha1 --kind Switch
|
||||
```
|
||||
|
||||
Note: this time, only create a new custom resource; not a new controller.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Updating our types

- Move the "switch position" and "seen at" to the new `Switch` type

- Update the `Machine` type to have:

  - `spec.switches` (Go type: `int`, JSON type: `integer`)

  - `status.positions` of type `string`

- Bonus points for adding [CRD Validation](https://book.kubebuilder.io/reference/markers/crd-validation.html) to the number of switches!

- Then install the new CRDs with `make install`

- Create a Machine, and a Switch linked to the Machine (by setting the `machine` label)
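---

## Sample objects (sketch)

Assuming the types described on the previous slide, hypothetical Machine and Switch objects could look like this (names and label values are illustrative):

```yaml
kind: Machine
apiVersion: useless.container.training/v1alpha1
metadata:
  name: machine-1
spec:
  switches: 3
---
kind: Switch
apiVersion: useless.container.training/v1alpha1
metadata:
  name: switch-1
  labels:
    machine: machine-1  # ties this switch to its machine
spec:
  position: down
```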
|
||||
|
||||
---
|
||||
|
||||
## Listing switches
|
||||
|
||||
- Switches are associated to Machines with a label
|
||||
|
||||
(`kubectl label switch switch-xyz machine=machine-xyz`)
|
||||
|
||||
- We can retrieve associated switches like this:
|
||||
|
||||
```go
|
||||
var switches uselessv1alpha1.SwitchList
|
||||
|
||||
if err := r.List(ctx, &switches,
|
||||
client.InNamespace(req.Namespace),
|
||||
client.MatchingLabels{"machine": req.Name},
|
||||
); err != nil {
|
||||
logger.Error(err, "unable to list switches of the machine")
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
logger.Info("Found switches", "switches", switches)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Updating status
|
||||
|
||||
- Each time we reconcile a Machine, let's update its status:
|
||||
|
||||
```go
|
||||
status := ""
|
||||
for _, sw := range switches.Items {
|
||||
status += string(sw.Spec.Position[0])
|
||||
}
|
||||
machine.Status.Positions = status
|
||||
if err := r.Status().Update(ctx, &machine); err != nil {
|
||||
...
|
||||
```
|
||||
|
||||
- Run the controller and check that POSITIONS gets updated
|
||||
|
||||
- Add more switches linked to the same machine
|
||||
|
||||
- ...The POSITIONS don't get updated, unless we restart the controller
|
||||
|
||||
- We'll see later how to fix that!
|
||||
log.Info("Found switches", "switches", switches)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -692,28 +590,20 @@ if err := r.Create(ctx, &sw); err != nil { ...
|
||||
|
||||
---
|
||||
|
||||
## Create missing switches
|
||||
|
||||
- In our reconciler, if a machine doesn't have enough switches, create them!
|
||||
|
||||
- Option 1: directly create the number of missing switches
|
||||
|
||||
- Option 2: create only one switch (and rely on later requeuing)
|
||||
|
||||
- Note: option 2 won't quite work yet, since we haven't set up *watches* yet
|
||||
|
||||
---
|
||||
|
||||
## Watches
|
||||
|
||||
- Our controller doesn't react when switches are created/updated/deleted
|
||||
|
||||
|
||||
- By default, it only monitors machines, not switches
|
||||
|
||||
- We need to tell it to watch switches
|
||||
|
||||
- We also need to tell it how to map a switch to its machine
|
||||
|
||||
(so that the correct machine gets queued and reconciled when a switch is updated)
|
||||
|
||||
---
|
||||
|
||||
## Mapping a switch to its machine
|
||||
@@ -721,15 +611,16 @@ if err := r.Create(ctx, &sw); err != nil { ...
|
||||
Define the following helper function:
|
||||
|
||||
```go
|
||||
func (r *MachineReconciler) machineOfSwitch(obj client.Object) []ctrl.Request {
  return []ctrl.Request{
    ctrl.Request{
      NamespacedName: types.NamespacedName{
        Name:      obj.GetLabels()["machine"],
        Namespace: obj.GetNamespace(),
      },
    },
  }
}
|
||||
```
|
||||
|
||||
@@ -740,46 +631,24 @@ func (r *MachineReconciler) machineOfSwitch(obj client.Object) []ctrl.Request {
|
||||
Update the `SetupWithManager` method in the controller:
|
||||
|
||||
```go
|
||||
// SetupWithManager sets up the controller with the Manager.
func (r *MachineReconciler) SetupWithManager(mgr ctrl.Manager) error {
  return ctrl.NewControllerManagedBy(mgr).
    For(&uselessv1alpha1.Machine{}).
    Owns(&uselessv1alpha1.Switch{}).
    Watches(
      &source.Kind{Type: &uselessv1alpha1.Switch{}},
      handler.EnqueueRequestsFromMapFunc(r.machineOfSwitch),
    ).
    Complete(r)
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ...And a few extra imports
|
||||
|
||||
Import the following packages referenced by the previous code:
|
||||
|
||||
```go
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
```
|
||||
|
||||
After this, when we update a switch, it should reflect on the machine.

(Try to change switch positions and see the machine status update!)
|
||||
|
||||
---
|
||||
|
||||
## Flipping switches
|
||||
|
||||
- Now re-add logic to flip switches that are not in "down" position
|
||||
|
||||
- Re-add logic to wait a few seconds before flipping a switch
|
||||
|
||||
- Change the logic to toggle one switch per machine every few seconds
|
||||
|
||||
(i.e. don't change all the switches for a machine; move them one at a time)
|
||||
---

## Bonus points
|
||||
|
||||
- Handle "scale down" of a machine (by deleting extraneous switches)
|
||||
|
||||
@@ -791,25 +660,9 @@ After this, when we update a switch, it should reflect on the machine.
|
||||
|
||||
---
|
||||
|
||||
## Other possible improvements
|
||||
|
||||
- Formalize resource ownership
|
||||
|
||||
(by setting `ownerReferences` in the switches)
|
||||
|
||||
- This can simplify the watch mechanism a bit
|
||||
|
||||
- Allow defining a selector
|
||||
|
||||
(instead of using the hard-coded `machine` label)
|
||||
|
||||
- And much more!
|
||||
|
||||
---
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
- Useless Operator, by [V Körbes](https://twitter.com/veekorbes)
|
||||
|
||||
[code](https://github.com/tilt-dev/uselessoperator)
|
||||
|
|
||||
|
||||
@@ -141,7 +141,7 @@ class: extra-details
|
||||
|
||||
- There are external tools to address these shortcomings
|
||||
|
||||
(e.g.: [Stern](https://github.com/stern/stern))
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -18,108 +18,6 @@
|
||||
|
||||
---
|
||||
|
||||
## Running containers with open ports
|
||||
|
||||
- Since `ping` doesn't have anything to connect to, we'll have to run something else
|
||||
|
||||
- We are going to use `jpetazzo/color`, a tiny HTTP server written in Go
|
||||
|
||||
- `jpetazzo/color` listens on port 80
|
||||
|
||||
- It serves a page showing the pod's name
|
||||
|
||||
(this will be useful when checking load balancing behavior)
|
||||
|
||||
- We could also use the `nginx` official image instead
|
||||
|
||||
(but we wouldn't be able to tell the backends from each other)
|
||||
|
||||
---
|
||||
|
||||
## Running our HTTP server
|
||||
|
||||
- We will create a deployment with `kubectl create deployment`
|
||||
|
||||
- This will create a Pod running our HTTP server
|
||||
|
||||
.lab[
|
||||
|
||||
- Create a deployment named `blue`:
|
||||
```bash
|
||||
kubectl create deployment blue --image=jpetazzo/color
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Connecting to the HTTP server
|
||||
|
||||
- Let's connect to the HTTP server directly
|
||||
|
||||
(just to make sure everything works fine; we'll add the Service later)
|
||||
|
||||
.lab[
|
||||
|
||||
- Get the IP address of the Pod:
|
||||
```bash
|
||||
kubectl get pods -o wide
|
||||
```
|
||||
|
||||
- Send an HTTP request to the Pod:
|
||||
```bash
|
||||
curl http://`IP-ADDRESS`
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
You should see a response from the Pod.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running with a local cluster
|
||||
|
||||
If you're running with a local cluster (Docker Desktop, KinD, minikube...),
|
||||
you might get a connection timeout (or a message like "no route to host")
|
||||
because the Pod isn't reachable directly from your local machine.
|
||||
|
||||
In that case, you can test the connection to the Pod by running a shell
|
||||
*inside* the cluster:
|
||||
|
||||
```bash
|
||||
kubectl run -it --rm my-test-pod --image=fedora
|
||||
```
|
||||
|
||||
Then run `curl` in that Pod.
|
||||
|
||||
---
|
||||
|
||||
## The Pod doesn't have a "stable identity"
|
||||
|
||||
- The IP address that we used above isn't "stable"
|
||||
|
||||
(if the Pod gets deleted, the replacement Pod will have a different address)
|
||||
|
||||
.lab[
|
||||
|
||||
- Check the IP addresses of running Pods:
|
||||
```bash
|
||||
watch kubectl get pods -o wide
|
||||
```
|
||||
|
||||
- Delete the Pod:
|
||||
```bash
|
||||
kubectl delete pod `blue-xxxxxxxx-yyyyy`
|
||||
```
|
||||
|
||||
- Check that the replacement Pod has a different IP address
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Services in a nutshell
|
||||
|
||||
- Services give us a *stable endpoint* to connect to a pod or a group of pods
|
||||
@@ -138,164 +36,6 @@ Then run `curl` in that Pod.
|
||||
|
||||
---
|
||||
|
||||
## Exposing our deployment
|
||||
|
||||
- Let's create a Service for our Deployment
|
||||
|
||||
.lab[
|
||||
|
||||
- Expose the HTTP port of our server:
|
||||
```bash
|
||||
kubectl expose deployment blue --port=80
|
||||
```
|
||||
|
||||
- Look up which IP address was allocated:
|
||||
```bash
|
||||
kubectl get service
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- By default, this created a `ClusterIP` service
|
||||
|
||||
(we'll discuss later the different types of services)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Services are layer 4 constructs
|
||||
|
||||
- Services can have IP addresses, but they are still *layer 4*
|
||||
|
||||
(i.e. a service is not just an IP address; it's an IP address + protocol + port)
|
||||
|
||||
- As a result: you *have to* indicate the port number for your service
|
||||
|
||||
(with some exceptions, like `ExternalName` or headless services, covered later)
|
||||
|
||||
---
|
||||
|
||||
## Testing our service
|
||||
|
||||
- We will now send a few HTTP requests to our Pod
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's obtain the IP address that was allocated for our service, *programmatically:*
|
||||
```bash
|
||||
CLUSTER_IP=$(kubectl get svc blue -o go-template='{{ .spec.clusterIP }}')
|
||||
```
|
||||
|
||||
<!--
|
||||
```hide kubectl wait deploy blue --for condition=available```
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
- Send a few requests:
|
||||
```bash
|
||||
for i in $(seq 10); do curl http://$CLUSTER_IP; done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## A *stable* endpoint
|
||||
|
||||
- Let's see what happens when the Pod has a problem
|
||||
|
||||
.lab[
|
||||
|
||||
- Keep sending requests to the Service address:
|
||||
```bash
|
||||
while sleep 0.3; do curl http://$CLUSTER_IP; done
|
||||
```
|
||||
|
||||
- Meanwhile, delete the Pod:
|
||||
```bash
|
||||
kubectl delete pod `blue-xxxxxxxx-yyyyy`
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- There might be a short interruption when we delete the pod...
|
||||
|
||||
- ...But requests will keep flowing after that (without requiring a manual intervention)
|
||||
|
||||
---
|
||||
|
||||
## Load balancing
|
||||
|
||||
- The Service will also act as a load balancer
|
||||
|
||||
(if there are multiple Pods in the Deployment)
|
||||
|
||||
.lab[
|
||||
|
||||
- Scale up the Deployment:
|
||||
```bash
|
||||
kubectl scale deployment blue --replicas=3
|
||||
```
|
||||
|
||||
- Send a bunch of requests to the Service:
|
||||
```bash
|
||||
for i in $(seq 20); do curl http://$CLUSTER_IP; done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- Our requests are load balanced across the Pods!
|
||||
|
||||
---
|
||||
|
||||
## DNS integration
|
||||
|
||||
- Kubernetes provides an internal DNS resolver
|
||||
|
||||
- The resolver maps service names to their internal addresses
|
||||
|
||||
- By default, this only works *inside Pods* (not from the nodes themselves)
|
||||
|
||||
.lab[
|
||||
|
||||
- Get a shell in a Pod:
|
||||
```bash
|
||||
kubectl run --rm -it --image=fedora test-dns-integration
|
||||
```
|
||||
|
||||
- Try to resolve the `blue` Service from the Pod:
|
||||
```bash
|
||||
curl blue
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Under the hood...
|
||||
|
||||
- Check the content of `/etc/resolv.conf` inside a Pod
|
||||
|
||||
- It will have `nameserver X.X.X.X` (e.g. 10.96.0.10)
|
||||
|
||||
- Now check `kubectl get service kube-dns --namespace=kube-system`
|
||||
|
||||
- ...It's the same address! 😉
|
||||
|
||||
- The FQDN of a service is actually:
|
||||
|
||||
`<service-name>.<namespace>.svc.<cluster-domain>`
|
||||
|
||||
- `<cluster-domain>` defaults to `cluster.local`
|
||||
|
||||
- And the `search` includes `<namespace>.svc.<cluster-domain>`
|
||||
|
||||
---
|
||||
|
||||
## Advantages of services
|
||||
|
||||
- We don't need to look up the IP address of the pod(s)
|
||||
@@ -314,10 +54,510 @@ class: extra-details
|
||||
|
||||
(when a pod fails, the service seamlessly sends traffic to its replacement)
|
||||
|
||||
---
|
||||
|
||||
## Many kinds and flavors of service
|
||||
|
||||
- There are different types of services:
|
||||
|
||||
`ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName`
|
||||
|
||||
- There are also *headless services*
|
||||
|
||||
- Services can also have optional *external IPs*
|
||||
|
||||
- There is also another resource type called *Ingress*
|
||||
|
||||
(specifically for HTTP services)
|
||||
|
||||
- Wow, that's a lot! Let's start with the basics ...
|
||||
|
||||
---
|
||||
|
||||
## `ClusterIP`
|
||||
|
||||
- It's the default service type
|
||||
|
||||
- A virtual IP address is allocated for the service
|
||||
|
||||
(in an internal, private range; e.g. 10.96.0.0/12)
|
||||
|
||||
- This IP address is reachable only from within the cluster (nodes and pods)
|
||||
|
||||
- Our code can connect to the service using the original port number
|
||||
|
||||
- Perfect for internal communication, within the cluster
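---

class: extra-details

## What `kubectl expose` generates (sketch)

Roughly, exposing the `blue` Deployment on port 80 produces a Service like this (a sketch; the real object carries a few extra fields filled in by Kubernetes):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: blue
spec:
  type: ClusterIP
  selector:
    app: blue
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
```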
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
## `LoadBalancer`
|
||||
|
||||
- An external load balancer is allocated for the service
|
||||
|
||||
(typically a cloud load balancer, e.g. ELB on AWS, GLB on GCE ...)
|
||||
|
||||
- This is available only when the underlying infrastructure provides some kind of
|
||||
"load balancer as a service"
|
||||
|
||||
- Each service of that type will typically cost a little bit of money
|
||||
|
||||
(e.g. a few cents per hour on AWS or GCE)
|
||||
|
||||
- Ideally, traffic would flow directly from the load balancer to the pods
|
||||
|
||||
- In practice, it will often flow through a `NodePort` first
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
## `NodePort`
|
||||
|
||||
- A port number is allocated for the service
|
||||
|
||||
(by default, in the 30000-32767 range)
|
||||
|
||||
- That port is made available *on all our nodes* and anybody can connect to it
|
||||
|
||||
(we can connect to any node on that port to reach the service)
|
||||
|
||||
- Our code needs to be changed to connect to that new port number
|
||||
|
||||
- Under the hood: `kube-proxy` sets up a bunch of `iptables` rules on our nodes
|
||||
|
||||
- Sometimes, it's the only available option for external traffic
|
||||
|
||||
(e.g. most clusters deployed with kubeadm or on-premises)
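---

class: extra-details

## Sketch: declaring a `NodePort` Service

A minimal sketch; the explicit `nodePort` is optional (if omitted, a port is picked in the default 30000-32767 range):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: blue
spec:
  type: NodePort
  selector:
    app: blue
  ports:
  - port: 80
    nodePort: 30080  # must be within the service-node-port-range
```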
|
||||
|
||||
---
|
||||
|
||||
## Running containers with open ports
|
||||
|
||||
- Since `ping` doesn't have anything to connect to, we'll have to run something else
|
||||
|
||||
- We could use the `nginx` official image, but ...
|
||||
|
||||
... we wouldn't be able to tell the backends from each other!
|
||||
|
||||
- We are going to use `jpetazzo/color`, a tiny HTTP server written in Go
|
||||
|
||||
- `jpetazzo/color` listens on port 80
|
||||
|
||||
- It serves a page showing the pod's name
|
||||
|
||||
(this will be useful when checking load balancing behavior)
|
||||
|
||||
---
|
||||
|
||||
## Creating a deployment for our HTTP server
|
||||
|
||||
- We will create a deployment with `kubectl create deployment`
|
||||
|
||||
- Then we will scale it with `kubectl scale`
|
||||
|
||||
.lab[
|
||||
|
||||
- In another window, watch the pods (to see when they are created):
|
||||
```bash
|
||||
kubectl get pods -w
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait NAME```
|
||||
```tmux split-pane -h```
|
||||
-->
|
||||
|
||||
- Create a deployment for this very lightweight HTTP server:
|
||||
```bash
|
||||
kubectl create deployment blue --image=jpetazzo/color
|
||||
```
|
||||
|
||||
- Scale it to 10 replicas:
|
||||
```bash
|
||||
kubectl scale deployment blue --replicas=10
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Exposing our deployment
|
||||
|
||||
- We'll create a default `ClusterIP` service
|
||||
|
||||
.lab[
|
||||
|
||||
- Expose the HTTP port of our server:
|
||||
```bash
|
||||
kubectl expose deployment blue --port=80
|
||||
```
|
||||
|
||||
- Look up which IP address was allocated:
|
||||
```bash
|
||||
kubectl get service
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Services are layer 4 constructs
|
||||
|
||||
- You can assign IP addresses to services, but they are still *layer 4*
|
||||
|
||||
(i.e. a service is not an IP address; it's an IP address + protocol + port)
|
||||
|
||||
- This is caused by the current implementation of `kube-proxy`
|
||||
|
||||
(it relies on mechanisms that don't support layer 3)
|
||||
|
||||
- As a result: you *have to* indicate the port number for your service
|
||||
|
||||
(with some exceptions, like `ExternalName` or headless services, covered later)
|
||||
|
||||
---
|
||||
|
||||
## Testing our service
|
||||
|
||||
- We will now send a few HTTP requests to our pods
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's obtain the IP address that was allocated for our service, *programmatically:*
|
||||
```bash
|
||||
IP=$(kubectl get svc blue -o go-template --template '{{ .spec.clusterIP }}')
|
||||
```
|
||||
|
||||
<!--
|
||||
```hide kubectl wait deploy blue --for condition=available```
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
- Send a few requests:
|
||||
```bash
|
||||
curl http://$IP:80/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
Try it a few times! Our requests are load balanced across multiple pods.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `ExternalName`
|
||||
|
||||
- Services of type `ExternalName` are quite different
|
||||
|
||||
- No load balancer (internal or external) is created
|
||||
|
||||
- Only a DNS entry gets added to the DNS managed by Kubernetes
|
||||
|
||||
- That DNS entry will just be a `CNAME` to a provided record
|
||||
|
||||
Example:
|
||||
```bash
|
||||
kubectl create service externalname k8s --external-name kubernetes.io
|
||||
```
|
||||
*Creates a CNAME `k8s` pointing to `kubernetes.io`*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## External IPs
|
||||
|
||||
- We can add an External IP to a service, e.g.:
|
||||
```bash
|
||||
kubectl expose deploy my-little-deploy --port=80 --external-ip=1.2.3.4
|
||||
```
|
||||
|
||||
- `1.2.3.4` should be the address of one of our nodes
|
||||
|
||||
(it could also be a virtual address, service address, or VIP, shared by multiple nodes)
|
||||
|
||||
- Connections to `1.2.3.4:80` will be sent to our service
|
||||
|
||||
- External IPs will also show up on services of type `LoadBalancer`
|
||||
|
||||
(they will be added automatically by the process provisioning the load balancer)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Headless services
|
||||
|
||||
- Sometimes, we want to access our scaled services directly:
|
||||
|
||||
- if we want to save a tiny little bit of latency (typically less than 1ms)
|
||||
|
||||
- if we need to connect over arbitrary ports (instead of a few fixed ones)
|
||||
|
||||
- if we need to communicate over another protocol than UDP or TCP
|
||||
|
||||
- if we want to decide how to balance the requests client-side
|
||||
|
||||
- ...
|
||||
|
||||
- In that case, we can use a "headless service"
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Creating a headless service
|
||||
|
||||
- A headless service is obtained by setting the `clusterIP` field to `None`
|
||||
|
||||
(Either with `--cluster-ip=None`, or by providing a custom YAML)
|
||||
|
||||
- As a result, the service doesn't have a virtual IP address
|
||||
|
||||
- Since there is no virtual IP address, there is no load balancer either
|
||||
|
||||
- CoreDNS will return the pods' IP addresses as multiple `A` records
|
||||
|
||||
- This gives us an easy way to discover all the replicas for a deployment
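---

class: extra-details

## Sketch: declaring a headless Service

A minimal sketch, equivalent to passing `--cluster-ip=None`:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: blue-headless
spec:
  clusterIP: None  # this is what makes the service headless
  selector:
    app: blue
  ports:
  - port: 80
```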
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Services and endpoints
|
||||
|
||||
- A service has a number of "endpoints"
|
||||
|
||||
- Each endpoint is a host + port where the service is available
|
||||
|
||||
- The endpoints are maintained and updated automatically by Kubernetes
|
||||
|
||||
.lab[
|
||||
|
||||
- Check the endpoints that Kubernetes has associated with our `blue` service:
|
||||
```bash
|
||||
kubectl describe service blue
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
In the output, there will be a line starting with `Endpoints:`.
|
||||
|
||||
That line will list a bunch of addresses in `host:port` format.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Viewing endpoint details
|
||||
|
||||
- When we have many endpoints, our display commands truncate the list
|
||||
```bash
|
||||
kubectl get endpoints
|
||||
```
|
||||
|
||||
- If we want to see the full list, we can use one of the following commands:
|
||||
```bash
|
||||
kubectl describe endpoints blue
|
||||
kubectl get endpoints blue -o yaml
|
||||
```
|
||||
|
||||
- These commands will show us a list of IP addresses
|
||||
|
||||
- These IP addresses should match the addresses of the corresponding pods:
|
||||
```bash
|
||||
kubectl get pods -l app=blue -o wide
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `endpoints` not `endpoint`
|
||||
|
||||
- `endpoints` is the only resource that cannot be singular
|
||||
|
||||
```bash
|
||||
$ kubectl get endpoint
|
||||
error: the server doesn't have a resource type "endpoint"
|
||||
```
|
||||
|
||||
- This is because the type itself is plural (unlike every other resource)
|
||||
|
||||
- There is no `endpoint` object: `type Endpoints struct`
|
||||
|
||||
- The type doesn't represent a single endpoint, but a list of endpoints
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The DNS zone
|
||||
|
||||
- In the `kube-system` namespace, there should be a service named `kube-dns`
|
||||
|
||||
- This is the internal DNS server that can resolve service names
|
||||
|
||||
- The default domain name for the service we created is `default.svc.cluster.local`
|
||||
|
||||
.lab[
|
||||
|
||||
- Get the IP address of the internal DNS server:
|
||||
```bash
|
||||
IP=$(kubectl -n kube-system get svc kube-dns -o jsonpath={.spec.clusterIP})
|
||||
```
|
||||
|
||||
- Resolve the cluster IP for the `blue` service:
|
||||
```bash
|
||||
host blue.default.svc.cluster.local $IP
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `Ingress`
|
||||
|
||||
- Ingresses are another type (kind) of resource
|
||||
|
||||
- They are specifically for HTTP services
|
||||
|
||||
(not TCP or UDP)
|
||||
|
||||
- They can also handle TLS certificates, URL rewriting ...
|
||||
|
||||
- They require an *Ingress Controller* to function
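---

class: extra-details

## Sketch: a minimal Ingress resource

For reference, a minimal `networking.k8s.io/v1` Ingress could look like this (the hostname and service name are placeholders):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: blue
spec:
  rules:
  - host: blue.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: blue
            port:
              number: 80
```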
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
???
|
||||
|
||||
:EN:- Accessing pods through services
:EN:- Service discovery and load balancing
:EN:- Service types: ClusterIP, NodePort, LoadBalancer
|
||||
|
||||
:FR:- Exposer un service
|
||||
:FR:- Le DNS interne de Kubernetes et la *service discovery*
|
||||
:FR:- Différents types de services : ClusterIP, NodePort, LoadBalancer
|
||||
:FR:- Utiliser CoreDNS pour la *service discovery*
|
||||
|
||||
@@ -170,9 +170,9 @@ def hash_bytes(data):
|
||||
headers={"Content-Type": "application/octet-stream"})
|
||||
```
|
||||
|
||||
(Feel free to check the [full source code][dockercoins-worker-code] of the worker!)
|
||||
|
||||
[dockercoins-worker-code]: https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -142,7 +142,7 @@ configMapGenerator:
|
||||
|
||||
- overlays can only *add* resources, not *remove* them
|
||||
|
||||
- See the full list of [eschewed features](https://kubectl.docs.kubernetes.io/faq/kustomize/eschewedfeatures/) for more details
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -156,7 +156,7 @@
|
||||
|
||||
- Install Kyverno:
|
||||
```bash
|
||||
kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/release-1.7/config/release/install.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -302,35 +302,23 @@
|
||||
|
||||
---
|
||||
|
||||
## Comparing "old" and "new"
|
||||
|
||||
- The fields of the webhook payload are available through `{{ request }}`
|
||||
|
||||
- For UPDATE requests, we can access:
|
||||
|
||||
`{{ request.oldObject }}` → the object as it is right now (before the request)
|
||||
|
||||
`{{ request.object }}` → the object with the changes made by the request
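---

class: extra-details

## Sketch: comparing old and new in a policy

A rough sketch of a rule rejecting changes to the `color` label, based on Kyverno's documented `validate.deny` mechanism (not the exact policy used in this training):

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: deny-color-changes
spec:
  validationFailureAction: enforce
  rules:
  - name: deny-color-changes
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The color label cannot be changed."
      deny:
        conditions:
          all:
          - key: "{{ request.object.metadata.labels.color || '' }}"
            operator: NotEquals
            value: "{{ request.oldObject.metadata.labels.color || '' }}"
```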
|
||||
|
||||
---
|
||||
|
||||
## Missing labels

- We can access the `color` label through `{{ request.object.metadata.labels.color }}`

- If we reference a label (or any field) that doesn't exist, the policy fails

  (with an error similar to `JMESPath query failed: Unknown key ... in path`)

- Except in *preconditions*: it then evaluates to an empty string

- To work around that, [use an OR expression][non-existence-checks]:

  `{{ request.object.metadata.labels.color || '' }}`

- Note that in older versions of Kyverno, this wasn't always necessary

  (e.g. in *preconditions*, a missing label would evaluate to an empty string)

[non-existence-checks]: https://kyverno.io/docs/writing-policies/jmespath/#non-existence-checks
|
||||
|
||||
---
|
||||
|
||||
@@ -566,28 +554,6 @@ Note: the `apiVersion` field appears to be optional.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Managing `ownerReferences`
|
||||
|
||||
- By default, the generated object and triggering object have independent lifecycles
|
||||
|
||||
(deleting the triggering object doesn't affect the generated object)
|
||||
|
||||
- It is possible to associate the generated object with the triggering object
|
||||
|
||||
(so that deleting the triggering object also deletes the generated object)
|
||||
|
||||
- This is done by adding the triggering object information to `ownerReferences`
|
||||
|
||||
(in the generated object `metadata`)
|
||||
|
||||
- See [Linking resources with ownerReferences][ownerref] for an example
|
||||
|
||||
[ownerref]: https://kyverno.io/docs/writing-policies/generate/#linking-resources-with-ownerreferences
|
||||
|
||||
---
|
||||
|
||||
## Asynchronous creation
|
||||
|
||||
- Kyverno creates resources asynchronously
|
||||
@@ -606,7 +572,7 @@ class: extra-details
|
||||
|
||||
## Footprint
|
||||
|
||||
- 8 CRDs
|
||||
|
||||
- 5 webhooks
|
||||
|
||||
|
||||
@@ -69,14 +69,12 @@ Exactly what we need!
|
||||
|
||||
(no dependencies, extra libraries to install, etc)
|
||||
|
||||
- Binary releases are available [on GitHub][stern-releases]
|
||||
|
||||
- Stern is also available through most package managers
|
||||
|
||||
(e.g. on macOS, we can `brew install stern` or `sudo port install stern`)
|
||||
|
||||
[stern-releases]: https://github.com/stern/stern/releases
|
||||
|
||||
---
|
||||
|
||||
## Using Stern
|
||||
|
||||
@@ -256,9 +256,9 @@ class: extra-details
|
||||
|
||||
- or stored in the node's `spec.podCIDR` field
|
||||
|
||||
.footnote[See [here][kubenet-plugin] for more details about this `kubenet` plugin.]

[kubenet-plugin]: https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#kubenet
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Writing a tiny operator
|
||||
|
||||
- Let's look at a simple operator
|
||||
|
||||
|
||||
@@ -14,20 +14,32 @@
|
||||
|
||||
- CPU is a *compressible resource*
|
||||
|
||||
  - it can be preempted immediately without adverse effect

  - if we have N CPU and need 2N, we run at 50% speed
|
||||
|
||||
- Memory is an *incompressible resource*
|
||||
|
||||
  - it needs to be swapped out to be reclaimed; and this is costly

  - if we have N GB RAM and need 2N, we might run at... 0.1% speed!
|
||||
|
||||
- As a result, exceeding limits will have different consequences for CPU and memory
|
||||
|
||||
---
|
||||
|
||||
## Exceeding CPU limits
|
||||
|
||||
- CPU can be reclaimed instantaneously
|
||||
|
||||
(in fact, it is preempted hundreds of times per second, at each context switch)
|
||||
|
||||
- If a container uses too much CPU, it can be throttled
|
||||
|
||||
(it will be scheduled less often)
|
||||
|
||||
- The processes in that container will run slower
|
||||
|
||||
(or rather: they will not run faster)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## CPU limits implementation details
|
||||
@@ -134,59 +146,39 @@ For more details, check [this blog post](https://erickhun.com/posts/kubernetes-f
|
||||
|
||||
---
|
||||
|
||||
## Running low on memory

- When the system runs low on memory, it starts to reclaim used memory

  (we talk about "memory pressure")

- Option 1: free up some buffers and caches

  (fastest option; might affect performance if cache memory runs very low)

- Option 2: swap, i.e. write to disk some memory of one process to give it to another

  (can have a huge negative impact on performance because disks are slow)

- Option 3: terminate a process and reclaim all its memory

  (OOM or Out Of Memory Killer on Linux)
|
||||
|
||||
---
|
||||
|
||||
## Memory limits on Kubernetes
|
||||
## Limits vs requests
|
||||
|
||||
- Kubernetes *does not support swap*
|
||||
|
||||
(but it may support it in the future, thanks to [KEP 2400])
|
||||
|
||||
- If a container exceeds its memory *limit*, it gets killed immediately
|
||||
|
||||
- If a node is overcommitted and under memory pressure, it will terminate some pods
|
||||
|
||||
(see next slide for some details about what "overcommit" means here!)
|
||||
|
||||
[KEP 2400]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2400-node-swap/README.md#implementation-history
|
||||
|
||||
---
|
||||
|
||||
## Overcommitting resources
|
||||
|
||||
- *Limits* are "hard limits" (a container *cannot* exceed its limits)
|
||||
- Limits are "hard limits" (they can't be exceeded)
|
||||
|
||||
- a container exceeding its memory limit is killed
|
||||
|
||||
- a container exceeding its CPU limit is throttled
|
||||
|
||||
- On a given node, the sum of pod *limits* can be higher than the node size
|
||||
- Requests are used for scheduling purposes
|
||||
|
||||
- *Requests* are used for scheduling purposes
|
||||
- a container using *less* than what it requested will never be killed or throttled
|
||||
|
||||
- a container can use more than its requested CPU or RAM amounts
|
||||
- the scheduler uses the requested sizes to determine placement
|
||||
|
||||
- a container using *less* than what it requested should never be killed or throttled
|
||||
|
||||
- On a given node, the sum of pod *requests* cannot be higher than the node size
|
||||
- the resources requested by all pods on a node will never exceed the node size
|
||||
|
||||
---
|
||||
|
||||
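As a sketch, setting requests equal to limits (placeholder names and values below) is what gives a pod the `Guaranteed` QoS class mentioned next:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: guaranteed-pod     # hypothetical name
spec:
  containers:
  - name: main
    image: nginx           # placeholder image
    resources:
      requests:            # used by the scheduler for placement
        cpu: 250m
        memory: 128Mi
      limits:              # requests == limits gives QoS class "Guaranteed"
        cpu: 250m
        memory: 128Mi
```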
@@ -230,31 +222,9 @@ Each pod is assigned a QoS class (visible in `status.qosClass`).

---

class: extra-details

## Where is my swap?
## CPU and RAM reservation

- Kubernetes passes resource requests and limits to the container engine

- The container engine applies these requests and limits with specific mechanisms

- Example: on Linux, this is typically done with control groups aka cgroups

- Most systems use cgroups v1, but cgroups v2 are slowly being rolled out

  (e.g. available in Ubuntu 22.04 LTS)

- Cgroups v2 have new, interesting features for memory control:

  - ability to set "minimum" memory amounts (to effectively reserve memory)

  - better control over the amount of swap used by a container

---

class: extra-details

## What's the deal with swap?

- The semantics of memory and swap limits on Linux cgroups are complex

- With cgroups v1, it's not possible to disable swap for a cgroup

@@ -268,8 +238,6 @@ class: extra-details

- The simplest solution was to disable swap entirely

- Kubelet will refuse to start if it detects that swap is enabled!

---

## Alternative point of view

@@ -300,7 +268,7 @@ class: extra-details

- You will need to add the flag `--fail-swap-on=false` to kubelet

  (remember: it won't otherwise start if it detects that swap is enabled)
  (otherwise, it won't start!)

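The config-file equivalent of that flag, as a minimal kubelet configuration sketch (the file path may vary across distributions):

```yaml
# e.g. /var/lib/kubelet/config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false          # same effect as --fail-swap-on=false
```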
---

@@ -698,18 +666,6 @@ class: extra-details

---

## Underutilization

- Remember: when assigning a pod to a node, the scheduler looks at *requests*

  (not at current utilization on the node)

- If pods request resources but don't use them, this can lead to underutilization

  (because the scheduler will consider that the node is full and can't fit new pods)

---

## Viewing a namespace's limits and quotas

- `kubectl describe namespace` will display resource limits and quotas

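For instance, such per-namespace constraints come from `ResourceQuota` (and `LimitRange`) objects; a minimal sketch with placeholder names and values:

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota      # hypothetical name
  namespace: dev           # hypothetical namespace
spec:
  hard:
    requests.cpu: "4"      # cap on the sum of CPU requests in the namespace
    limits.memory: 8Gi     # cap on the sum of memory limits in the namespace
```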
@@ -1,359 +0,0 @@

# Service Types

- There are different types of services:

  `ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName`

- There are also *headless services*

- Services can also have optional *external IPs*

- There is also another resource type called *Ingress*

  (specifically for HTTP services)

- Wow, that's a lot! Let's start with the basics ...

---

## `ClusterIP`

- It's the default service type

- A virtual IP address is allocated for the service

  (in an internal, private range; e.g. 10.96.0.0/12)

- This IP address is reachable only from within the cluster (nodes and pods)

- Our code can connect to the service using the original port number

- Perfect for internal communication, within the cluster

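As a sketch, a `ClusterIP` service for a hypothetical `blue` app could be declared like this:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: blue               # hypothetical name
spec:
  type: ClusterIP          # the default; shown here for clarity
  selector:
    app: blue              # matches pods labeled app=blue
  ports:
  - port: 80               # port exposed on the virtual IP
    targetPort: 80         # port on the pods
```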
---

class: pic

![](images/kubectl-expose-10.svg)

---

class: pic

![](images/kubectl-expose-11.svg)

---

class: pic

![](images/kubectl-expose-12.svg)

---

class: pic

![](images/kubectl-expose-13.svg)

---

## `LoadBalancer`

- An external load balancer is allocated for the service

  (typically a cloud load balancer, e.g. ELB on AWS, GLB on GCE ...)

- This is available only when the underlying infrastructure provides some kind of
  "load balancer as a service"

- Each service of that type will typically cost a little bit of money

  (e.g. a few cents per hour on AWS or GCE)

- Ideally, traffic would flow directly from the load balancer to the pods

- In practice, it will often flow through a `NodePort` first

---

class: pic

![](images/kubectl-expose-14.svg)

---

class: pic

![](images/kubectl-expose-15.svg)

---

class: pic

![](images/kubectl-expose-16.svg)

---

class: pic

![](images/kubectl-expose-17.svg)

---

class: pic

![](images/kubectl-expose-18.svg)

---

class: pic

![](images/kubectl-expose-19.svg)

---

class: pic

![](images/kubectl-expose-20.svg)

---

class: pic

![](images/kubectl-expose-21.svg)

---

class: pic

![](images/kubectl-expose-22.svg)

---

class: pic

![](images/kubectl-expose-23.svg)

---

class: pic

![](images/kubectl-expose-24.svg)

---

class: pic

![](images/kubectl-expose-25.svg)

---

class: pic

![](images/kubectl-expose-26.svg)

---

class: pic

![](images/kubectl-expose-27.svg)

---

class: pic

![](images/kubectl-expose-28.svg)

---

class: pic

![](images/kubectl-expose-29.svg)

---

## `NodePort`

- A port number is allocated for the service

  (by default, in the 30000-32767 range)

- That port is made available *on all our nodes* and anybody can connect to it

  (we can connect to any node on that port to reach the service)

- Our code needs to be changed to connect to that new port number

- Under the hood: `kube-proxy` sets up a bunch of `iptables` rules on our nodes

- Sometimes, it's the only available option for external traffic

  (e.g. most clusters deployed with kubeadm or on-premises)

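A minimal sketch of the same hypothetical `blue` service as a `NodePort`:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: blue               # hypothetical name
spec:
  type: NodePort
  selector:
    app: blue
  ports:
  - port: 80               # port on the service's cluster IP
    targetPort: 80         # port on the pods
    nodePort: 30080        # optional; must fall in the NodePort range
```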
---

class: extra-details

## `ExternalName`

- Services of type `ExternalName` are quite different

- No load balancer (internal or external) is created

- Only a DNS entry gets added to the DNS managed by Kubernetes

- That DNS entry will just be a `CNAME` to a provided record

Example:
```bash
kubectl create service externalname k8s --external-name kubernetes.io
```
*Creates a CNAME `k8s` pointing to `kubernetes.io`*

---

class: extra-details

## External IPs

- We can add an External IP to a service, e.g.:
  ```bash
  kubectl expose deploy my-little-deploy --port=80 --external-ip=1.2.3.4
  ```

- `1.2.3.4` should be the address of one of our nodes

  (it could also be a virtual address, service address, or VIP, shared by multiple nodes)

- Connections to `1.2.3.4:80` will be sent to our service

- External IPs will also show up on services of type `LoadBalancer`

  (they will be added automatically by the process provisioning the load balancer)

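In YAML form, a sketch of a service carrying an external IP (the address and names are placeholders):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: blue               # hypothetical name
spec:
  selector:
    app: blue
  ports:
  - port: 80
  externalIPs:
  - 1.2.3.4                # should be an address carried by one of our nodes
```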
---

class: extra-details

## Headless services

- Sometimes, we want to access our scaled services directly:

  - if we want to save a tiny little bit of latency (typically less than 1ms)

  - if we need to connect over arbitrary ports (instead of a few fixed ones)

  - if we need to communicate over another protocol than UDP or TCP

  - if we want to decide how to balance the requests client-side

  - ...

- In that case, we can use a "headless service"

---

class: extra-details

## Creating a headless service

- A headless service is obtained by setting the `clusterIP` field to `None`

  (Either with `--cluster-ip=None`, or by providing a custom YAML)

- As a result, the service doesn't have a virtual IP address

- Since there is no virtual IP address, there is no load balancer either

- CoreDNS will return the pods' IP addresses as multiple `A` records

- This gives us an easy way to discover all the replicas for a deployment

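A minimal headless service sketch (names are placeholders):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: blue-headless      # hypothetical name
spec:
  clusterIP: None          # this is what makes the service headless
  selector:
    app: blue
  ports:
  - port: 80
```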
---

class: extra-details

## Services and endpoints

- A service has a number of "endpoints"

- Each endpoint is a host + port where the service is available

- The endpoints are maintained and updated automatically by Kubernetes

.lab[

- Check the endpoints that Kubernetes has associated with our `blue` service:
  ```bash
  kubectl describe service blue
  ```

]

In the output, there will be a line starting with `Endpoints:`.

That line will list a bunch of addresses in `host:port` format.

---

class: extra-details

## Viewing endpoint details

- When we have many endpoints, our display commands truncate the list
  ```bash
  kubectl get endpoints
  ```

- If we want to see the full list, we can use one of the following commands:
  ```bash
  kubectl describe endpoints blue
  kubectl get endpoints blue -o yaml
  ```

- These commands will show us a list of IP addresses

- These IP addresses should match the addresses of the corresponding pods:
  ```bash
  kubectl get pods -l app=blue -o wide
  ```

---

class: extra-details

## `endpoints` not `endpoint`

- `endpoints` is the only resource that cannot be singular

```bash
$ kubectl get endpoint
error: the server doesn't have a resource type "endpoint"
```

- This is because the type itself is plural (unlike every other resource)

- There is no `endpoint` object: `type Endpoints struct`

- The type doesn't represent a single endpoint, but a list of endpoints

---

class: extra-details

## `Ingress`

- Ingresses are another type (kind) of resource

- They are specifically for HTTP services

  (not TCP or UDP)

- They can also handle TLS certificates, URL rewriting ...

- They require an *Ingress Controller* to function

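A minimal Ingress sketch routing a hypothetical host to the `blue` service (host and names are placeholders):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: blue               # hypothetical name
spec:
  rules:
  - host: blue.example.com # placeholder host name
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: blue     # an existing ClusterIP service
            port:
              number: 80
```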
---

class: pic

![](images/ingress-1.svg)

---

class: pic

![](images/ingress-2.svg)

---

class: pic

![](images/ingress-3.svg)

---

class: pic

![](images/ingress-4.svg)

???

:EN:- Service types: ClusterIP, NodePort, LoadBalancer

:FR:- Différents types de services : ClusterIP, NodePort, LoadBalancer
@@ -18,37 +18,15 @@

---

### CoLiMa

- Container runtimes for LiMa

  (LiMa = Linux on macOS)

- For macOS only (Intel and ARM architectures)

- CLI-driven (no GUI like Docker/Rancher Desktop)

- Supports containerd, Docker, Kubernetes

- Installable with brew, nix, or ports

- More info: https://github.com/abiosoft/colima

---

## Docker Desktop

- Available on Linux, Mac, and Windows

- Free for personal use and small businesses

  (less than 250 employees and less than $10 million in annual revenue)

- Available on Mac and Windows

- Gives you one cluster with one node

- Streamlined installation and user experience
- Very easy to use if you are already using Docker Desktop:

- Great integration with various network stacks and e.g. corporate VPNs
  go to Docker Desktop preferences and enable Kubernetes

- Ideal for Docker users who need good integration between both platforms

@@ -62,11 +40,13 @@

- Runs Kubernetes nodes in Docker containers

- Can deploy multiple clusters, with multiple nodes
- Can deploy multiple clusters, with multiple nodes, and multiple master nodes

- Runs the control plane on Kubernetes nodes
- As of June 2020, two versions co-exist: stable (1.7) and beta (3.0)

- Control plane can also run on multiple nodes
- They have different syntax and options, which can be confusing

  (but don't let that stop you!)

---

@@ -97,8 +77,6 @@

- Requires Docker (obviously!)

- Should also work with Podman and Rootless Docker

- Deploying a single-node cluster using the latest version is simple:
  ```bash
  kind create cluster
  ```
@@ -106,26 +84,12 @@

- More advanced scenarios require writing a short [config file](https://kind.sigs.k8s.io/docs/user/quick-start#configuring-your-kind-cluster)

  (to define multiple nodes, multiple control plane nodes, set Kubernetes versions ...)
  (to define multiple nodes, multiple master nodes, set Kubernetes versions ...)

- Can deploy multiple clusters

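Such a config file could look like this (a sketch; the roles and counts are arbitrary):

```yaml
# kind-config.yaml (use with: kind create cluster --config kind-config.yaml)
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: control-plane    # multiple control plane nodes are allowed
- role: worker
- role: worker
```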
---

## [MicroK8s](https://microk8s.io/)

- Available on Linux, and since recently, on Mac and Windows as well

- The Linux version is installed through Snap

  (which is pre-installed on all recent versions of Ubuntu)

- Also supports clustering (as in, multiple machines running MicroK8s)

- DNS is not enabled by default; enable it with `microk8s enable dns`

---

## [Minikube](https://minikube.sigs.k8s.io/docs/)

- The "legacy" option!

@@ -144,11 +108,23 @@

---

## [MicroK8s](https://microk8s.io/)

- Available on Linux, and since recently, on Mac and Windows as well

- The Linux version is installed through Snap

  (which is pre-installed on all recent versions of Ubuntu)

- Also supports clustering (as in, multiple machines running MicroK8s)

- DNS is not enabled by default; enable it with `microk8s enable dns`

---

## [Rancher Desktop](https://rancherdesktop.io/)

- Available on Linux, Mac, and Windows

- Free and open-source
- Available on Mac and Windows

- Runs a single cluster with a single node

@@ -158,7 +134,7 @@

- Emphasis on ease of use (like Docker Desktop)

- Relatively young product (first release in May 2021)
- Very young product (first release in May 2021)

- Based on k3s and other proven components

@@ -389,7 +389,7 @@ class: extra-details

- A replacement Pod is created on another Node

- ... But it doesn't start yet!

- Why? 🤔

@@ -1,62 +0,0 @@

title: |
  Kubernetes
  for Admins and Ops

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced
- static-pods-exercise

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- k8s/prereqs-admin.md
- k8s/architecture.md
#- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
-
- k8s/multinode.md
- k8s/cni.md
- k8s/cni-internals.md
- k8s/interco.md
-
- k8s/apilb.md
#- k8s/setup-overview.md
#- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
- k8s/staticpods.md
-
#- k8s/cloud-controller-manager.md
#- k8s/bootstrap.md
- k8s/control-plane-auth.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
-
#- k8s/lastwords-admin.md
- k8s/links.md
- shared/thankyou.md
@@ -1,92 +0,0 @@

title: |
  Kubernetes
  for administrators
  and operators

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
# DAY 1
- - k8s/prereqs-admin.md
- k8s/architecture.md
- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
- - k8s/multinode.md
- k8s/cni.md
- k8s/cni-internals.md
- k8s/interco.md
- - k8s/apilb.md
- k8s/setup-overview.md
#- k8s/setup-devel.md
- k8s/setup-managed.md
- k8s/setup-selfhosted.md
- k8s/cluster-upgrade.md
- k8s/staticpods.md
- - k8s/cluster-backup.md
- k8s/cloud-controller-manager.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
# DAY 2
- - k8s/kubercoins.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/authn-authz.md
- k8s/user-cert.md
- k8s/csr-api.md
- - k8s/openid-connect.md
- k8s/control-plane-auth.md
###- k8s/bootstrap.md
- k8s/netpol.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- - k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
- - k8s/prometheus.md
#- k8s/prometheus-stack.md
- k8s/extending-api.md
- k8s/crd.md
- k8s/operators.md
- k8s/eck.md
###- k8s/operators-design.md
###- k8s/operators-example.md
# CONCLUSION
- - k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md
- |
  # (All content after this slide is bonus material)
# EXTRA
- - k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
@@ -1,90 +0,0 @@

title: |
  Advanced
  Kubernetes

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- #1
- k8s/prereqs-admin.md
- k8s/architecture.md
- k8s/internal-apis.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
- #2
- k8s/multinode.md
- k8s/cni.md
- k8s/interco.md
- #3
- k8s/cni-internals.md
- k8s/apilb.md
- k8s/control-plane-auth.md
- |
  # (Extra content)
- k8s/staticpods.md
- k8s/cluster-upgrade.md
- #4
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- |
  # (Extra content)
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
- k8s/ytt.md
- #5
- k8s/extending-api.md
- k8s/operators.md
- k8s/sealed-secrets.md
- k8s/crd.md
- #6
- k8s/ingress-tls.md
- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/eck.md
- #7
- k8s/admission.md
- k8s/kyverno.md
- #8
- k8s/aggregation-layer.md
- k8s/metrics-server.md
- k8s/prometheus.md
- k8s/prometheus-stack.md
- k8s/hpa-v2.md
- #9
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/kubebuilder.md
- k8s/events.md
- k8s/finalizers.md
- |
  # (Extra content)
- k8s/owners-and-dependents.md
- k8s/apiserver-deepdive.md
#- k8s/record.md
- shared/thankyou.md

@@ -1,135 +0,0 @@

title: |
  Deploying and Scaling Microservices
  with Kubernetes

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectl-run.md
#- k8s/batch-jobs.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
- k8s/service-types.md
- k8s/kubenet.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
-
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- k8s/namespaces.md
- k8s/yamldeploy.md
- k8s/setup-overview.md
- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
-
- k8s/dashboard.md
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/ingress.md
#- k8s/volumes.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/openebs.md
#- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
#- k8s/scalingdockercoins.md
#- shared/hastyconclusions.md
#- k8s/daemonset.md
#- k8s/authoring-yaml.md
#- k8s/exercise-yaml.md
#- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
#- k8s/accessinternal.md
#- k8s/kubectlproxy.md
#- k8s/healthchecks-more.md
#- k8s/record.md
#- k8s/ingress-tls.md
#- k8s/kustomize.md
#- k8s/helm-intro.md
#- k8s/helm-chart-format.md
#- k8s/helm-create-basic-chart.md
#- k8s/helm-create-better-chart.md
#- k8s/helm-dependencies.md
#- k8s/helm-values-schema-validation.md
#- k8s/helm-secrets.md
#- k8s/exercise-helm.md
#- k8s/ytt.md
#- k8s/gitlab.md
#- k8s/create-chart.md
#- k8s/create-more-charts.md
#- k8s/netpol.md
#- k8s/authn-authz.md
#- k8s/user-cert.md
#- k8s/csr-api.md
#- k8s/openid-connect.md
#- k8s/pod-security-intro.md
#- k8s/pod-security-policies.md
#- k8s/pod-security-admission.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
#- k8s/logs-centralized.md
#- k8s/prometheus.md
#- k8s/prometheus-stack.md
#- k8s/statefulsets.md
#- k8s/consul.md
#- k8s/pv-pvc-sc.md
#- k8s/volume-claim-templates.md
#- k8s/portworx.md
#- k8s/openebs.md
#- k8s/stateful-failover.md
#- k8s/extending-api.md
#- k8s/crd.md
#- k8s/admission.md
#- k8s/operators.md
#- k8s/operators-design.md
#- k8s/operators-example.md
#- k8s/staticpods.md
#- k8s/finalizers.md
#- k8s/owners-and-dependents.md
#- k8s/gitworkflows.md
-
#- k8s/whatsnext.md
- k8s/lastwords.md
#- k8s/links.md
- shared/thankyou.md
@@ -1,90 +0,0 @@

title: |
  Kubernetes 101

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
#- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-overview.md
#- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- - k8s/kubectl-run.md
#- k8s/batch-jobs.md
#- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
#- k8s/service-types.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
#- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- - k8s/dashboard.md
#- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/rollout.md
#- k8s/record.md
- - k8s/logs-cli.md
# Bridget hasn't added EFK yet
#- k8s/logs-centralized.md
- k8s/namespaces.md
- k8s/helm-intro.md
#- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
#- k8s/helm-create-better-chart.md
#- k8s/helm-dependencies.md
#- k8s/helm-values-schema-validation.md
#- k8s/helm-secrets.md
#- k8s/kustomize.md
#- k8s/ytt.md
#- k8s/netpol.md
- k8s/whatsnext.md
# - k8s/links.md
# Bridget-specific
- k8s/links-bridget.md
- shared/thankyou.md
@@ -1,167 +0,0 @@

title: |
  Deploying and Scaling Microservices
  with Docker and Kubernetes

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- in-person

content:
- shared/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
-
- k8s/kubectlget.md
- k8s/kubectl-run.md
- k8s/batch-jobs.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
-
- k8s/kubectlexpose.md
- k8s/service-types.md
- k8s/kubenet.md
- k8s/shippingimages.md
- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
- k8s/yamldeploy.md
-
- k8s/setup-overview.md
- k8s/setup-devel.md
- k8s/setup-managed.md
- k8s/setup-selfhosted.md
- k8s/dashboard.md
- k8s/k9s.md
- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/authoring-yaml.md
#- k8s/exercise-yaml.md
-
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
- k8s/record.md
-
- k8s/namespaces.md
- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
- k8s/accessinternal.md
- k8s/kubectlproxy.md
-
- k8s/ingress.md
- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
- k8s/ingress-tls.md
- k8s/cert-manager.md
- k8s/cainjector.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
#- k8s/exercise-helm.md
- k8s/gitlab.md
- k8s/ytt.md
-
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/pod-security-intro.md
- k8s/pod-security-policies.md
- k8s/pod-security-admission.md
- k8s/user-cert.md
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/control-plane-auth.md
-
- k8s/volumes.md
#- k8s/exercise-configmap.md
- k8s/build-with-docker.md
- k8s/build-with-kaniko.md
-
- k8s/configuration.md
- k8s/secrets.md
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
-
- k8s/logs-centralized.md
- k8s/prometheus.md
- k8s/prometheus-stack.md
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/cluster-autoscaler.md
- k8s/horizontal-pod-autoscaler.md
- k8s/hpa-v2.md
-
- k8s/extending-api.md
- k8s/apiserver-deepdive.md
- k8s/crd.md
- k8s/aggregation-layer.md
- k8s/admission.md
- k8s/operators.md
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/kubebuilder.md
- k8s/sealed-secrets.md
- k8s/kyverno.md
- k8s/eck.md
- k8s/finalizers.md
- k8s/owners-and-dependents.md
- k8s/events.md
-
- k8s/dmuc.md
- k8s/multinode.md
- k8s/cni.md
- k8s/cni-internals.md
- k8s/apilb.md
- k8s/staticpods.md
-
- k8s/cluster-upgrade.md
- k8s/cluster-backup.md
- k8s/cloud-controller-manager.md
- k8s/gitworkflows.md
-
- k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md
@@ -1,135 +0,0 @@

title: |
  Deploying and Scaling Microservices
  with Kubernetes

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectl-run.md
- k8s/batch-jobs.md
- k8s/labels-annotations.md
- k8s/kubectl-logs.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
- k8s/service-types.md
- k8s/kubenet.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/exercise-wordsmith.md
-
- k8s/yamldeploy.md
- k8s/setup-overview.md
- k8s/setup-devel.md
#- k8s/setup-managed.md
#- k8s/setup-selfhosted.md
- k8s/dashboard.md
- k8s/k9s.md
#- k8s/tilt.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/authoring-yaml.md
#- k8s/exercise-yaml.md
-
- k8s/localkubeconfig.md
#- k8s/access-eks-cluster.md
- k8s/accessinternal.md
#- k8s/kubectlproxy.md
- k8s/rollout.md
- k8s/healthchecks.md
#- k8s/healthchecks-more.md
- k8s/record.md
-
- k8s/namespaces.md
- k8s/ingress.md
#- k8s/ingress-advanced.md
#- k8s/ingress-canary.md
#- k8s/ingress-tls.md
- k8s/kustomize.md
- k8s/helm-intro.md
- k8s/helm-chart-format.md
- k8s/helm-create-basic-chart.md
- k8s/helm-create-better-chart.md
- k8s/helm-dependencies.md
- k8s/helm-values-schema-validation.md
- k8s/helm-secrets.md
#- k8s/exercise-helm.md
#- k8s/ytt.md
- k8s/gitlab.md
-
- k8s/netpol.md
- k8s/authn-authz.md
#- k8s/csr-api.md
#- k8s/openid-connect.md
#- k8s/pod-security-intro.md
#- k8s/pod-security-policies.md
#- k8s/pod-security-admission.md
-
- k8s/volumes.md
#- k8s/exercise-configmap.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
- k8s/configuration.md
- k8s/secrets.md
- k8s/logs-centralized.md
#- k8s/prometheus.md
#- k8s/prometheus-stack.md
-
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
#- k8s/extending-api.md
#- k8s/admission.md
#- k8s/operators.md
#- k8s/operators-design.md
#- k8s/operators-example.md
#- k8s/staticpods.md
#- k8s/owners-and-dependents.md
#- k8s/gitworkflows.md
-
- k8s/whatsnext.md
- k8s/lastwords.md
- k8s/links.md
- shared/thankyou.md
45
slides/kube.yml
Normal file
@@ -0,0 +1,45 @@

title: |
  Thoughtworks Infrastructure
  (Starring: Kubernetes!)

chat: "[thoughtworks-infrastructure Slack](https://skillsmatter.slack.com/archives/C03E90W6Z6U)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2022-08-thoughtworks.container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- shared/toc.md
- #1
- k8s/demo-apps.md
- k8s/netpol.md
- k8s/authn-authz.md
- exercises/netpol-details.md
- exercises/rbac-details.md
- #2
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
- k8s/kubectlproxy.md
- k8s/setup-devel.md
- exercises/localcluster-details.md
- exercises/healthchecks-details.md
- #3
- |
  # (Extra material)
- k8s/deploymentslideshow.md
@@ -1,51 +1,25 @@

## Introductions (in 🇫🇷)
## Introductions

- Hello!
- Hello! I'm Jérôme Petazzoni ([@jpetazzo])

- On stage: Jérôme ([@jpetazzo])
- We'll have two 2-hour workshops

- Backstage: Alexandre, Antoine, Aurélien (x2), Benji, David, Julien, Kostas, Nicolas, Paul, Thibault...
  (August 17th and 24th)

- Schedule: every day from 9am to 1pm
- We'll do a short 5-minute break in the middle of each workshop

- We'll take a break around 11am
- Feel free to interrupt for questions at any time!

- Don't hesitate to ask as many questions as possible!
- Live feedback, questions, help, useful links:

- Use @@CHAT@@ for questions, to ask for help, etc.
  @@CHAT@@

- I'll be available on that Slack channel after the workshop, too!

<!-- -->

[@alexbuisine]: https://twitter.com/alexbuisine
[EphemeraSearch]: https://ephemerasearch.com/
[@jpetazzo]: https://twitter.com/jpetazzo
[@s0ulshake]: https://twitter.com/s0ulshake
[Quantgene]: https://www.quantgene.com/

---

## The morning 15 minutes

- Each day, we'll start at 9am with a 15-minute mini-presentation

  (on a topic chosen together, not necessarily related to the training!)

- A chance to warm up our neurons with 🥐/☕️/🍊

  (before tackling the serious stuff)

- Then at 9:15am, we get down to business

---

## Exercises

- At the end of each day, there is a series of exercises

- To make the most out of the training, please try the exercises!

  (it will help you practice and memorize the content of the day)

- We recommend taking at least one hour to work on the exercises

  (if you understood the content of the day, it will be much faster)

- Each day will start with a quick review of the exercises of the previous day

@@ -4,8 +4,6 @@

@@SLIDES@@

- This is a public URL, you're welcome to share it with others!

- Use arrows to move to next/previous slide

  (up, down, left, right, page up, page down)
@@ -18,28 +16,6 @@

---

## These slides are open source

- The sources of these slides are available in a public GitHub repository:

  https://@@GITREPO@@

- These slides are written in Markdown

- You are welcome to share, re-use, re-mix these slides

- Typos? Mistakes? Questions? Feel free to hover over the bottom of the slide ...

.footnote[👇 Try it! The source file will be shown and you can view it on GitHub and fork and edit it.]

<!--
.lab[
```open https://@@GITREPO@@/tree/master/slides/common/about-slides.md```
]
-->

---

## Accessing these slides later

- Slides will remain online so you can review them later if needed
@@ -52,23 +28,31 @@

  (then open the file `@@HTML@@`)

- You can also generate a PDF of the slides
- You will find new versions of these slides on:

  (by printing them to a file; but be patient with your browser!)
  https://container.training/

---

## These slides are constantly updated
## These slides are open source

- Feel free to check the GitHub repository for updates:
- You are welcome to use, re-use, share these slides

- These slides are written in Markdown

- The sources of these slides are available in a public GitHub repository:

  https://@@GITREPO@@

- Look for branches named YYYY-MM-...
- Typos? Mistakes? Questions? Feel free to hover over the bottom of the slide ...

- You can also find specific decks and other resources on:
.footnote[👇 Try it! The source file will be shown and you can view it on GitHub and fork and edit it.]

  https://container.training/
<!--
.lab[
```open https://@@GITREPO@@/tree/master/slides/common/about-slides.md```
]
-->

---

@@ -149,7 +149,7 @@ You are welcome to use the method that you feel the most comfortable with.

---

## Tmux cheat sheet (basic)
## Tmux cheat sheet

[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.

@@ -159,35 +159,13 @@ But some of us like to use it to switch between terminals.

<br/>
It has been preinstalled on your workshop nodes.*

- You can start a new session with `tmux`
  <br/>
  (or resume or share an existing session with `tmux attach`)

- Then use these keyboard shortcuts:

  - Ctrl-b c → create a new window
  - Ctrl-b n → go to next window
  - Ctrl-b p → go to previous window
  - Ctrl-b " → split window top/bottom
  - Ctrl-b % → split window left/right
  - Ctrl-b arrows → navigate within split windows

---

## Tmux cheat sheet (advanced)

- Ctrl-b d → detach session
  <br/>
  (resume it later with `tmux attach`)

- Ctrl-b c → create a new window
- Ctrl-b n → go to next window
- Ctrl-b p → go to previous window
- Ctrl-b " → split window top/bottom
- Ctrl-b % → split window left/right

- Ctrl-b Alt-1 → rearrange windows in columns

- Ctrl-b Alt-2 → rearrange windows in rows

- Ctrl-b , → rename window

- Ctrl-b Ctrl-o → cycle pane position (e.g. switch top/bottom)

- Ctrl-b PageUp → enter scrollback mode
  <br/>
  (use PageUp/PageDown to scroll; Ctrl-c or Enter to exit scrollback)

- Ctrl-b arrows → navigate to other windows
- Ctrl-b d → detach session
- tmux attach → re-attach to session

@@ -1,24 +1,11 @@

class: title
class: title, self-paced

Thank you!

![end](images/end.jpg)
Thank you!

---

## Last words...
class: title, in-person

- The training portal stays online after the training

- Don't hesitate to contact us via instant messaging!

- The ENIX VMs stay online for at least one week after the training

  (but not the cloud clusters; those get shut down very quickly)

- Don't forget to fill in the evaluation forms

  (it's not for us, it's a legal obligation 😅)

- Once again, **thank you**!
That's all, folks! <br/> Questions?

![end](images/end.jpg)

@@ -1,71 +0,0 @@

title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced
- snap
- btp-auto
- benchmarking
- elk-manual
- prom-manual

content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/ipsec.md
- swarm/swarmtools.md
- swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- - swarm/logging.md
- swarm/metrics.md
- swarm/gui.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,70 +0,0 @@

title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced
- snap
- btp-manual
- benchmarking
- elk-manual
- prom-manual

content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
#- swarm/hostingregistry.md
#- swarm/testingregistry.md
#- swarm/btp-manual.md
#- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
#- swarm/rollingupdates.md
#- swarm/healthchecks.md
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/ipsec.md
#- swarm/swarmtools.md
- swarm/security.md
#- swarm/secrets.md
#- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- swarm/logging.md
- swarm/metrics.md
#- swarm/stateful.md
#- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,79 +0,0 @@

title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- in-person
- btp-auto

content:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/swarmnbt.md
- swarm/ipsec.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- swarm/nodeinfo.md
- swarm/swarmtools.md
- - swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- swarm/logging.md
- swarm/metrics.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,74 +0,0 @@

title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- in-person
- btp-auto

content:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/swarmnbt.md
- swarm/ipsec.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
#- swarm/healthchecks.md
- swarm/nodeinfo.md
- swarm/swarmtools.md
- - swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
#- swarm/logging.md
#- swarm/metrics.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md