Mirror of https://github.com/jpetazzo/container.training.git (synced 2026-02-15 18:19:56 +00:00)

Compare commits: gitpod...2020-06-ar (62 commits)
| SHA1 |
|---|
| 38f3764dc5 |
| 71ae28e147 |
| 3e869ddca9 |
| 276a2dbdda |
| 2836b58078 |
| 0d065788a4 |
| 14271a4df0 |
| 412d029d0c |
| f960230f8e |
| 774c8a0e31 |
| 4671a981a7 |
| b9743a5f8c |
| df4980750c |
| 9467c7309e |
| 86b0380a77 |
| eb9052ae9a |
| 8f85332d8a |
| 0479ad2285 |
| 986d7eb9c2 |
| 3fafbb8d4e |
| 5a24df3fd4 |
| 1bbfba0531 |
| 8d98431ba0 |
| c31c81a286 |
| a0314fc5f5 |
| 3f088236a4 |
| ce4e2ffe46 |
| c3a05a6393 |
| 40b2b8e62e |
| efdcf4905d |
| bdb57c05b4 |
| af0762a0a2 |
| 0d6c364a95 |
| 690a1eb75c |
| c796a6bfc1 |
| 0b10d3d40d |
| cdb50925da |
| ca1f8ec828 |
| 7302d3533f |
| d3c931e602 |
| 7402c8e6a8 |
| 1de539bff8 |
| a6c7d69986 |
| b0bff595cf |
| 6f806ed200 |
| 0c8b20f6b6 |
| 2ba35e1f8d |
| eb0d9bed2a |
| bab493a926 |
| f4f2d83fa4 |
| 9f049951ab |
| 7257a5c594 |
| 102aef5ac5 |
| d2b3a1d663 |
| d84ada0927 |
| 0e04b4a07d |
| aef910b4b7 |
| 298b6db20c |
| 7ec6e871c9 |
| a0558e4ee5 |
| 16a62f9f84 |
| 2ce50007d2 |
@@ -1,3 +1,10 @@
# This file is based on the following manifest:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# It adds the "skip login" flag, as well as an insecure hack to defeat SSL.
# As its name implies, it is INSECURE and you should not use it in production,
# or on clusters that contain any kind of important or sensitive data, or on
# clusters that have a life span of more than a few hours.

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -187,7 +194,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0-rc2
image: kubernetesui/dashboard:v2.0.0
imagePullPolicy: Always
ports:
- containerPort: 8443
@@ -226,7 +233,7 @@ spec:
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
@@ -272,7 +279,7 @@ spec:
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.2
image: kubernetesui/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
@@ -293,7 +300,7 @@ spec:
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
@@ -1,3 +1,6 @@
|
||||
# This is a copy of the following file:
|
||||
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -12,19 +15,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# ------------------- Dashboard Secret ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
kind: Namespace
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kube-system
|
||||
type: Opaque
|
||||
name: kubernetes-dashboard
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service Account ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
@@ -32,62 +28,147 @@ metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Role & Role Binding ------------------- #
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics from heapster.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster", "dashboard-metrics-scraper"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubernetes-dashboard-minimal
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Deployment ------------------- #
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
@@ -95,7 +176,7 @@ metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
@@ -108,55 +189,117 @@ spec:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service ------------------- #
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
|
||||
@@ -14,7 +14,7 @@ spec:
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/

(File diff suppressed because it is too large.)
@@ -22,7 +22,10 @@ spec:
command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
containers:
- name: postgres
image: postgres:11
image: postgres:12
env:
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres

@@ -1,5 +1,5 @@
---
apiVersion: extensions/v1beta1
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:

@@ -8,24 +8,24 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: users:jean.doe
name: user=jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
resources: [ certificatesigningrequests ]
verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
resourceNames: [ users:jean.doe ]
resourceNames: [ user=jean.doe ]
resources: [ certificatesigningrequests ]
verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: users:jean.doe
name: user=jean.doe
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: users:jean.doe
name: user=jean.doe
subjects:
- kind: ServiceAccount
name: jean.doe
@@ -246,11 +246,21 @@ EOF"
|
||||
helm completion bash | sudo tee /etc/bash_completion.d/helm
|
||||
fi"
|
||||
|
||||
# Install kustomize
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kustomize ]; then
|
||||
curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v3.5.4/kustomize_v3.5.1_linux_amd64.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx kustomize
|
||||
echo complete -C /usr/local/bin/kustomize kustomize | sudo tee /etc/bash_completion.d/kustomize
|
||||
fi"
|
||||
|
||||
# Install ship
|
||||
# Note: 0.51.3 is the last version that doesn't display GIN-debug messages
|
||||
# (don't want to get folks confused by that!)
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/ship ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
|
||||
curl -L https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_amd64.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx ship
|
||||
fi"
|
||||
|
||||
@@ -329,7 +339,7 @@ _cmd_maketag() {
|
||||
if [ -z $USER ]; then
|
||||
export USER=anonymous
|
||||
fi
|
||||
MS=$(($(date +%N)/1000000))
|
||||
MS=$(($(date +%N | tr -d 0)/1000000))
|
||||
date +%Y-%m-%d-%H-%M-$MS-$USER
|
||||
}
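The `tr -d 0` change above is presumably meant to keep the arithmetic from failing when the nanosecond value starts with a zero, since a leading zero makes the shell parse the number as octal. A minimal illustration of the failure mode (not part of the repository; the value is made up):

```sh
# A leading zero makes shell arithmetic treat the value as octal,
# and digits 8/9 are then invalid:
N=098765432
echo $(( N / 1000000 ))                      # fails: "value too great for base"

# Deleting the zeros (as the patched line does) sidesteps the error:
echo $(( $(echo $N | tr -d 0) / 1000000 ))   # prints 98
```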

@@ -483,6 +493,7 @@ _cmd_start() {
--settings) SETTINGS=$2; shift 2;;
--count) COUNT=$2; shift 2;;
--tag) TAG=$2; shift 2;;
--students) STUDENTS=$2; shift 2;;
*) die "Unrecognized parameter: $1."
esac
done
@@ -494,8 +505,14 @@ _cmd_start() {
die "Please add --settings flag to specify which settings file to use."
fi
if [ -z "$COUNT" ]; then
COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
warning "No --count option was specified. Using value from settings file ($COUNT)."
CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
if [ -z "$STUDENTS" ]; then
warning "Neither --count nor --students was specified."
warning "According to the settings file, the cluster size is $CLUSTERSIZE."
warning "Deploying one cluster of $CLUSTERSIZE nodes."
STUDENTS=1
fi
COUNT=$(($STUDENTS*$CLUSTERSIZE))
fi

# Check that the specified settings and infrastructure are valid.
@@ -513,11 +530,41 @@ _cmd_start() {
infra_start $COUNT
sep
info "Successfully created $COUNT instances with tag $TAG"
sep
echo created > tags/$TAG/status

info "To deploy Docker on these instances, you can run:"
info "$0 deploy $TAG"
# If the settings.yaml file has a "steps" field,
# automatically execute all the actions listed in that field.
# If an action fails, retry it up to 10 times.
python -c 'if True: # hack to deal with indentation
import sys, yaml
settings = yaml.safe_load(sys.stdin)
print ("\n".join(settings.get("steps", [])))
' < tags/$TAG/settings.yaml \
| while read step; do
if [ -z "$step" ]; then
break
fi
sep
info "Automatically executing step '$step'."
TRY=1
MAXTRY=10
while ! $0 $step $TAG ; do
TRY=$(($TRY+1))
if [ $TRY -gt $MAXTRY ]; then
error "This step ($step) failed after $MAXTRY attempts."
info "You can troubleshoot the situation manually, or terminate these instances with:"
info "$0 stop $TAG"
die "Giving up."
else
sep
info "Step '$step' failed. Let's wait 10 seconds and try again."
info "(Attempt $TRY out of $MAXTRY.)"
sleep 10
fi
done
done
sep
info "Deployment successful."
info "To terminate these instances, you can run:"
info "$0 stop $TAG"
}
@@ -21,3 +21,9 @@ machine_version: 0.15.0

# Password used to connect with the "docker user"
docker_user_password: training

steps:
- deploy
- webssh
- tailhist
- cards

@@ -20,3 +20,10 @@ machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

steps:
- deploy
- webssh
- tailhist
- kube
- cards
- kubetest
@@ -1,7 +1,7 @@
# Uncomment and/or edit one of the following lines if necessary.
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!

# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
@@ -13,3 +13,10 @@

# Shortlink for the QRCode
/q /qrcode.html 200

# Shortlinks for next training in English and French
/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
/hi5 https://enix.io/fr/services/formation/online/

/ /k8s-bootcamp.yml.html 200!
/chat https://gitter.im/jpetazzo/training-20200609-online
@@ -1,7 +1,7 @@

class: title

# Advanced Dockerfiles
# Advanced Dockerfile Syntax



@@ -12,7 +12,10 @@ class: title
We have seen simple Dockerfiles to illustrate how Docker builds
container images.

In this section, we will see more Dockerfile commands.
In this section, we will give a recap of the Dockerfile syntax,
and introduce advanced Dockerfile commands that we might
come across sometimes; or that we might want to use in some
specific scenarios.

---

@@ -420,3 +423,8 @@ ONBUILD COPY . /src

* You can't chain `ONBUILD` instructions with `ONBUILD`.
* `ONBUILD` can't be used to trigger `FROM` instructions.

???

:EN:- Advanced Dockerfile syntax
:FR:- Dockerfile niveau expert

@@ -280,3 +280,8 @@ CONTAINER ID IMAGE ... CREATED STATUS
5c1dfd4d81f1 jpetazzo/clock ... 40 min. ago Exited (0) 40 min. ago
b13c164401fb ubuntu ... 55 min. ago Exited (130) 53 min. ago
```

???

:EN:- Foreground and background containers
:FR:- Exécution interactive ou en arrière-plan

@@ -167,3 +167,8 @@ Automated process = good.

In the next chapter, we will learn how to automate the build
process by writing a `Dockerfile`.

???

:EN:- Building our first images interactively
:FR:- Fabriquer nos premières images à la main

@@ -363,3 +363,10 @@ In this example, `sh -c` will still be used, but
The shell gets replaced by `figlet` when `figlet` starts execution.

This allows running processes as PID 1 without using JSON.
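For context, the pattern described here can be sketched as follows (an illustration only, not the deck's actual Dockerfile; the base image and the `figlet` invocation are assumptions):

```dockerfile
FROM ubuntu
RUN apt-get update && apt-get install -y figlet
# Shell form: Docker runs /bin/sh -c "exec figlet hello";
# "exec" replaces the shell, so figlet ends up as PID 1 without JSON (exec-form) syntax.
CMD exec figlet hello
```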

???

:EN:- Towards automated, reproducible builds
:EN:- Writing our first Dockerfile
:FR:- Rendre le processus automatique et reproductible
:FR:- Écrire son premier Dockerfile

@@ -272,3 +272,7 @@ $ docker run -it --entrypoint bash myfiglet
root@6027e44e2955:/#
```

???

:EN:- CMD and ENTRYPOINT
:FR:- CMD et ENTRYPOINT

@@ -322,3 +322,11 @@ You can:
Each copy will run in a different network, totally isolated from the other.

This is ideal to debug regressions, do side-by-side comparisons, etc.

???

:EN:- Using compose to describe an environment
:EN:- Connecting services together with a *Compose file*

:FR:- Utiliser Compose pour décrire son environnement
:FR:- Écrire un *Compose file* pour connecter les services entre eux
@@ -226,3 +226,13 @@ We've learned how to:

In the next chapter, we will see how to connect
containers together without exposing their ports.

???

:EN:Connecting containers
:EN:- Container networking basics
:EN:- Exposing a container

:FR:Connecter les conteneurs
:FR:- Description du modèle réseau des conteneurs
:FR:- Exposer un conteneur

@@ -98,3 +98,8 @@ Success!

* Place it in a different directory, with the `WORKDIR` instruction.

* Even better, use the `gcc` official image.

???

:EN:- The build cache
:FR:- Tirer parti du cache afin d'optimiser la vitesse de *build*

@@ -431,3 +431,8 @@ services:

- It's OK (and even encouraged) to start simple and evolve as needed.

- Feel free to review this chapter later (after writing a few Dockerfiles) for inspiration!

???

:EN:- Dockerfile tips, tricks, and best practices
:FR:- Bonnes pratiques pour la construction des images

@@ -290,3 +290,8 @@ bash: figlet: command not found

* We have a clear definition of our environment, and can share it reliably with others.

* Let's see in the next chapters how to bake a custom image with `figlet`!

???

:EN:- Running our first container
:FR:- Lancer nos premiers conteneurs

@@ -226,3 +226,8 @@ docker export <container_id> | tar tv
```

This will give a detailed listing of the content of the container.

???

:EN:- Troubleshooting and getting inside a container
:FR:- Inspecter un conteneur en détail, en *live* ou *post-mortem*

@@ -375,3 +375,13 @@ We've learned how to:
* Understand Docker image namespacing.
* Search and download images.

???

:EN:Building images
:EN:- Containers, images, and layers
:EN:- Image addresses and tags
:EN:- Finding and transferring images

:FR:Construire des images
:FR:- La différence entre un conteneur et une image
:FR:- La notion de *layer* partagé entre images

@@ -80,3 +80,8 @@ $ docker ps --filter label=owner=alice

(To determine internal cross-billing, or who to page in case of outage.)

* etc.

???

:EN:- Using labels to identify containers
:FR:- Étiqueter ses conteneurs avec des méta-données

@@ -391,3 +391,10 @@ We've learned how to:

* Use a simple local development workflow.

???

:EN:Developing with containers
:EN:- “Containerize” a development environment

:FR:Développer au jour le jour
:FR:- « Containeriser » son environnement de développement
@@ -313,3 +313,11 @@ virtually "free."

* Sometimes, we want to inspect a specific intermediary build stage.

* Or, we want to describe multiple images using a single Dockerfile.
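As a quick illustration (not taken from the deck; the stage names and files are assumptions), named stages address both needs, and `docker build --target <stage>` stops at a chosen intermediary stage:

```dockerfile
FROM gcc AS builder
COPY hello.c .
RUN gcc -o /hello hello.c

FROM ubuntu AS runtime
COPY --from=builder /hello /usr/local/bin/hello
CMD ["hello"]
```

Building with `docker build --target builder .` produces only the first stage, while a plain `docker build .` produces the final `runtime` image.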

???

:EN:Optimizing our images and their build process
:EN:- Leveraging multi-stage builds

:FR:Optimiser les images et leur construction
:FR:- Utilisation d'un *multi-stage build*

@@ -130,3 +130,12 @@ $ docker inspect --format '{{ json .Created }}' <containerID>

* The optional `json` keyword asks for valid JSON output.
<br/>(e.g. here it adds the surrounding double-quotes.)

???

:EN:Managing container lifecycle
:EN:- Naming and inspecting containers

:FR:Suivre ses conteneurs à la loupe
:FR:- Obtenir des informations détaillées sur un conteneur
:FR:- Associer un identifiant unique à un conteneur

@@ -175,3 +175,10 @@ class: extra-details
* This will cause some CLI and TUI programs to redraw the screen.

* But not all of them.

???

:EN:- Restarting old containers
:EN:- Detaching and reattaching to container
:FR:- Redémarrer des anciens conteneurs
:FR:- Se détacher et rattacher à des conteneurs

@@ -125,3 +125,11 @@ Server:
]

If this doesn't work, raise your hand so that an instructor can assist you!

???

:EN:Container concepts
:FR:Premier contact avec les conteneurs

:EN:- What's a container engine?
:FR:- Qu'est-ce qu'un *container engine* ?

@@ -11,10 +11,10 @@ class State(object):
self.section_title = None
self.section_start = 0
self.section_slides = 0
self.chapters = {}
self.modules = {}
self.sections = {}
def show(self):
if self.section_title.startswith("chapter-"):
if self.section_title.startswith("module-"):
return
print("{0.section_title}\t{0.section_start}\t{0.section_slides}".format(self))
self.sections[self.section_title] = self.section_slides
@@ -38,10 +38,10 @@ for line in open(sys.argv[1]):
if line == "--":
state.current_slide += 1
toc_links = re.findall("\(#toc-(.*)\)", line)
if toc_links and state.section_title.startswith("chapter-"):
if state.section_title not in state.chapters:
state.chapters[state.section_title] = []
state.chapters[state.section_title].append(toc_links[0])
if toc_links and state.section_title.startswith("module-"):
if state.section_title not in state.modules:
state.modules[state.section_title] = []
state.modules[state.section_title].append(toc_links[0])
# This is really hackish
if line.startswith("class:"):
for klass in EXCLUDED:
@@ -51,7 +51,7 @@ for line in open(sys.argv[1]):

state.show()

for chapter in sorted(state.chapters, key=lambda f: int(f.split("-")[1])):
chapter_size = sum(state.sections[s] for s in state.chapters[chapter])
print("{}\t{}\t{}".format("total size for", chapter, chapter_size))
for module in sorted(state.modules, key=lambda f: int(f.split("-")[1])):
module_size = sum(state.sections[s] for s in state.modules[module])
print("{}\t{}\t{}".format("total size for", module, module_size))
slides/fix-redirects.sh (new executable file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This script helps to add "force-redirects" where needed.
|
||||
# This might replace your entire git repos with Vogon poetry.
|
||||
# Use at your own peril!
|
||||
|
||||
set -eu
|
||||
|
||||
# The easiest way to set this env var is by copy-pasting from
|
||||
# the netlify web dashboard, then doctoring the output a bit.
|
||||
# Yeah, that's gross, but after spending 10 minutes with the
|
||||
# API and the CLI and OAuth, it took about 10 seconds to do it
|
||||
# with le copier-coller, so ... :)
|
||||
|
||||
SITES="
|
||||
2020-01-caen
|
||||
2020-01-zr
|
||||
2020-02-caen
|
||||
2020-02-enix
|
||||
2020-02-outreach
|
||||
2020-02-vmware
|
||||
2020-03-ardan
|
||||
2020-03-qcon
|
||||
alfun-2019-06
|
||||
boosterconf2018
|
||||
clt-2019-10
|
||||
dc17eu
|
||||
decembre2018
|
||||
devopsdaysams2018
|
||||
devopsdaysmsp2018
|
||||
gotochgo2018
|
||||
gotochgo2019
|
||||
indexconf2018
|
||||
intro-2019-01
|
||||
intro-2019-04
|
||||
intro-2019-06
|
||||
intro-2019-08
|
||||
intro-2019-09
|
||||
intro-2019-11
|
||||
intro-2019-12
|
||||
k8s2d
|
||||
kadm-2019-04
|
||||
kadm-2019-06
|
||||
kube
|
||||
kube-2019-01
|
||||
kube-2019-02
|
||||
kube-2019-03
|
||||
kube-2019-04
|
||||
kube-2019-06
|
||||
kube-2019-08
|
||||
kube-2019-09
|
||||
kube-2019-10
|
||||
kube-2019-11
|
||||
lisa-2019-10
|
||||
lisa16t1
|
||||
lisa17m7
|
||||
lisa17t9
|
||||
maersk-2019-07
|
||||
maersk-2019-08
|
||||
ndcminnesota2018
|
||||
nr-2019-08
|
||||
oscon2018
|
||||
oscon2019
|
||||
osseu17
|
||||
pycon2019
|
||||
qconsf18wkshp
|
||||
qconsf2017intro
|
||||
qconsf2017swarm
|
||||
qconsf2018
|
||||
qconuk2019
|
||||
septembre2018
|
||||
sfsf-2019-06
|
||||
srecon2018
|
||||
swarm2017
|
||||
velny-k8s101-2018
|
||||
velocity-2019-11
|
||||
velocityeu2018
|
||||
velocitysj2018
|
||||
vmware-2019-11
|
||||
weka
|
||||
wwc-2019-10
|
||||
wwrk-2019-05
|
||||
wwrk-2019-06
|
||||
"
|
||||
|
||||
for SITE in $SITES; do
|
||||
echo "##### $SITE"
|
||||
git checkout -q origin/$SITE
|
||||
# No _redirects? No problem.
|
||||
if ! [ -f _redirects ]; then
|
||||
continue
|
||||
fi
|
||||
# If there is already a force redirect on /, we're good.
|
||||
if grep '^/ .* 200!' _redirects; then
|
||||
continue
|
||||
fi
|
||||
# If there is a redirect on / ... and it's not forced ... do something.
|
||||
if grep "^/ .* 200$" _redirects; then
|
||||
echo "##### $SITE needs to be patched"
|
||||
sed -i 's,^/ \(.*\) 200$,/ \1 200!,' _redirects
|
||||
git add _redirects
|
||||
git commit -m "fix-redirects.sh: adding forced redirect"
|
||||
git push origin HEAD:$SITE
|
||||
continue
|
||||
fi
|
||||
if grep "^/ " _redirects; then
|
||||
echo "##### $SITE with / but no status code"
|
||||
echo "##### Should I add '200!' ?"
|
||||
read foo
|
||||
sed -i 's,^/ \(.*\)$,/ \1 200!,' _redirects
|
||||
git add _redirects
|
||||
git commit -m "fix-redirects.sh: adding status code and forced redirect"
|
||||
git push origin HEAD:$SITE
|
||||
continue
|
||||
fi
|
||||
echo "##### $SITE without / ?"
|
||||
cat _redirects
|
||||
done
|
||||
@@ -7,6 +7,7 @@ FLAGS=dict(
|
||||
fr=u"🇫🇷",
|
||||
uk=u"🇬🇧",
|
||||
us=u"🇺🇸",
|
||||
www=u"🌐",
|
||||
)
|
||||
|
||||
TEMPLATE="""<html>
|
||||
@@ -19,9 +20,9 @@ TEMPLATE="""<html>
|
||||
<div class="main">
|
||||
<table>
|
||||
<tr><td class="header" colspan="3">{{ title }}</td></tr>
|
||||
<tr><td class="details" colspan="3">Note: while some workshops are delivered in French, slides are always in English.</td></tr>
|
||||
<tr><td class="details" colspan="3">Note: while some workshops are delivered in other languages, slides are always in English.</td></tr>
|
||||
|
||||
<tr><td class="title" colspan="3">Free video of our latest workshop</td></tr>
|
||||
<tr><td class="title" colspan="3">Free Kubernetes intro course</td></tr>
|
||||
|
||||
<tr>
|
||||
<td>Getting Started With Kubernetes and Container Orchestration</td>
|
||||
@@ -35,11 +36,11 @@ TEMPLATE="""<html>
|
||||
<td class="details">If you're interested, we can deliver that workshop (or longer courses) to your team or organization.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td class="details">Contact <a href="mailto:jerome.petazzoni@gmail.com">Jérôme Petazzoni</a> to make that happen!</a></td>
|
||||
<td class="details">Contact <a href="mailto:jerome.petazzoni@gmail.com">Jérôme Petazzoni</a> to make that happen!</td>
|
||||
</tr>
|
||||
|
||||
{% if coming_soon %}
|
||||
<tr><td class="title" colspan="3">Coming soon near you</td></tr>
|
||||
<tr><td class="title" colspan="3">Coming soon</td></tr>
|
||||
|
||||
{% for item in coming_soon %}
|
||||
<tr>
|
||||
@@ -140,13 +141,26 @@ import yaml
|
||||
|
||||
items = yaml.safe_load(open("index.yaml"))
|
||||
|
||||
|
||||
def prettyparse(date):
|
||||
months = [
|
||||
"January", "February", "March", "April", "May", "June",
|
||||
"July", "August", "September", "October", "November", "December"
|
||||
]
|
||||
month = months[date.month-1]
|
||||
suffix = {
|
||||
1: "st", 2: "nd", 3: "rd",
|
||||
21: "st", 22: "nd", 23: "rd",
|
||||
31: "st"}.get(date.day, "th")
|
||||
return date.year, month, "{}{}".format(date.day, suffix)
|
||||
|
||||
|
||||
# Items with a date correspond to scheduled sessions.
|
||||
# Items without a date correspond to self-paced content.
|
||||
# The date should be specified as a string (e.g. 2018-11-26).
|
||||
# It can also be a list of two elements (e.g. [2018-11-26, 2018-11-28]).
|
||||
# The latter indicates an event spanning multiple dates.
|
||||
# The first date will be used in the generated page, but the event
|
||||
# will be considered "current" (and therefore, shown in the list of
|
||||
# The event will be considered "current" (shown in the list of
|
||||
# upcoming events) until the second date.
|
||||
|
||||
for item in items:
|
||||
@@ -156,19 +170,23 @@ for item in items:
|
||||
date_begin, date_end = date
|
||||
else:
|
||||
date_begin, date_end = date, date
|
||||
suffix = {
|
||||
1: "st", 2: "nd", 3: "rd",
|
||||
21: "st", 22: "nd", 23: "rd",
|
||||
31: "st"}.get(date_begin.day, "th")
|
||||
# %e is a non-standard extension (it displays the day, but without a
|
||||
# leading zero). If strftime fails with ValueError, try to fall back
|
||||
# on %d (which displays the day but with a leading zero when needed).
|
||||
try:
|
||||
item["prettydate"] = date_begin.strftime("%B %e{}, %Y").format(suffix)
|
||||
except ValueError:
|
||||
item["prettydate"] = date_begin.strftime("%B %d{}, %Y").format(suffix)
|
||||
y1, m1, d1 = prettyparse(date_begin)
|
||||
y2, m2, d2 = prettyparse(date_end)
|
||||
if (y1, m1, d1) == (y2, m2, d2):
|
||||
# Single day event
|
||||
pretty_date = "{} {}, {}".format(m1, d1, y1)
|
||||
elif (y1, m1) == (y2, m2):
|
||||
# Multi-day event within a single month
|
||||
pretty_date = "{} {}-{}, {}".format(m1, d1, d2, y1)
|
||||
elif y1 == y2:
|
||||
# Multi-day event spanning more than a month
|
||||
pretty_date = "{} {}-{} {}, {}".format(m1, d1, m2, d2, y1)
|
||||
else:
|
||||
# Event spanning the turn of the year (REALLY???)
|
||||
pretty_date = "{} {}, {}-{} {}, {}".format(m1, d1, y1, m2, d2, y2)
|
||||
item["begin"] = date_begin
|
||||
item["end"] = date_end
|
||||
item["prettydate"] = pretty_date
|
||||
item["flag"] = FLAGS.get(item.get("country"),"")
|
||||
|
||||
today = datetime.date.today()
|
||||
|
||||
@@ -1,3 +1,81 @@
|
||||
- date: [2020-07-07, 2020-07-09]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Ardan Live
|
||||
speaker: jpetazzo
|
||||
title: Intensive Docker Bootcamp
|
||||
attend: https://www.eventbrite.com/e/livestream-intensive-docker-bootcamp-tickets-103258886108
|
||||
|
||||
- date: [2020-06-15, 2020-06-16]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-06-17, 2020-06-19]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: 2020-06-22
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-06-23, 2020-06-24]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-06-25, 2020-06-26]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-06-09, 2020-06-11]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Ardan Live
|
||||
speaker: jpetazzo
|
||||
title: Intensive Kubernetes Bootcamp
|
||||
attend: https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
|
||||
|
||||
- date: [2020-05-04, 2020-05-08]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Ardan Live
|
||||
speaker: jpetazzo
|
||||
title: Intensive Kubernetes - Advanced Concepts
|
||||
attend: https://www.eventbrite.com/e/livestream-intensive-kubernetes-advanced-concepts-tickets-102358725704
|
||||
|
||||
- date: [2020-03-30, 2020-04-02]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Ardan Live
|
||||
speaker: jpetazzo
|
||||
title: Intensive Docker and Kubernetes
|
||||
attend: https://www.eventbrite.com/e/ardan-labs-live-worldwide-march-30-april-2-2020-tickets-100331129108#
|
||||
slides: https://2020-03-ardan.container.training/
|
||||
|
||||
- date: 2020-03-06
|
||||
country: uk
|
||||
city: London
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
- shared/toc.md
|
||||
-
|
||||
#- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
#- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
#- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Container_Networking_Basics.md
|
||||
#- containers/Network_Drivers.md
|
||||
#- containers/Container_Network_Model.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Multi_Stage_Builds.md
|
||||
#- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Advanced_Dockerfiles.md
|
||||
#- containers/Init_Systems.md
|
||||
#- containers/Application_Configuration.md
|
||||
#- containers/Logging.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Container_Engines.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
#- containers/Orchestration_Overview.md
|
||||
-
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,69 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
# - shared/logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
- shared/toc.md
|
||||
- - containers/Docker_Overview.md
|
||||
- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Start_And_Attach.md
|
||||
- - containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- - containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
#- containers/Connecting_Containers_With_Links.md
|
||||
- containers/Ambassadors.md
|
||||
- - containers/Local_Development_Workflow.md
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Resource_Limits.md
|
||||
- - containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
- - containers/Container_Engines.md
|
||||
- containers/Pods_Anatomy.md
|
||||
- containers/Ecosystem.md
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,77 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
-
|
||||
- containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Start_And_Attach.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Resource_Limits.md
|
||||
- # DAY 2
|
||||
- containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
-
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Installing_Docker.md
|
||||
- containers/Container_Engines.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
-
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Orchestration_Overview.md
|
||||
-
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
#-
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Ambassadors.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
slides/k8s-bootcamp.yml (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
title: |
|
||||
Intensive
|
||||
Kubernetes
|
||||
Bootcamp
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-20200609-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2020-06-ardan.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
-
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
-
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
- k8s/yamldeploy.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
-
|
||||
#- k8s/dryrun.md
|
||||
#- k8s/exercise-yaml.md
|
||||
- k8s/rollout.md
|
||||
#- k8s/record.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/ingress.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/whatsnext.md
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
-
|
||||
- |
|
||||
# (Extra material)
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-secrets.md
|
||||
|
||||
@@ -129,3 +129,8 @@ installed and set up `kubectl` to communicate with your cluster.
```

]

???

:EN:- Securely accessing internal services
:FR:- Accès sécurisé aux services internes

@@ -87,3 +87,8 @@
- Tunnels are also fine

(e.g. [k3s](https://k3s.io/) uses a tunnel to allow each node to contact the API server)

???

:EN:- Ensuring API server availability
:FR:- Assurer la disponibilité du serveur API

@@ -381,3 +381,8 @@ We demonstrated *update* and *watch* semantics.
- if the pod has special constraints that can't be met

- if the scheduler is not running (!)

???

:EN:- Kubernetes architecture review
:FR:- Passage en revue de l'architecture de Kubernetes
@@ -1,6 +1,74 @@
# Authentication and authorization

*And first, a little refresher!*
- In this section, we will:

- define authentication and authorization

- explain how they are implemented in Kubernetes

- talk about tokens, certificates, service accounts, RBAC ...

- But first: why do we need all this?

---

## The need for fine-grained security

- The Kubernetes API should only be available for identified users

- we don't want "guest access" (except in very rare scenarios)

- we don't want strangers to use our compute resources, delete our apps ...

- our keys and passwords should not be exposed to the public

- Users will often have different access rights

- cluster admin (similar to UNIX "root") can do everything

- developer might access specific resources, or a specific namespace

- supervision might have read only access to *most* resources

---

## Example: custom HTTP load balancer

- Let's imagine that we have a custom HTTP load balancer for multiple apps

- Each app has its own *Deployment* resource

- By default, the apps are "sleeping" and scaled to zero

- When a request comes in, the corresponding app gets woken up

- After some inactivity, the app is scaled down again

- This HTTP load balancer needs API access (to scale up/down)

- What if *a wild vulnerability appears*?

---

## Consequences of vulnerability

- If the HTTP load balancer has the same API access as we do:

*full cluster compromise (easy data leak, cryptojacking...)*

- If the HTTP load balancer has `update` permissions on the Deployments:

*defacement (easy), MITM / impersonation (medium to hard)*

- If the HTTP load balancer only has permission to `scale` the Deployments:

*denial-of-service*

- All these outcomes are bad, but some are worse than others (a minimal sketch of the "scale only" option follows below)
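To make the "scale only" option concrete, a namespaced Role and RoleBinding could look roughly like this (a sketch, not part of the original material; the namespace, resource names, and ServiceAccount are placeholders):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: scaler
  namespace: web
rules:
# Only the "scale" subresource of Deployments, nothing else
- apiGroups: [ apps ]
  resources: [ deployments/scale ]
  verbs: [ get, update, patch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: scaler
  namespace: web
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: scaler
subjects:
- kind: ServiceAccount
  name: http-load-balancer
  namespace: web
```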

---

## Definitions

- Authentication = verifying the identity of a person

@@ -147,7 +215,7 @@ class: extra-details

(if their key is compromised, or they leave the organization)

- Option 1: re-create a new CA and re-issue everyone's certificates
- Option 1: re-create a new CA and re-issue everyone's certificates
<br/>
→ Maybe OK if we only have a few users; no way otherwise

@@ -631,7 +699,7 @@ class: extra-details

- Let's look for these in existing ClusterRoleBindings:
```bash
kubectl get clusterrolebindings -o yaml |
kubectl get clusterrolebindings -o yaml |
grep -e kubernetes-admin -e system:masters
```

@@ -676,3 +744,17 @@ class: extra-details

- Both are available as standalone programs, or as plugins for `kubectl`

(`kubectl` plugins can be installed and managed with `krew`)

???

:EN:- Authentication and authorization in Kubernetes
:EN:- Authentication with tokens and certificates
:EN:- Authorization with RBAC (Role-Based Access Control)
:EN:- Restricting permissions with Service Accounts
:EN:- Working with Roles, Cluster Roles, Role Bindings, etc.

:FR:- Identification et droits d'accès dans Kubernetes
:FR:- Mécanismes d'identification par jetons et certificats
:FR:- Le modèle RBAC *(Role-Based Access Control)*
:FR:- Restreindre les permissions grâce aux *Service Accounts*
:FR:- Comprendre les *Roles*, *Cluster Roles*, *Role Bindings*, etc.
slides/k8s/batch-jobs.md (new file, 194 lines)
@@ -0,0 +1,194 @@
|
||||
# Executing batch jobs
|
||||
|
||||
- Deployments are great for stateless web apps
|
||||
|
||||
(as well as workers that keep running forever)
|
||||
|
||||
- Pods are great for one-off execution that we don't care about
|
||||
|
||||
(because they don't get automatically restarted if something goes wrong)
|
||||
|
||||
- Jobs are great for "long" background work
|
||||
|
||||
("long" being at least minutes our hours)
|
||||
|
||||
- CronJobs are great to schedule Jobs at regular intervals
|
||||
|
||||
(just like the classic UNIX `cron` daemon with its `crontab` files)
|
||||
|
||||
---
|
||||
|
||||
## Creating a Job
|
||||
|
||||
- A Job will create a Pod
|
||||
|
||||
- If the Pod fails, the Job will create another one
|
||||
|
||||
- The Job will keep trying until:
|
||||
|
||||
- either a Pod succeeds,
|
||||
|
||||
- or we hit the *backoff limit* of the Job (default=6)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a Job that has a 50% chance of success:
|
||||
```bash
|
||||
kubectl create job flipcoin --image=alpine -- sh -c 'exit $(($RANDOM%2))'
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Our Job in action
|
||||
|
||||
- Our Job will create a Pod named `flipcoin-xxxxx`
|
||||
|
||||
- If the Pod succeeds, the Job stops
|
||||
|
||||
- If the Pod fails, the Job creates another Pod
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the status of the Pod(s) created by the Job:
|
||||
```bash
|
||||
kubectl get pods --selector=job-name=flipcoin
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## More advanced jobs
|
||||
|
||||
- We can specify a number of "completions" (default=1)
|
||||
|
||||
- This indicates how many times the Job must be executed
|
||||
|
||||
- We can specify the "parallelism" (default=1)
|
||||
|
||||
- This indicates how many Pods should be running in parallel
|
||||
|
||||
- These options cannot be specified with `kubectl create job`
|
||||
|
||||
(we have to write our own YAML manifest to use them)
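For illustration, here is a sketch of what such a manifest could look like (the name is hypothetical; it reuses the coin-flipping command from the earlier example):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: flipcoin-batch        # hypothetical name
spec:
  completions: 10             # the Job needs 10 successful Pods in total
  parallelism: 3              # run at most 3 Pods at the same time
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - name: flipcoin
        image: alpine
        command: ["sh", "-c", "exit $(($RANDOM%2))"]
```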
|
||||
|
||||
---
|
||||
|
||||
## Scheduling periodic background work
|
||||
|
||||
- A Cron Job is a Job that will be executed at specific intervals
|
||||
|
||||
(the name comes from the traditional cronjobs executed by the UNIX crond)
|
||||
|
||||
- It requires a *schedule*, represented as five space-separated fields:
|
||||
|
||||
- minute [0,59]
|
||||
- hour [0,23]
|
||||
- day of the month [1,31]
|
||||
- month of the year [1,12]
|
||||
- day of the week ([0,6] with 0=Sunday)
|
||||
|
||||
- `*` means "all valid values"; `/N` means "every N"
|
||||
|
||||
- Example: `*/3 * * * *` means "every three minutes"
|
||||
|
||||
---
|
||||
|
||||
## Creating a Cron Job
|
||||
|
||||
- Let's create a simple job to be executed every three minutes
|
||||
|
||||
- Careful: make sure that the job terminates!
|
||||
|
||||
(By default, the Cron Job will not wait for a previous Job to finish before starting a new one)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the Cron Job:
|
||||
```bash
|
||||
kubectl create cronjob every3mins --schedule="*/3 * * * *" \
|
||||
--image=alpine -- sleep 10
|
||||
```
|
||||
|
||||
- Check the resource that was created:
|
||||
```bash
|
||||
kubectl get cronjobs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Cron Jobs in action
|
||||
|
||||
- At the specified schedule, the Cron Job will create a Job
|
||||
|
||||
- The Job will create a Pod
|
||||
|
||||
- The Job will make sure that the Pod completes
|
||||
|
||||
(re-creating another one if it fails, for instance if its node fails)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the Jobs that are created:
|
||||
```bash
|
||||
kubectl get jobs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(It will take a few minutes before the first job is scheduled.)
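By default, a new Job is started at each tick, even if the previous one is still running. If we write the manifest ourselves, we can change this with the `concurrencyPolicy` field; here is a sketch (the `apiVersion` may need to be adjusted depending on the cluster version):

```yaml
apiVersion: batch/v1beta1      # batch/v1 on more recent clusters
kind: CronJob
metadata:
  name: every3mins
spec:
  schedule: "*/3 * * * *"
  concurrencyPolicy: Forbid    # skip a tick if the previous Job is still running
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: sleep
            image: alpine
            command: ["sleep", "10"]
```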
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What about `kubectl run` before v1.18?
|
||||
|
||||
- Creating a Deployment:
|
||||
|
||||
`kubectl run`
|
||||
|
||||
- Creating a Pod:
|
||||
|
||||
`kubectl run --restart=Never`
|
||||
|
||||
- Creating a Job:
|
||||
|
||||
`kubectl run --restart=OnFailure`
|
||||
|
||||
- Creating a Cron Job:
|
||||
|
||||
`kubectl run --restart=OnFailure --schedule=...`
|
||||
|
||||
*Avoid using these forms, as they are deprecated since Kubernetes 1.18!*
|
||||
|
||||
---
|
||||
|
||||
## Beyond `kubectl create`
|
||||
|
||||
- As hinted earlier, `kubectl create` doesn't always expose all options
|
||||
|
||||
- can't express parallelism or completions of Jobs
|
||||
|
||||
- can't express Pods with multiple containers
|
||||
|
||||
- can't express healthchecks, resource limits
|
||||
|
||||
- etc.
|
||||
|
||||
- `kubectl create` and `kubectl run` are *helpers* that generate YAML manifests
|
||||
|
||||
- If we write these manifests ourselves, we can use all features and options
|
||||
|
||||
- We'll see later how to do that!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Running batch and cron jobs
|
||||
:FR:- Tâches périodiques *(cron)* et traitement par lots *(batch)*
|
||||
@@ -257,3 +257,8 @@ This is the TLS bootstrap mechanism, step by step.
|
||||
- [kubeadm token](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-token/) command
|
||||
|
||||
- [kubeadm join](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/) command (has details about [the join workflow](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/#join-workflow))
|
||||
|
||||
???
|
||||
|
||||
:EN:- Leveraging TLS bootstrap to join nodes
|
||||
:FR:- Ajout de nœuds grâce au *TLS bootstrap*
|
||||
|
||||
@@ -142,3 +142,8 @@ The list includes the following providers:
|
||||
- [configuration](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) (mainly for OpenStack)
|
||||
|
||||
- [deployment](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/)
|
||||
|
||||
???
|
||||
|
||||
:EN:- The Cloud Controller Manager
|
||||
:FR:- Le *Cloud Controller Manager*
|
||||
|
||||
@@ -364,3 +364,8 @@ docker run --rm --net host -v $PWD:/vol \
|
||||
- [bivac](https://github.com/camptocamp/bivac)
|
||||
|
||||
Backup Interface for Volumes Attached to Containers
|
||||
|
||||
???
|
||||
|
||||
:EN:- Backing up clusters
|
||||
:FR:- Politiques de sauvegarde
|
||||
|
||||
@@ -165,3 +165,12 @@ class: extra-details
|
||||
- Security advantage (stronger isolation between pods)
|
||||
|
||||
Check [this blog post](http://jpetazzo.github.io/2019/02/13/running-kubernetes-without-nodes-with-kiyot/) for more details.
|
||||
|
||||
???
|
||||
|
||||
:EN:- What happens when the cluster is at, or over, capacity
|
||||
:EN:- Cluster sizing and scaling
|
||||
|
||||
:FR:- Ce qui se passe quand il n'y a plus assez de ressources
|
||||
:FR:- Dimensionner et redimensionner ses clusters
|
||||
|
||||
|
||||
@@ -501,3 +501,11 @@ class: extra-details
|
||||
- Then upgrading kubeadm to 1.16.X, etc.
|
||||
|
||||
- **Make sure to read the release notes before upgrading!**
|
||||
|
||||
???
|
||||
|
||||
:EN:- Best practices for cluster upgrades
|
||||
:EN:- Example: upgrading a kubeadm cluster
|
||||
|
||||
:FR:- Bonnes pratiques pour la mise à jour des clusters
|
||||
:FR:- Exemple : mettre à jour un cluster kubeadm
|
||||
|
||||
@@ -574,3 +574,8 @@ done
|
||||
- This could be useful for embedded platforms with very limited resources
|
||||
|
||||
(or lab environments for learning purposes)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Configuring CNI plugins
|
||||
:FR:- Configurer des plugins CNI
|
||||
|
||||
@@ -401,3 +401,8 @@ class: pic
|
||||
- IP addresses are associated with *pods*, not with individual containers
|
||||
|
||||
Both diagrams used with permission.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes concepts
|
||||
:FR:- Kubernetes en théorie
|
||||
|
||||
@@ -547,3 +547,13 @@ spec:
|
||||
- With RBAC, we can authorize a user to access configmaps, but not secrets
|
||||
|
||||
(since they are two different kinds of resources)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Managing application configuration
|
||||
:EN:- Exposing configuration with the downward API
|
||||
:EN:- Exposing configuration with Config Maps and Secrets
|
||||
|
||||
:FR:- Gérer la configuration des applications
|
||||
:FR:- Configuration au travers de la *downward API*
|
||||
:FR:- Configuration via les *Config Maps* et *Secrets*
|
||||
|
||||
@@ -263,3 +263,8 @@ spec:
|
||||
#name: web-xyz1234567-pqr89
|
||||
EOF
|
||||
```
|
||||
|
||||
???
|
||||
|
||||
:EN:- Control plane authentication
|
||||
:FR:- Sécurisation du plan de contrôle
|
||||
|
||||
@@ -132,11 +132,33 @@ For a user named `jean.doe`, we will have:
|
||||
|
||||
- ServiceAccount `jean.doe` in Namespace `users`
|
||||
|
||||
- CertificateSigningRequest `users:jean.doe`
|
||||
- CertificateSigningRequest `user=jean.doe`
|
||||
|
||||
- ClusterRole `users:jean.doe` giving read/write access to that CSR
|
||||
- ClusterRole `user=jean.doe` giving read/write access to that CSR
|
||||
|
||||
- ClusterRoleBinding `users:jean.doe` binding ClusterRole and ServiceAccount
|
||||
- ClusterRoleBinding `user=jean.doe` binding ClusterRole and ServiceAccount
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## About resource name constraints
|
||||
|
||||
- Most Kubernetes identifiers and names are fairly restricted
|
||||
|
||||
- They generally are DNS-1123 *labels* or *subdomains* (from [RFC 1123](https://tools.ietf.org/html/rfc1123))
|
||||
|
||||
- A label is lowercase letters, numbers, dashes; can't start or finish with a dash
|
||||
|
||||
- A subdomain is one or multiple labels separated by dots
|
||||
|
||||
- Some resources have more relaxed constraints, and can be "path segment names"
|
||||
|
||||
(uppercase are allowed, as well as some characters like `#:?!,_`)
|
||||
|
||||
- This includes RBAC objects (like Roles, RoleBindings...) and CSRs
|
||||
|
||||
- See the [Identifiers and Names](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md) design document and the [Object Names and IDs](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#path-segment-names) documentation page for more details
|
||||
|
||||
---
|
||||
|
||||
@@ -153,7 +175,7 @@ For a user named `jean.doe`, we will have:
|
||||
|
||||
- Create the ServiceAccount, ClusterRole, ClusterRoleBinding for `jean.doe`:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/users:jean.doe.yaml
|
||||
kubectl apply -f ~/container.training/k8s/user=jean.doe.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -195,7 +217,13 @@ For a user named `jean.doe`, we will have:
|
||||
|
||||
- Add a new context using that identity:
|
||||
```bash
|
||||
kubectl config set-context jean.doe --user=token:jean.doe --cluster=kubernetes
|
||||
kubectl config set-context jean.doe --user=token:jean.doe --cluster=`kubernetes`
|
||||
```
|
||||
(Make sure to adapt the cluster name if yours is different!)
|
||||
|
||||
- Use that context:
|
||||
```bash
|
||||
kubectl config use-context jean.doe
|
||||
```
|
||||
|
||||
]
|
||||
@@ -216,7 +244,7 @@ For a user named `jean.doe`, we will have:
|
||||
|
||||
- Try to access "our" CertificateSigningRequest:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe
|
||||
kubectl get csr user=jean.doe
|
||||
```
|
||||
(This should tell us "NotFound")
|
||||
|
||||
@@ -273,7 +301,7 @@ The command above generates:
|
||||
apiVersion: certificates.k8s.io/v1beta1
|
||||
kind: CertificateSigningRequest
|
||||
metadata:
|
||||
name: users:jean.doe
|
||||
name: user=jean.doe
|
||||
spec:
|
||||
request: $(base64 -w0 < csr.pem)
|
||||
usages:
|
||||
@@ -324,12 +352,12 @@ The command above generates:
|
||||
|
||||
- Inspect the CSR:
|
||||
```bash
|
||||
kubectl describe csr users:jean.doe
|
||||
kubectl describe csr user=jean.doe
|
||||
```
|
||||
|
||||
- Approve it:
|
||||
```bash
|
||||
kubectl certificate approve users:jean.doe
|
||||
kubectl certificate approve user=jean.doe
|
||||
```
|
||||
|
||||
]
|
||||
@@ -347,7 +375,7 @@ The command above generates:
|
||||
|
||||
- Retrieve the updated CSR object and extract the certificate:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe \
|
||||
kubectl get csr user=jean.doe \
|
||||
-o jsonpath={.status.certificate} \
|
||||
| base64 -d > cert.pem
|
||||
```
|
||||
@@ -424,3 +452,8 @@ To be usable in real environments, we would need to add:
|
||||
- we get strong security *and* convenience
|
||||
|
||||
- Systems like Vault also have certificate issuance mechanisms
|
||||
|
||||
???
|
||||
|
||||
:EN:- Generating user certificates with the CSR API
|
||||
:FR:- Génération de certificats utilisateur avec la CSR API
|
||||
|
||||
@@ -688,3 +688,8 @@ class: extra-details
|
||||
(by setting their label accordingly)
|
||||
|
||||
- This gives us building blocks for canary and blue/green deployments
|
||||
|
||||
???
|
||||
|
||||
:EN:- Scaling with Daemon Sets
|
||||
:FR:- Utilisation de Daemon Sets
|
||||
|
||||
@@ -172,3 +172,8 @@ The dashboard will then ask you which authentication you want to use.
|
||||
- It introduces new failure modes
|
||||
|
||||
(for instance, if you try to apply YAML from a link that's no longer valid)
|
||||
|
||||
???
|
||||
|
||||
:EN:- The Kubernetes dashboard
|
||||
:FR:- Le *dashboard* Kubernetes
|
||||
|
||||
@@ -26,3 +26,8 @@
|
||||
- When we want to change some resource, we update the *spec*
|
||||
|
||||
- Kubernetes will then *converge* that resource
|
||||
|
||||
???
|
||||
|
||||
:EN:- Declarative vs imperative models
|
||||
:FR:- Modèles déclaratifs et impératifs
|
||||
|
||||
@@ -823,3 +823,8 @@ class: extra-details
|
||||
(it could be as a bare process, or in a container/pod using the host network)
|
||||
|
||||
- ... And it expects to be listening on port 6443 with TLS
|
||||
|
||||
???
|
||||
|
||||
:EN:- Building our own cluster from scratch
|
||||
:FR:- Construire son cluster à la main
|
||||
|
||||
@@ -344,3 +344,14 @@ class: extra-details
|
||||
- [Dynamic Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)
|
||||
|
||||
- [Aggregation Layer](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Extending the Kubernetes API
|
||||
:EN:- Custom Resource Definitions (CRDs)
|
||||
:EN:- The aggregation layer
|
||||
:EN:- Admission control and webhooks
|
||||
|
||||
:FR:- Comment étendre l'API Kubernetes
|
||||
:FR:- Les CRDs *(Custom Resource Definitions)*
|
||||
:FR:- Extension via *aggregation layer*, *admission control*, *webhooks*
|
||||
|
||||
@@ -237,3 +237,8 @@
|
||||
- Gitkube can also deploy Helm charts
|
||||
|
||||
(instead of raw YAML files)
|
||||
|
||||
???
|
||||
|
||||
:EN:- GitOps
|
||||
:FR:- GitOps
|
||||
|
||||
@@ -154,9 +154,9 @@ It will use the default success threshold (1 successful attempt = alive).
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `rng-daemonset.yaml` and add the liveness probe
|
||||
- Edit `rng-deployment.yaml` and add the liveness probe
|
||||
```bash
|
||||
vim rng-daemonset.yaml
|
||||
vim rng-deployment.yaml
|
||||
```
|
||||
|
||||
- Load the YAML for all the resources of DockerCoins:
|
||||
@@ -333,3 +333,8 @@ class: extra-details
|
||||
(and have gcr.io/pause take care of the reaping)
|
||||
|
||||
- Discussion of this in [Video - 10 Ways to Shoot Yourself in the Foot with Kubernetes, #9 Will Surprise You](https://www.youtube.com/watch?v=QKI-JRs2RIE)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Adding healthchecks to an app
|
||||
:FR:- Ajouter des *healthchecks* à une application
|
||||
|
||||
@@ -282,3 +282,8 @@ If the Redis process becomes unresponsive, it will be killed.
|
||||
- check the timestamp of that file from an exec probe
|
||||
|
||||
- Writing logs (and checking them from the probe) also works
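As an illustration, an exec probe checking the age of such a file could look like this sketch (the file path and thresholds are hypothetical):

```yaml
# Excerpt of a container spec (path and thresholds are hypothetical)
livenessProbe:
  exec:
    command:
    - sh
    - -c
    - test $(( $(date +%s) - $(stat -c %Y /tmp/heartbeat) )) -lt 60
  periodSeconds: 10
  failureThreshold: 3
```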
|
||||
|
||||
???
|
||||
|
||||
:EN:- Using healthchecks to improve availability
|
||||
:FR:- Utiliser des *healthchecks* pour améliorer la disponibilité
|
||||
|
||||
@@ -237,3 +237,8 @@ We see the components mentioned above: `Chart.yaml`, `templates/`, `values.yaml`
|
||||
- This can be used for database migrations, backups, notifications, smoke tests ...
|
||||
|
||||
- Hooks named `test` are executed only when running `helm test RELEASE-NAME`
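For instance, a chart could ship a test hook similar to this sketch in its `templates/` directory (the image and command are hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: "{{ .Release.Name }}-smoke-test"
  annotations:
    "helm.sh/hook": test       # only executed by `helm test RELEASE-NAME`
spec:
  restartPolicy: Never
  containers:
  - name: smoke-test
    image: curlimages/curl     # hypothetical image
    command: ["curl", "--fail", "http://{{ .Release.Name }}-web/"]
```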
|
||||
|
||||
???
|
||||
|
||||
:EN:- Helm charts format
|
||||
:FR:- Le format des *Helm charts*
|
||||
|
||||
@@ -218,3 +218,8 @@ have details about recommended annotations and labels.
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Writing a basic Helm chart for the whole app
|
||||
:FR:- Écriture d'un *chart* Helm simplifié
|
||||
|
||||
@@ -121,7 +121,7 @@ This creates a basic chart in the directory `helmcoins`.
|
||||
helm install COMPONENT-NAME CHART-DIRECTORY
|
||||
```
|
||||
|
||||
- We can also use the following command, which is idempotent:
|
||||
- We can also use the following command, which is *idempotent*:
|
||||
```bash
|
||||
helm upgrade COMPONENT-NAME CHART-DIRECTORY --install
|
||||
```
|
||||
@@ -139,6 +139,28 @@ This creates a basic chart in the directory `helmcoins`.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## "Idempotent"
|
||||
|
||||
- Idempotent = an action that can be applied multiple times without changing the result
|
||||
|
||||
(the word is commonly used in maths and computer science)
|
||||
|
||||
- In this context, this means:
|
||||
|
||||
- if the action (installing the chart) wasn't done, do it
|
||||
|
||||
- if the action was already done, don't do anything
|
||||
|
||||
- Ideally, when such an action fails, it can be retried safely
|
||||
|
||||
(as opposed to, e.g., installing a new release each time we run it)
|
||||
|
||||
- Other example: `kubectl apply -f some-file.yaml`
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- Let's see if DockerCoins is working!
|
||||
@@ -577,3 +599,8 @@ We can look at the definition, but it's fairly complex ...
|
||||
- We can change the number of workers with `replicaCount`
|
||||
|
||||
- And much more!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Writing better Helm charts for app components
|
||||
:FR:- Écriture de *charts* composant par composant
|
||||
|
||||
@@ -18,6 +18,25 @@
|
||||
|
||||
---
|
||||
|
||||
## CNCF graduation status
|
||||
|
||||
- On April 30th 2020, Helm was the 10th project to *graduate* within the CNCF
|
||||
|
||||
.emoji[🎉]
|
||||
|
||||
(alongside Containerd, Prometheus, and Kubernetes itself)
|
||||
|
||||
- This is an acknowledgement by the CNCF for projects that
|
||||
|
||||
*demonstrate thriving adoption, an open governance process,
|
||||
<br/>
|
||||
and a strong commitment to community, sustainability, and inclusivity.*
|
||||
|
||||
- See [CNCF announcement](https://www.cncf.io/announcement/2020/04/30/cloud-native-computing-foundation-announces-helm-graduation/)
|
||||
and [Helm announcement](https://helm.sh/blog/celebrating-helms-cncf-graduation/)
|
||||
|
||||
---
|
||||
|
||||
## Helm concepts
|
||||
|
||||
- `helm` is a CLI tool
|
||||
@@ -417,3 +436,13 @@ All unspecified values will take the default values defined in the chart.
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Helm concepts
|
||||
:EN:- Installing software with Helm
|
||||
:EN:- Helm 2, Helm 3, and the Helm Hub
|
||||
|
||||
:FR:- Fonctionnement général de Helm
|
||||
:FR:- Installer des composants via Helm
|
||||
:FR:- Helm 2, Helm 3, et le *Helm Hub*
|
||||
|
||||
@@ -232,3 +232,8 @@ The chart is in a structured format, but it's entirely captured in this JSON.
|
||||
(including the full source of the chart, and the values used)
|
||||
|
||||
- This allows arbitrary rollbacks, as well as tweaking values even without having access to the source of the chart (or the chart repo) used for deployment
|
||||
|
||||
???
|
||||
|
||||
:EN:- Deep dive into Helm internals
|
||||
:FR:- Fonctionnement interne de Helm
|
||||
|
||||
@@ -306,3 +306,8 @@ This can also be set with `--cpu-percent=`.
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Auto-scaling resources
|
||||
:FR:- *Auto-scaling* (dimensionnement automatique) des ressources
|
||||
|
||||
@@ -718,3 +718,8 @@ We also need:
|
||||
(create them, promote them, delete them ...)
|
||||
|
||||
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).
|
||||
|
||||
???
|
||||
|
||||
:EN:- The Ingress resource
|
||||
:FR:- La ressource *ingress*
|
||||
|
||||
@@ -155,3 +155,8 @@ For critical services, we might want to precisely control the update process.
|
||||
- Even better if it's combined with DNS integration
|
||||
|
||||
(to facilitate name → ClusterIP resolution)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Interconnecting clusters
|
||||
:FR:- Interconnexion de clusters
|
||||
|
||||
162
slides/k8s/kubectl-logs.md
Normal file
@@ -0,0 +1,162 @@
|
||||
# Revisiting `kubectl logs`
|
||||
|
||||
- In this section, we assume that we have a Deployment with multiple Pods
|
||||
|
||||
(e.g. `pingpong` that we scaled to at least 3 pods)
|
||||
|
||||
- We will highlight some of the limitations of `kubectl logs`
|
||||
|
||||
---
|
||||
|
||||
## Streaming logs of multiple pods
|
||||
|
||||
- By default, `kubectl logs` shows us the output of a single Pod
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to check the output of the Pods related to a Deployment:
|
||||
```bash
|
||||
kubectl logs deploy/pingpong --tail 1 --follow
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait using pod/pingpong-```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
`kubectl logs` only shows us the logs of one of the Pods.
|
||||
|
||||
---
|
||||
|
||||
## Viewing logs of multiple pods
|
||||
|
||||
- When we specify a deployment name, only one single pod's logs are shown
|
||||
|
||||
- We can view the logs of multiple pods by specifying a *selector*
|
||||
|
||||
- If we check the pods created by the deployment, they all have the label `app=pingpong`
|
||||
|
||||
(this is just a default label that gets added when using `kubectl create deployment`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the last line of log from all pods with the `app=pingpong` label:
|
||||
```bash
|
||||
kubectl logs -l app=pingpong --tail 1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Streaming logs of multiple pods
|
||||
|
||||
- Can we stream the logs of all our `pingpong` pods?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Combine `-l` and `-f` flags:
|
||||
```bash
|
||||
kubectl logs -l app=pingpong --tail 1 -f
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait seq=```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
*Note: combining `-l` and `-f` is only possible since Kubernetes 1.14!*
|
||||
|
||||
*Let's try to understand why ...*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Streaming logs of many pods
|
||||
|
||||
- Let's see what happens if we try to stream the logs for more than 5 pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Scale up our deployment:
|
||||
```bash
|
||||
kubectl scale deployment pingpong --replicas=8
|
||||
```
|
||||
|
||||
- Stream the logs:
|
||||
```bash
|
||||
kubectl logs -l app=pingpong --tail 1 -f
|
||||
```
|
||||
|
||||
<!-- ```wait error:``` -->
|
||||
|
||||
]
|
||||
|
||||
We see a message like the following one:
|
||||
```
|
||||
error: you are attempting to follow 8 log streams,
|
||||
but maximum allowed concurrency is 5,
|
||||
use --max-log-requests to increase the limit
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Why can't we stream the logs of many pods?
|
||||
|
||||
- `kubectl` opens one connection to the API server per pod
|
||||
|
||||
- For each pod, the API server opens one extra connection to the corresponding kubelet
|
||||
|
||||
- If there are 1000 pods in our deployment, that's 1000 inbound + 1000 outbound connections on the API server
|
||||
|
||||
- This could easily put a lot of stress on the API server
|
||||
|
||||
- Prior to Kubernetes 1.14, it was decided to *not* allow multiple connections
|
||||
|
||||
- From Kubernetes 1.14, it is allowed, but limited to 5 connections
|
||||
|
||||
(this can be changed with `--max-log-requests`)
|
||||
|
||||
- For more details about the rationale, see
|
||||
[PR #67573](https://github.com/kubernetes/kubernetes/pull/67573)
|
||||
|
||||
---
|
||||
|
||||
## Shortcomings of `kubectl logs`
|
||||
|
||||
- We don't see which pod sent which log line
|
||||
|
||||
- If pods are restarted / replaced, the log stream stops
|
||||
|
||||
- If new pods are added, we don't see their logs
|
||||
|
||||
- To stream the logs of multiple pods, we need to write a selector
|
||||
|
||||
- There are external tools to address these shortcomings
|
||||
|
||||
(e.g.: [Stern](https://github.com/wercker/stern))
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl logs -l ... --tail N`
|
||||
|
||||
- If we run this with Kubernetes 1.12, the last command shows multiple lines
|
||||
|
||||
- This is a regression when `--tail` is used together with `-l`/`--selector`
|
||||
|
||||
- It always shows the last 10 lines of output for each container
|
||||
|
||||
(instead of the number of lines specified on the command line)
|
||||
|
||||
- The problem was fixed in Kubernetes 1.13
|
||||
|
||||
*See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details.*
|
||||
@@ -384,11 +384,11 @@ class: extra-details
|
||||
kubectl logs deploy/pingpong --tail 1 --follow
|
||||
```
|
||||
|
||||
- Leave that command running, so that we can keep an eye on these logs
|
||||
- Stop it with Ctrl-C
|
||||
|
||||
<!--
|
||||
```wait seq=3```
|
||||
```tmux split-pane -h```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
@@ -411,62 +411,55 @@ class: extra-details
|
||||
kubectl scale deployment pingpong --replicas 3
|
||||
```
|
||||
|
||||
- Check that we now have multiple pods:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: what if we tried to scale `replicaset.apps/pingpong-xxxxxxxxxx`?
|
||||
|
||||
We could! But the *deployment* would notice it right away, and scale back to the initial level.
|
||||
|
||||
---
|
||||
|
||||
## Log streaming
|
||||
class: extra-details
|
||||
|
||||
- Let's look again at the output of `kubectl logs`
|
||||
## Scaling a Replica Set
|
||||
|
||||
(the one we started before scaling up)
|
||||
- What if we scale the Replica Set instead of the Deployment?
|
||||
|
||||
- `kubectl logs` shows us one line per second
|
||||
- The Deployment would notice it right away and scale back to the initial level
|
||||
|
||||
- We could expect 3 lines per second
|
||||
- The Replica Set makes sure that we have the right number of Pods
|
||||
|
||||
(since we should now have 3 pods running `ping`)
|
||||
- The Deployment makes sure that the Replica Set has the right size
|
||||
|
||||
- Let's try to figure out what's happening!
|
||||
(conceptually, it delegates the management of the Pods to the Replica Set)
|
||||
|
||||
- This might seem weird (why this extra layer?) but will soon make sense
|
||||
|
||||
(when we will look at how rolling updates work!)
|
||||
|
||||
---
|
||||
|
||||
## Streaming logs of multiple pods
|
||||
|
||||
- What happens if we restart `kubectl logs`?
|
||||
- What happens if we try `kubectl logs` now that we have multiple pods?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Interrupt `kubectl logs` (with Ctrl-C)
|
||||
|
||||
<!--
|
||||
```tmux last-pane```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
- Restart it:
|
||||
```bash
|
||||
kubectl logs deploy/pingpong --tail 1 --follow
|
||||
kubectl logs deploy/pingpong --tail 3
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait using pod/pingpong-```
|
||||
```tmux last-pane```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them.
|
||||
`kubectl logs` will warn us that multiple pods were found.
|
||||
|
||||
Let's leave `kubectl logs` running while we keep exploring.
|
||||
It is showing us only one of them.
|
||||
|
||||
We'll see later how to address that shortcoming.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Resilience
|
||||
|
||||
- The *deployment* `pingpong` watches its *replica set*
|
||||
@@ -524,365 +517,7 @@ Let's leave `kubectl logs` running while we keep exploring.
|
||||
|
||||
- The pod is then killed, and `kubectl logs` exits
|
||||
|
||||
---
|
||||
???
|
||||
|
||||
## Viewing logs of multiple pods
|
||||
|
||||
- When we specify a deployment name, only one single pod's logs are shown
|
||||
|
||||
- We can view the logs of multiple pods by specifying a *selector*
|
||||
|
||||
- A selector is a logic expression using *labels*
|
||||
|
||||
- If we check the pods created by the deployment, they all have the label `app=pingpong`
|
||||
|
||||
(this is just a default label that gets added when using `kubectl create deployment`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the last line of log from all pods with the `app=pingpong` label:
|
||||
```bash
|
||||
kubectl logs -l app=pingpong --tail 1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Streaming logs of multiple pods
|
||||
|
||||
- Can we stream the logs of all our `pingpong` pods?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Combine `-l` and `-f` flags:
|
||||
```bash
|
||||
kubectl logs -l app=pingpong --tail 1 -f
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait seq=```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
*Note: combining `-l` and `-f` is only possible since Kubernetes 1.14!*
|
||||
|
||||
*Let's try to understand why ...*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Streaming logs of many pods
|
||||
|
||||
- Let's see what happens if we try to stream the logs for more than 5 pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Scale up our deployment:
|
||||
```bash
|
||||
kubectl scale deployment pingpong --replicas=8
|
||||
```
|
||||
|
||||
- Stream the logs:
|
||||
```bash
|
||||
kubectl logs -l app=pingpong --tail 1 -f
|
||||
```
|
||||
|
||||
<!-- ```wait error:``` -->
|
||||
|
||||
]
|
||||
|
||||
We see a message like the following one:
|
||||
```
|
||||
error: you are attempting to follow 8 log streams,
|
||||
but maximum allowed concurrency is 5,
|
||||
use --max-log-requests to increase the limit
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Why can't we stream the logs of many pods?
|
||||
|
||||
- `kubectl` opens one connection to the API server per pod
|
||||
|
||||
- For each pod, the API server opens one extra connection to the corresponding kubelet
|
||||
|
||||
- If there are 1000 pods in our deployment, that's 1000 inbound + 1000 outbound connections on the API server
|
||||
|
||||
- This could easily put a lot of stress on the API server
|
||||
|
||||
- Prior to Kubernetes 1.14, it was decided to *not* allow multiple connections
|
||||
|
||||
- From Kubernetes 1.14, it is allowed, but limited to 5 connections
|
||||
|
||||
(this can be changed with `--max-log-requests`)
|
||||
|
||||
- For more details about the rationale, see
|
||||
[PR #67573](https://github.com/kubernetes/kubernetes/pull/67573)
|
||||
|
||||
---
|
||||
|
||||
## Shortcomings of `kubectl logs`
|
||||
|
||||
- We don't see which pod sent which log line
|
||||
|
||||
- If pods are restarted / replaced, the log stream stops
|
||||
|
||||
- If new pods are added, we don't see their logs
|
||||
|
||||
- To stream the logs of multiple pods, we need to write a selector
|
||||
|
||||
- There are external tools to address these shortcomings
|
||||
|
||||
(e.g.: [Stern](https://github.com/wercker/stern))
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl logs -l ... --tail N`
|
||||
|
||||
- If we run this with Kubernetes 1.12, the last command shows multiple lines
|
||||
|
||||
- This is a regression when `--tail` is used together with `-l`/`--selector`
|
||||
|
||||
- It always shows the last 10 lines of output for each container
|
||||
|
||||
(instead of the number of lines specified on the command line)
|
||||
|
||||
- The problem was fixed in Kubernetes 1.13
|
||||
|
||||
*See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details.*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Party tricks involving IP addresses
|
||||
|
||||
- It is possible to specify an IP address with less than 4 bytes
|
||||
|
||||
(example: `127.1`)
|
||||
|
||||
- Zeroes are then inserted in the middle
|
||||
|
||||
- As a result, `127.1` expands to `127.0.0.1`
|
||||
|
||||
- So we can `ping 127.1` to ping `localhost`!
|
||||
|
||||
(See [this blog post](https://ma.ttias.be/theres-more-than-one-way-to-write-an-ip-address/) for more details.)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## More party tricks with IP addresses
|
||||
|
||||
- We can also ping `1.1`
|
||||
|
||||
- `1.1` will expand to `1.0.0.1`
|
||||
|
||||
- This is one of the addresses of Cloudflare's
|
||||
[public DNS resolver](https://blog.cloudflare.com/announcing-1111/)
|
||||
|
||||
- This is a quick way to check connectivity
|
||||
|
||||
(if we can reach 1.1, we probably have internet access)
|
||||
|
||||
---
|
||||
|
||||
## Creating other kinds of resources
|
||||
|
||||
- Deployments are great for stateless web apps
|
||||
|
||||
(as well as workers that keep running forever)
|
||||
|
||||
- Jobs are great for "long" background work
|
||||
|
||||
("long" being at least minutes our hours)
|
||||
|
||||
- CronJobs are great to schedule Jobs at regular intervals
|
||||
|
||||
(just like the classic UNIX `cron` daemon with its `crontab` files)
|
||||
|
||||
- Pods are great for one-off execution that we don't care about
|
||||
|
||||
(because they don't get automatically restarted if something goes wrong)
|
||||
|
||||
---
|
||||
|
||||
## Creating a Job
|
||||
|
||||
- A Job will create a Pod
|
||||
|
||||
- If the Pod fails, the Job will create another one
|
||||
|
||||
- The Job will keep trying until:
|
||||
|
||||
- either a Pod succeeds,
|
||||
|
||||
- or we hit the *backoff limit* of the Job (default=6)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a Job that has a 50% chance of success:
|
||||
```bash
|
||||
kubectl create job flipcoin --image=alpine -- sh -c 'exit $(($RANDOM%2))'
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Our Job in action
|
||||
|
||||
- Our Job will create a Pod named `flipcoin-xxxxx`
|
||||
|
||||
- If the Pod succeeds, the Job stops
|
||||
|
||||
- If the Pod fails, the Job creates another Pod
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the status of the Pod(s) created by the Job:
|
||||
```bash
|
||||
kubectl get pods --selector=job-name=flipcoin
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## More advanced jobs
|
||||
|
||||
- We can specify a number of "completions" (default=1)
|
||||
|
||||
- This indicates how many times the Job must be executed
|
||||
|
||||
- We can specify the "parallelism" (default=1)
|
||||
|
||||
- This indicates how many Pods should be running in parallel
|
||||
|
||||
- These options cannot be specified with `kubectl create job`
|
||||
|
||||
(we have to write our own YAML manifest to use them)
|
||||
|
||||
---
|
||||
|
||||
## Scheduling periodic background work
|
||||
|
||||
- A Cron Job is a Job that will be executed at specific intervals
|
||||
|
||||
(the name comes from the traditional cronjobs executed by the UNIX crond)
|
||||
|
||||
- It requires a *schedule*, represented as five space-separated fields:
|
||||
|
||||
- minute [0,59]
|
||||
- hour [0,23]
|
||||
- day of the month [1,31]
|
||||
- month of the year [1,12]
|
||||
- day of the week ([0,6] with 0=Sunday)
|
||||
|
||||
- `*` means "all valid values"; `/N` means "every N"
|
||||
|
||||
- Example: `*/3 * * * *` means "every three minutes"
|
||||
|
||||
---
|
||||
|
||||
## Creating a Cron Job
|
||||
|
||||
- Let's create a simple job to be executed every three minutes
|
||||
|
||||
- Careful: make sure that the job terminates!
|
||||
|
||||
(By default, the Cron Job will not wait for a previous Job to finish before starting a new one)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the Cron Job:
|
||||
```bash
|
||||
kubectl create cronjob every3mins --schedule="*/3 * * * *" \
|
||||
--image=alpine -- sleep 10
|
||||
```
|
||||
|
||||
- Check the resource that was created:
|
||||
```bash
|
||||
kubectl get cronjobs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Cron Jobs in action
|
||||
|
||||
- At the specified schedule, the Cron Job will create a Job
|
||||
|
||||
- The Job will create a Pod
|
||||
|
||||
- The Job will make sure that the Pod completes
|
||||
|
||||
(re-creating another one if it fails, for instance if its node fails)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the Jobs that are created:
|
||||
```bash
|
||||
kubectl get jobs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(It will take a few minutes before the first job is scheduled.)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What about `kubectl run` before v1.18?
|
||||
|
||||
- Creating a Deployment:
|
||||
|
||||
`kubectl run`
|
||||
|
||||
- Creating a Pod:
|
||||
|
||||
`kubectl run --restart=Never`
|
||||
|
||||
- Creating a Job:
|
||||
|
||||
`kubectl run --restart=OnFailure`
|
||||
|
||||
- Creating a Cron Job:
|
||||
|
||||
`kubectl run --restart=OnFailure --schedule=...`
|
||||
|
||||
*Avoid using these forms, as they are deprecated since Kubernetes 1.18!*
|
||||
|
||||
---
|
||||
|
||||
## Beyond `kubectl create`
|
||||
|
||||
- As hinted earlier, `kubectl create` doesn't always expose all options
|
||||
|
||||
- can't express parallelism or completions of Jobs
|
||||
|
||||
- can't express Pods with multiple containers
|
||||
|
||||
- can't express healthchecks, resource limits
|
||||
|
||||
- etc.
|
||||
|
||||
- `kubectl create` and `kubectl run` are *helpers* that generate YAML manifests
|
||||
|
||||
- If we write these manifests ourselves, we can use all features and options
|
||||
|
||||
- We'll see later how to do that!
|
||||
:EN:- Running pods and deployments
|
||||
:FR:- Créer un pod et un déploiement
|
||||
@@ -438,3 +438,13 @@ class: extra-details
|
||||
- They can also handle TLS certificates, URL rewriting ...
|
||||
|
||||
- They require an *Ingress Controller* to function
|
||||
|
||||
???
|
||||
|
||||
:EN:- Service discovery and load balancing
|
||||
:EN:- Accessing pods through services
|
||||
:EN:- Service types: ClusterIP, NodePort, LoadBalancer
|
||||
|
||||
:FR:- Exposer un service
|
||||
:FR:- Différents types de services : ClusterIP, NodePort, LoadBalancer
|
||||
:FR:- Utiliser CoreDNS pour la *service discovery*
|
||||
|
||||
@@ -578,3 +578,8 @@ $ curl -k https://10.96.0.1
|
||||
- Code running in pods can connect to services using their name
|
||||
|
||||
(e.g. https://kubernetes/...)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Getting started with kubectl
|
||||
:FR:- Se familiariser avec kubectl
|
||||
|
||||
@@ -145,3 +145,8 @@ class: extra-details
|
||||
- Some solutions can fill multiple roles
|
||||
|
||||
(e.g. kube-router can be set up to provide the pod network and/or network policies and/or replace kube-proxy)
|
||||
|
||||
???
|
||||
|
||||
:EN:- The Kubernetes network model
|
||||
:FR:- Le modèle réseau de Kubernetes
|
||||
|
||||
@@ -31,23 +31,17 @@
|
||||
|
||||
---
|
||||
|
||||
## Cloning some repos
|
||||
## Cloning the repository
|
||||
|
||||
- We will need two repositories:
|
||||
- We will need to clone the training repository
|
||||
|
||||
- the first one has the "DockerCoins" demo app
|
||||
- It has the DockerCoins demo app ...
|
||||
|
||||
- the second one has these slides, some scripts, more manifests ...
|
||||
- ... as well as these slides, some scripts, more manifests
|
||||
|
||||
.exercise[
|
||||
|
||||
- Clone the kubercoins repository on `node1`:
|
||||
```bash
|
||||
git clone https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
|
||||
- Clone the container.training repository as well:
|
||||
- Clone the repository on `node1`:
|
||||
```bash
|
||||
git clone https://@@GITREPO@@
|
||||
```
|
||||
@@ -62,9 +56,9 @@ Without further ado, let's start this application!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Apply all the manifests from the kubercoins repository:
|
||||
- Apply the manifest for dockercoins:
|
||||
```bash
|
||||
kubectl apply -f kubercoins/
|
||||
kubectl apply -f ~/container.training/k8s/dockercoins.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -242,3 +236,8 @@ https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/wo
|
||||
|
||||
A drawing area should show up, and after a few seconds, a blue
|
||||
graph will appear.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Deploying a sample app with YAML manifests
|
||||
:FR:- Lancer une application de démo avec du YAML
|
||||
|
||||
@@ -8,45 +8,164 @@
|
||||
|
||||
- They are left untouched by Kustomize
|
||||
|
||||
- Kustomize lets us define *overlays* that extend or change the resource files
|
||||
- Kustomize lets us define *kustomizations*
|
||||
|
||||
- A *kustomization* is conceptually similar to a *layer*
|
||||
|
||||
- Technically, a *kustomization* is a file named `kustomization.yaml`
|
||||
|
||||
(or a directory containing that file + additional files)
|
||||
|
||||
---
|
||||
|
||||
## Differences with Helm
|
||||
## What's in a kustomization
|
||||
|
||||
- Helm charts use placeholders `{{ like.this }}`
|
||||
- A kustomization can do any combination of the following:
|
||||
|
||||
- Kustomize "bases" are standard Kubernetes YAML
|
||||
- include other kustomizations
|
||||
|
||||
- It is possible to use an existing set of YAML as a Kustomize base
|
||||
- include Kubernetes resources defined in YAML files
|
||||
|
||||
- As a result, writing a Helm chart is more work ...
|
||||
- patch Kubernetes resources (change values)
|
||||
|
||||
- ... But Helm charts are also more powerful; e.g. they can:
|
||||
- add labels or annotations to all resources
|
||||
|
||||
- use flags to conditionally include resources or blocks
|
||||
- specify ConfigMaps and Secrets from literal values or local files
|
||||
|
||||
- check if a given Kubernetes API group is supported
|
||||
|
||||
- [and much more](https://helm.sh/docs/chart_template_guide/)
|
||||
(... And a few more advanced features that we won't cover today!)
|
||||
|
||||
---
|
||||
|
||||
## Kustomize concepts
|
||||
## A simple kustomization
|
||||
|
||||
- Kustomize needs a `kustomization.yaml` file
|
||||
This features a Deployment, Service, and Ingress (in separate files),
|
||||
and a couple of patches (to change the number of replicas and the hostname
|
||||
used in the Ingress).
|
||||
|
||||
- That file can be a *base* or a *variant*
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
patchesStrategicMerge:
|
||||
- scale-deployment.yaml
|
||||
- ingress-hostname.yaml
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- service.yaml
|
||||
- ingress.yaml
|
||||
```
|
||||
|
||||
- If it's a *base*:
|
||||
On the next slide, let's see a more complex example ...
|
||||
|
||||
- it lists YAML resource files to use
|
||||
---
|
||||
|
||||
- If it's a *variant* (or *overlay*):
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
commonLabels:
|
||||
add-this-to-all-my-resources: please
|
||||
patchesStrategicMerge:
|
||||
- prod-scaling.yaml
|
||||
- prod-healthchecks.yaml
|
||||
bases:
|
||||
- api/
|
||||
- frontend/
|
||||
- db/
|
||||
- github.com/example/app?ref=tag-or-branch
|
||||
resources:
|
||||
- ingress.yaml
|
||||
- permissions.yaml
|
||||
configMapGenerator:
|
||||
- name: appconfig
|
||||
files:
|
||||
- global.conf
|
||||
- local.conf=prod.conf
|
||||
```
|
||||
|
||||
- it refers to (at least) one *base*
|
||||
---
|
||||
|
||||
- and some *patches*
|
||||
## Glossary
|
||||
|
||||
- A *base* is a kustomization that is referred to by other kustomizations
|
||||
|
||||
- An *overlay* is a kustomization that refers to other kustomizations
|
||||
|
||||
- A kustomization can be both a base and an overlay at the same time
|
||||
|
||||
(a kustomization can refer to another, which can refer to a third)
|
||||
|
||||
- A *patch* describes how to alter an existing resource
|
||||
|
||||
(e.g. to change the image in a Deployment; or scaling parameters; etc.)
|
||||
|
||||
- A *variant* is the final outcome of applying bases + overlays
|
||||
|
||||
(See the [kustomize glossary](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md) for more definitions!)
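As an example, the `scale-deployment.yaml` patch mentioned earlier could be a strategic merge patch similar to this sketch (the Deployment name is hypothetical):

```yaml
# scale-deployment.yaml (strategic merge patch; hypothetical names)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: worker       # must match the name of the Deployment to patch
spec:
  replicas: 10       # only this field is overridden; everything else is kept
```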
|
||||
|
||||
---
|
||||
|
||||
## What Kustomize *cannot* do
|
||||
|
||||
- By design, there are a number of things that Kustomize won't do
|
||||
|
||||
- For instance:
|
||||
|
||||
- using command-line arguments or environment variables to generate a variant
|
||||
|
||||
- overlays can only *add* resources, not *remove* them
|
||||
|
||||
- See the full list of [eschewed features](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/eschewedFeatures.md) for more details
|
||||
|
||||
---
|
||||
|
||||
## Kustomize workflows
|
||||
|
||||
- The Kustomize documentation proposes two different workflows
|
||||
|
||||
- *Bespoke configuration*
|
||||
|
||||
- base and overlays managed by the same team
|
||||
|
||||
- *Off-the-shelf configuration* (OTS)
|
||||
|
||||
- base and overlays managed by different teams
|
||||
|
||||
- base is regularly updated by "upstream" (e.g. a vendor)
|
||||
|
||||
- our overlays and patches should (hopefully!) apply cleanly
|
||||
|
||||
- we may regularly update the base, or use a remote base
|
||||
|
||||
---
|
||||
|
||||
## Remote bases
|
||||
|
||||
- Kustomize can fetch remote bases using the Hashicorp go-getter library
|
||||
|
||||
- Examples:
|
||||
|
||||
github.com/jpetazzo/kubercoins (remote git repository)
|
||||
|
||||
github.com/jpetazzo/kubercoins?ref=kustomize (specific tag or branch)
|
||||
|
||||
https://releases.hello.io/k/1.0.zip (remote archive)
|
||||
|
||||
https://releases.hello.io/k/1.0.zip//some-subdir (subdirectory in archive)
|
||||
|
||||
- See [hashicorp/go-getter URL format docs](https://github.com/hashicorp/go-getter#url-format) for more examples
|
||||
|
||||
---
|
||||
|
||||
## Managing `kustomization.yaml`
|
||||
|
||||
- There are many ways to manage `kustomization.yaml` files, including:
|
||||
|
||||
- web wizards like [Replicated Ship](https://www.replicated.com/ship/)
|
||||
|
||||
- the `kustomize` CLI
|
||||
|
||||
- opening the file with our favorite text editor
|
||||
|
||||
- Let's see these in action!
|
||||
|
||||
---
|
||||
|
||||
@@ -199,3 +318,63 @@
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
|
||||
---
|
||||
|
||||
## Working with the `kustomize` CLI
|
||||
|
||||
- This is another way to get started
|
||||
|
||||
- General workflow:
|
||||
|
||||
`kustomize create` to generate an empty `kustomization.yaml` file
|
||||
|
||||
`kustomize edit add resource` to add Kubernetes YAML files to it
|
||||
|
||||
`kustomize edit add patch` to add patches to said resources
|
||||
|
||||
`kustomize build | kubectl apply -f-` or `kubectl apply -k .`
|
||||
|
||||
---
|
||||
|
||||
## `kubectl apply -k`
|
||||
|
||||
- Kustomize has been integrated in `kubectl`
|
||||
|
||||
- The `kustomize` tool is still needed if we want to use `create`, `edit`, ...
|
||||
|
||||
- Also, warning: `kubectl apply -k` embeds a slightly older version of `kustomize`!
|
||||
|
||||
- In recent versions of `kustomize`, bases can be listed in `resources`
|
||||
|
||||
(and `kustomize edit add base` will add its arguments to `resources`)
|
||||
|
||||
- `kubectl apply -k` requires bases to be listed in `bases`
|
||||
|
||||
(so after using `kustomize edit add base`, we need to fix `kustomization.yaml`)
|
||||
|
||||
---
|
||||
|
||||
## Differences with Helm
|
||||
|
||||
- Helm charts use placeholders `{{ like.this }}`
|
||||
|
||||
- Kustomize "bases" are standard Kubernetes YAML
|
||||
|
||||
- It is possible to use an existing set of YAML as a Kustomize base
|
||||
|
||||
- As a result, writing a Helm chart is more work ...
|
||||
|
||||
- ... But Helm charts are also more powerful; e.g. they can:
|
||||
|
||||
- use flags to conditionally include resources or blocks
|
||||
|
||||
- check if a given Kubernetes API group is supported
|
||||
|
||||
- [and much more](https://helm.sh/docs/chart_template_guide/)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Packaging and running apps with Kustomize
|
||||
:FR:- *Packaging* d'applications avec Kustomize
|
||||
|
||||
|
||||
202
slides/k8s/labels-annotations.md
Normal file
@@ -0,0 +1,202 @@
|
||||
# Labels and annotations
|
||||
|
||||
- Most Kubernetes resources can have *labels* and *annotations*
|
||||
|
||||
- Both labels and annotations are arbitrary strings
|
||||
|
||||
(with some limitations that we'll explain in a minute)
|
||||
|
||||
- Both labels and annotations can be added, removed, changed, dynamically
|
||||
|
||||
- This can be done with:
|
||||
|
||||
- the `kubectl edit` command
|
||||
|
||||
- the `kubectl label` and `kubectl annotate` commands
|
||||
|
||||
- ... many other ways! (`kubectl apply -f`, `kubectl patch`, ...)
|
||||
|
||||
---
|
||||
|
||||
## Viewing labels and annotations
|
||||
|
||||
- Let's see what we get when we create a Deployment
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a Deployment:
|
||||
```bash
|
||||
kubectl create deployment clock --image=jpetazzo/clock
|
||||
```
|
||||
|
||||
- Look at its annotations and labels:
|
||||
```bash
|
||||
kubectl describe deployment clock
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
So, what do we get?
|
||||
|
||||
---
|
||||
|
||||
## Labels and annotations for our Deployment
|
||||
|
||||
- We see one label:
|
||||
```
|
||||
Labels: app=clock
|
||||
```
|
||||
|
||||
- This is added by `kubectl create deployment`
|
||||
|
||||
- And one annotation:
|
||||
```
|
||||
Annotations: deployment.kubernetes.io/revision: 1
|
||||
```
|
||||
|
||||
- This is to keep track of successive versions when doing rolling updates
|
||||
|
||||
---
|
||||
|
||||
## And for the related Pod?
|
||||
|
||||
- Let's look up the Pod that was created and check it too
|
||||
|
||||
.exercise[
|
||||
|
||||
- Find the name of the Pod:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
- Display its information:
|
||||
```bash
|
||||
kubectl describe pod clock-xxxxxxxxxx-yyyyy
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
So, what do we get?
|
||||
|
||||
---
|
||||
|
||||
## Labels and annotations for our Pod
|
||||
|
||||
- We see two labels:
|
||||
```
|
||||
Labels: app=clock
|
||||
pod-template-hash=xxxxxxxxxx
|
||||
```
|
||||
|
||||
- `app=clock` comes from `kubectl create deployment` too
|
||||
|
||||
- `pod-template-hash` was assigned by the Replica Set
|
||||
|
||||
(when we do rolling updates, each set of Pods will have a different hash)
|
||||
|
||||
- There are no annotations:
|
||||
```
|
||||
Annotations: <none>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Selectors
|
||||
|
||||
- A *selector* is an expression matching labels
|
||||
|
||||
- It will restrict a command to the objects matching *at least* all these labels
|
||||
|
||||
.exercise[
|
||||
|
||||
- List all the pods with at least `app=clock`:
|
||||
```bash
|
||||
kubectl get pods --selector=app=clock
|
||||
```
|
||||
|
||||
- List all the pods with a label `app`, regardless of its value:
|
||||
```bash
|
||||
kubectl get pods --selector=app
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Setting labels and annotations
|
||||
|
||||
- The easiest method is to use `kubectl label` and `kubectl annotate`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Set a label on the `clock` Deployment:
|
||||
```bash
|
||||
kubectl label deployment clock color=blue
|
||||
```
|
||||
|
||||
- Check it out:
|
||||
```bash
|
||||
kubectl describe deployment clock
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## More on selectors
|
||||
|
||||
- If a selector has multiple labels, it means "match at least these labels"
|
||||
|
||||
Example: `--selector=app=frontend,release=prod`
|
||||
|
||||
- `--selector` can be abbreviated as `-l` (for **l**abels)
|
||||
|
||||
We can also use negative selectors
|
||||
|
||||
Example: `--selector=app!=clock`
|
||||
|
||||
- Selectors can be used with most `kubectl` commands
|
||||
|
||||
Examples: `kubectl delete`, `kubectl label`, ...
|
||||
|
||||
---
|
||||
|
||||
## Other ways to view labels
|
||||
|
||||
- We can use the `--show-labels` flag with `kubectl get`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Show labels for a bunch of objects:
|
||||
```bash
|
||||
kubectl get --show-labels po,rs,deploy,svc,no
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Differences between labels and annotations
|
||||
|
||||
- The *key* for both labels and annotations:
|
||||
|
||||
- must start and end with a letter or digit
|
||||
|
||||
- can also have `.` `-` `_` (but not in first or last position)
|
||||
|
||||
- can be up to 63 characters, or 253 + `/` + 63
|
||||
|
||||
- Label *values* are up to 63 characters, with the same restrictions
|
||||
|
||||
- Annotation *values* can have arbitrary characters (yes, even binary)
|
||||
|
||||
- Maximum length isn't defined
|
||||
|
||||
(dozens of kilobytes is fine, hundreds maybe not so much)
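To illustrate these constraints, here is a sketch of metadata combining a label and a longer annotation (the keys and values are hypothetical):

```yaml
metadata:
  name: clock
  labels:
    app: clock                      # short value, restricted character set
    example.com/tier: front-end     # optional prefix (up to 253 chars), "/", then up to 63 chars
  annotations:
    example.com/build-info: |       # annotation values can be long, arbitrary text
      built by hypothetical CI pipeline 1234
      on 2020-06-01, from commit cafebabe
```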
|
||||
|
||||
???
|
||||
|
||||
:EN:- Labels and annotations
|
||||
:FR:- *Labels* et annotations
|
||||
@@ -246,3 +246,10 @@
|
||||
(when we can't or won't dedicate a whole disk to a volume)
|
||||
|
||||
- It's possible to mix both (using distinct Storage Classes)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Static vs dynamic volume provisioning
|
||||
:EN:- Example: local persistent volume provisioner
|
||||
:FR:- Création statique ou dynamique de volumes
|
||||
:FR:- Exemple : création de volumes locaux
|
||||
|
||||
@@ -193,3 +193,8 @@ class: extra-details
|
||||
]
|
||||
|
||||
We can now utilize the cluster exactly as if we're logged into a node, except that it's remote.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Working with remote Kubernetes clusters
|
||||
:FR:- Travailler avec des *clusters* distants
|
||||
|
||||
@@ -145,3 +145,8 @@ But this is outside of the scope of this chapter.
|
||||
The YAML file that we used creates all the resources in the
|
||||
`default` namespace, for simplicity. In a real scenario, you will
|
||||
create the resources in the `kube-system` namespace or in a dedicated namespace.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Centralizing logs
|
||||
:FR:- Centraliser les logs
|
||||
|
||||
@@ -45,7 +45,7 @@ Exactly what we need!
|
||||
|
||||
---
|
||||
|
||||
## Installing Stern
|
||||
## Checking if Stern is installed
|
||||
|
||||
- Run `stern` (without arguments) to check if it's installed:
|
||||
|
||||
@@ -57,7 +57,17 @@ Exactly what we need!
|
||||
stern pod-query [flags]
|
||||
```
|
||||
|
||||
- If it is not installed, the easiest method is to download a [binary release](https://github.com/wercker/stern/releases)
|
||||
- If it's missing, let's see how to install it
|
||||
|
||||
---
|
||||
|
||||
## Installing Stern
|
||||
|
||||
- Stern is written in Go, and Go programs are usually shipped as a single binary
|
||||
|
||||
- We just need to download that binary and put it in our `PATH`!
|
||||
|
||||
- Binary releases are available [here](https://github.com/wercker/stern/releases) on GitHub
|
||||
|
||||
- The following commands will install Stern on a Linux Intel 64 bit machine:
|
||||
```bash
|
||||
@@ -66,7 +76,7 @@ Exactly what we need!
|
||||
sudo chmod +x /usr/local/bin/stern
|
||||
```
|
||||
|
||||
- On OS X, just `brew install stern`
|
||||
- On macOS, we can also `brew install stern` or `port install stern`
|
||||
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
@@ -149,3 +159,8 @@ Exactly what we need!
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Viewing pod logs from the CLI
|
||||
:FR:- Consulter les logs des pods depuis la CLI
|
||||
|
||||
@@ -80,3 +80,8 @@ If it shows our nodes and their CPU and memory load, we're good!
|
||||
- kube-resource-report can generate HTML reports
|
||||
|
||||
(https://github.com/hjacobs/kube-resource-report)
|
||||
|
||||
???
|
||||
|
||||
:EN:- The *core metrics pipeline*
|
||||
:FR:- Le *core metrics pipeline*
|
||||
|
||||
@@ -532,3 +532,8 @@ Sometimes it works, sometimes it doesn't. Why?
|
||||
- We want to automate all these steps
|
||||
|
||||
- We want something that works on all networks
|
||||
|
||||
???
|
||||
|
||||
:EN:- Connecting nodes and pods
|
||||
:FR:- Interconnecter les nœuds et les pods
|
||||
|
||||
@@ -365,3 +365,7 @@ Note: we could have used `--namespace=default` for the same result.
|
||||
|
||||
- Pro-tip: install it on your machine during the next break!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Organizing resources with Namespaces
|
||||
:FR:- Organiser les ressources avec des *namespaces*
|
||||
|
||||
@@ -446,3 +446,8 @@ troubleshoot easily, without having to poke holes in our firewall.
|
||||
- a [very good talk about network policies](https://www.youtube.com/watch?list=PLj6h78yzYM2P-3-xqvmWaZbbI1sW-ulZb&v=3gGpMmYeEO8) at KubeCon North America 2017
|
||||
|
||||
- a repository of [ready-to-use recipes](https://github.com/ahmetb/kubernetes-network-policy-recipes) for network policies
|
||||
|
||||
???
|
||||
|
||||
:EN:- Isolating workloads with Network Policies
|
||||
:FR:- Isolation réseau avec les *network policies*
|
||||
|
||||
@@ -377,3 +377,8 @@ class: extra-details
|
||||
- It should now say "Signature Verified"
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Authenticating with OIDC
|
||||
:FR:- S'identifier avec OIDC
|
||||
|
||||
@@ -1,3 +1,35 @@
|
||||
# Designing an operator
|
||||
|
||||
- Once we understand CRDs and operators, it's tempting to use them everywhere
|
||||
|
||||
- Yes, we can do (almost) everything with operators ...
|
||||
|
||||
- ... But *should we?*
|
||||
|
||||
- Very often, the answer is **“no!”**
|
||||
|
||||
- Operators are powerful, but significantly more complex than other solutions
|
||||
|
||||
---
|
||||
|
||||
## When should we (not) use operators?
|
||||
|
||||
- Operators are great if our app needs to react to cluster events
|
||||
|
||||
(nodes or pods going down, and requiring extensive reconfiguration)
|
||||
|
||||
- Operators *might* be helpful to encapsulate complexity
|
||||
|
||||
(manipulate one single custom resource for an entire stack)
|
||||
|
||||
- Operators are probably overkill if a Helm chart would suffice
|
||||
|
||||
- That being said, if we really want to write an operator ...
|
||||
|
||||
Read on!
|
||||
|
||||
---
|
||||
|
||||
## What does it take to write an operator?
|
||||
|
||||
- Writing a quick-and-dirty operator, or a POC/MVP, is easy
|
||||
@@ -356,3 +388,8 @@ class: extra-details
|
||||
(this is used e.g. by the metrics server)
|
||||
|
||||
- [This documentation page](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#choosing-a-method-for-adding-custom-resources) compares the features of CRDs and API aggregation
|
||||
|
||||
???
|
||||
|
||||
:EN:- Guidelines to design our own operators
|
||||
:FR:- Comment concevoir nos propres opérateurs
|
||||
|
||||
@@ -93,11 +93,11 @@ Examples:
|
||||
|
||||
- Representing and managing external resources
|
||||
|
||||
(Example: [AWS Service Operator](https://operatorhub.io/operator/alpha/aws-service-operator.v0.0.1))
|
||||
(Example: [AWS S3 Operator](https://operatorhub.io/operator/awss3-operator-registry))
|
||||
|
||||
- Managing complex cluster add-ons
|
||||
|
||||
(Example: [Istio operator](https://operatorhub.io/operator/beta/istio-operator.0.1.6))
|
||||
(Example: [Istio operator](https://operatorhub.io/operator/istio))
|
||||
|
||||
- Deploying and managing our applications' lifecycles
|
||||
|
||||
@@ -615,3 +615,11 @@ After the Kibana UI loads, we need to click around a bit
|
||||
*Operators can be very powerful.
|
||||
<br/>
|
||||
But we need to know exactly the scenarios that they can handle.*
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes operators
|
||||
:EN:- Deploying ElasticSearch with ECK
|
||||
|
||||
:FR:- Les opérateurs
|
||||
:FR:- Déployer ElasticSearch avec ECK
|
||||
|
||||
@@ -162,3 +162,8 @@ Yes, this may take a little while to update. *(Narrator: it was DNS.)*
|
||||
--
|
||||
|
||||
*Alright, we're back to where we started, when we were running on a single node!*
|
||||
|
||||
???
|
||||
|
||||
:EN:- Running our demo app on Kubernetes
|
||||
:FR:- Faire tourner l'application de démo sur Kubernetes
|
||||
|
||||
@@ -180,3 +180,8 @@ class: extra-details
|
||||
]
|
||||
|
||||
As always, the [documentation](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/) has useful extra information and pointers.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Owners and dependents
|
||||
:FR:- Liens de parenté entre les ressources
|
||||
|
||||
@@ -287,7 +287,7 @@
|
||||
|
||||
- Try to create a Deployment:
|
||||
```bash
|
||||
kubectl run testpsp2 --image=nginx
|
||||
kubectl create deployment testpsp2 --image=nginx
|
||||
```
|
||||
|
||||
- Look at existing resources:
|
||||
@@ -350,7 +350,7 @@ We can get hints at what's happening by looking at the ReplicaSet and Events.
|
||||
|
||||
- Create a Deployment as well:
|
||||
```bash
|
||||
kubectl run testpsp4 --image=nginx
|
||||
kubectl create deployment testpsp4 --image=nginx
|
||||
```
|
||||
|
||||
- Confirm that the Deployment is *not* creating any Pods:
|
||||
@@ -531,3 +531,8 @@ class: extra-details
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Preventing privilege escalation with Pod Security Policies
|
||||
:FR:- Limiter les droits des conteneurs avec les *Pod Security Policies*
|
||||
|
||||
@@ -74,29 +74,78 @@
|
||||
|
||||
---
|
||||
|
||||
## Portworx requirements
|
||||
## Installing Portworx
|
||||
|
||||
- Kubernetes cluster ✔️
|
||||
- Portworx installation is relatively simple
|
||||
|
||||
- Optional key/value store (etcd or Consul) ❌
|
||||
- ... But we made it *even simpler!*
|
||||
|
||||
- At least one available block device ❌
|
||||
- We are going to use a YAML manifest that will take care of everything
|
||||
|
||||
- Warning: this manifest is customized for a very specific setup
|
||||
|
||||
(like the VMs that we provide during workshops and training sessions)
|
||||
|
||||
- It will probably *not work* if you are using a different setup
|
||||
|
||||
(like Docker Desktop, k3s, MicroK8S, Minikube ...)
|
||||
|
||||
---
|
||||
|
||||
## The key-value store
|
||||
## The simplified Portworx installer
|
||||
|
||||
- In the current version of Portworx (1.4) it is recommended to use etcd or Consul
|
||||
- The Portworx installation will take a few minutes
|
||||
|
||||
- But Portworx also has beta support for an embedded key/value store
|
||||
- Let's start it, then we'll explain what happens behind the scenes
|
||||
|
||||
- For simplicity, we are going to use the latter option
|
||||
.exercise[
|
||||
|
||||
(but if we have deployed Consul or etcd, we can use that, too)
|
||||
- Install Portworx:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/portworx.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
<!-- ##VERSION ## -->
|
||||
|
||||
*Note: this was tested with Kubernetes 1.18. Newer versions may or may not work.*
|
||||
|
||||
---
|
||||
|
||||
## One available block device
|
||||
class: extra-details
|
||||
|
||||
## What's in this YAML manifest?
|
||||
|
||||
- Portworx installation itself, pre-configured for our setup
|
||||
|
||||
- A default *Storage Class* using Portworx
|
||||
|
||||
- A *Daemon Set* to create loop devices on each node of the cluster
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Portworx installation
|
||||
|
||||
- The official way to install Portworx is to use [PX-Central](https://central.portworx.com/)
|
||||
|
||||
(this requires a free account)
|
||||
|
||||
- PX-Central will ask us a few questions about our cluster
|
||||
|
||||
(Kubernetes version, on-prem/cloud deployment, etc.)
|
||||
|
||||
- Using our answers, it will generate a YAML manifest that we can use
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Portworx storage configuration
|
||||
|
||||
- Portworx needs at least one *block device*
|
||||
|
||||
- Block device = disk or partition on a disk
|
||||
|
||||
@@ -112,71 +161,41 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Setting up a loop device
|
||||
|
||||
- We are going to create a 10 GB (empty) file on each node
|
||||
- Our `portworx.yaml` manifest includes a *Daemon Set* that will:
|
||||
|
||||
- Then make a loop device from it, to be used by Portworx
|
||||
- create a 10 GB (empty) file on each node
|
||||
|
||||
.exercise[
|
||||
- load the `loop` module (if it's not already loaded)
|
||||
|
||||
- Create a 10 GB file on each node:
|
||||
```bash
|
||||
for N in $(seq 1 4); do ssh node$N sudo truncate --size 10G /portworx.blk; done
|
||||
```
|
||||
(If SSH asks to confirm host keys, enter `yes` each time.)
|
||||
- associate a loop device with the 10 GB file
|
||||
|
||||
- Associate the file to a loop device on each node:
|
||||
```bash
|
||||
for N in $(seq 1 4); do ssh node$N sudo losetup /dev/loop4 /portworx.blk; done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Installing Portworx
|
||||
|
||||
- To install Portworx, we need to go to https://install.portworx.com/
|
||||
|
||||
- This website will ask us a bunch of questions about our cluster
|
||||
|
||||
- Then, it will generate a YAML file that we should apply to our cluster
|
||||
|
||||
--
|
||||
|
||||
- Or, we can just apply that YAML file directly (it's in `k8s/portworx.yaml`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install Portworx:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/portworx.yaml
|
||||
```
|
||||
|
||||
]
|
||||
- After these steps, we have a block device that Portworx can use
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Generating a custom YAML file
|
||||
## Implementation details
|
||||
|
||||
If you want to generate a YAML file tailored to your own needs, the easiest
|
||||
way is to use https://install.portworx.com/.
|
||||
- The file is `/portworx.blk`
|
||||
|
||||
FYI, this is how we obtained the YAML file used earlier:
|
||||
```
|
||||
KBVER=$(kubectl version -o json | jq -r .serverVersion.gitVersion)
|
||||
BLKDEV=/dev/loop4
|
||||
curl "https://install.portworx.com/1.4/?kbver=$KBVER&b=true&s=$BLKDEV&c=px-workshop&stork=true&lh=true"
|
||||
```
|
||||
If you want to use an external key/value store, add one of the following:
|
||||
```
|
||||
&k=etcd://`XXX`:2379
|
||||
&k=consul://`XXX`:8500
|
||||
```
|
||||
... where `XXX` is the name or address of your etcd or Consul server.
|
||||
(it is a [sparse file](https://en.wikipedia.org/wiki/Sparse_file) created with `truncate`)
|
||||
|
||||
- The loop device is `/dev/loop4`
|
||||
|
||||
- This can be verified by running `sudo losetup`
|
||||
|
||||
- The *Daemon Set* uses a privileged *Init Container*
|
||||
|
||||
- We can check the logs of that container with:
|
||||
```bash
|
||||
kubectl logs --selector=app=setup-loop4-for-portworx \
|
||||
-c setup-loop4-for-portworx
|
||||
```
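
If we want to double-check these implementation details on a node, something like this should work (assuming the file and device names described above):

```bash
# Show the loop device and the backing file
sudo losetup --list | grep portworx.blk
# Apparent size is 10G, but actual disk usage is much smaller (it's a sparse file)
ls -lh /portworx.blk
sudo du -h /portworx.blk
```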
|
||||
|
||||
---
|
||||
|
||||
@@ -276,11 +295,9 @@ parameters:
|
||||
priority_io: "high"
|
||||
```
|
||||
|
||||
- It says "use Portworx to create volumes"
|
||||
- It says "use Portworx to create volumes and keep 2 replicas of these volumes"
|
||||
|
||||
- It tells Portworx to "keep 2 replicas of these volumes"
|
||||
|
||||
- It marks the Storage Class as being the default one
|
||||
- The annotation makes this Storage Class the default one
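To verify which Storage Class is the default one (the class name below is a placeholder; use the name shown by the first command):

```bash
# The default class is flagged with "(default)" next to its name
kubectl get storageclass
# Inspect the annotations and parameters of a given class
kubectl describe storageclass portworx-replicated
```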
|
||||
|
||||
---
|
||||
|
||||
@@ -323,7 +340,10 @@ spec:
|
||||
schedulerName: stork
|
||||
containers:
|
||||
- name: postgres
|
||||
image: postgres:11
|
||||
image: postgres:12
|
||||
env:
|
||||
- name: POSTGRES_HOST_AUTH_METHOD
|
||||
value: trust
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/postgresql/data
|
||||
name: postgres
|
||||
@@ -401,14 +421,14 @@ autopilot prompt detection expects $ or # at the beginning of the line.
|
||||
|
||||
- Populate it with `pgbench`:
|
||||
```bash
|
||||
pgbench -i -s 10 demo
|
||||
pgbench -i demo
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- The `-i` flag means "create tables"
|
||||
|
||||
- The `-s 10` flag means "create 10 x 100,000 rows"
|
||||
- If you want more data in the test tables, add e.g. `-s 10` (to get 10x more rows)
|
||||
|
||||
---
|
||||
|
||||
@@ -428,11 +448,55 @@ autopilot prompt detection expects $ or # at the beginning of the line.
|
||||
psql demo -c "select count(*) from pgbench_accounts"
|
||||
```
|
||||
|
||||
<!-- ```key ^D``` -->
|
||||
- Check that `pgbench_history` is currently empty:
|
||||
```bash
|
||||
psql demo -c "select count(*) from pgbench_history"
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(We should see a count of 1,000,000 rows.)
|
||||
---
|
||||
|
||||
## Testing the load generator
|
||||
|
||||
- Let's use `pgbench` to generate a few transactions
|
||||
|
||||
.exercise[
|
||||
|
||||
- Run `pgbench` for 10 seconds, reporting progress every second:
|
||||
```bash
|
||||
pgbench -P 1 -T 10 demo
|
||||
```
|
||||
|
||||
- Check the size of the history table now:
|
||||
```bash
|
||||
psql demo -c "select count(*) from pgbench_history"
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: on small cloud instances, a typical speed is about 100 transactions/second.
|
||||
|
||||
---
|
||||
|
||||
## Generating transactions
|
||||
|
||||
- Now let's use `pgbench` to generate more transactions
|
||||
|
||||
- While it's running, we will disrupt the database server
|
||||
|
||||
.exercise[
|
||||
|
||||
- Run `pgbench` for 10 minutes, reporting progress every second:
|
||||
```bash
|
||||
pgbench -P 1 -T 600 demo
|
||||
```
|
||||
|
||||
- You can use a longer time period if you need more time to run the next steps
|
||||
|
||||
<!-- ```tmux split-pane -h``` -->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -522,15 +586,18 @@ By "disrupt" we mean: "disconnect it from the network".
|
||||
```key ^J```
|
||||
-->
|
||||
|
||||
- Check the number of rows in the `pgbench_accounts` table:
|
||||
- Check how many transactions are now in the `pgbench_history` table:
|
||||
```bash
|
||||
psql demo -c "select count(*) from pgbench_accounts"
|
||||
psql demo -c "select count(*) from pgbench_history"
|
||||
```
|
||||
|
||||
<!-- ```key ^D``` -->
|
||||
|
||||
]
|
||||
|
||||
If the 10-second test that we ran earlier gave e.g. 80 transactions per second,
|
||||
and we failed the node after 30 seconds, we should have about 2400 rows in that table.
|
||||
|
||||
---
|
||||
|
||||
## Double-check that the pod has really moved
|
||||
@@ -598,7 +665,7 @@ class: extra-details
|
||||
|
||||
- If we need to see what's going on with Portworx:
|
||||
```
|
||||
PXPOD=$(kubectl -n kube-system get pod -l name=portworx -o json |
|
||||
|
||||
jq -r .items[0].metadata.name)
|
||||
kubectl -n kube-system exec $PXPOD -- /opt/pwx/bin/pxctl status
|
||||
```
|
||||
@@ -678,3 +745,11 @@ were inspired by [Portworx examples on Katacoda](https://katacoda.com/portworx/s
|
||||
- [HA PostgreSQL on Kubernetes with Portworx](https://www.katacoda.com/portworx/scenarios/px-k8s-postgres-all-in-one)
|
||||
|
||||
(with adaptations to use a Stateful Set and simplify PostgreSQL's setup)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Using highly available persistent volumes
|
||||
:EN:- Example: deploying a database that can withstand node outages
|
||||
|
||||
:FR:- Utilisation de volumes à haute disponibilité
|
||||
:FR:- Exemple : déployer une base de données survivant à la défaillance d'un nœud
|
||||
|
||||
@@ -562,3 +562,8 @@ class: extra-details
|
||||
Don't panic if you don't know these tools!
|
||||
|
||||
...But make sure at least one person in your team is on it 💯
|
||||
|
||||
???
|
||||
|
||||
:EN:- Collecting metrics with Prometheus
|
||||
:FR:- Collecter des métriques avec Prometheus
|
||||
|
||||
@@ -536,3 +536,15 @@ services.nodeports 0 0
|
||||
- [static demo](https://hjacobs.github.io/kube-resource-report/sample-report/output/index.html)
|
||||
|
|
||||
[live demo](https://kube-resource-report.demo.j-serv.de/applications.html)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Setting compute resource limits
|
||||
:EN:- Defining default policies for resource usage
|
||||
:EN:- Managing cluster allocation and quotas
|
||||
:EN:- Resource management in practice
|
||||
|
||||
:FR:- Allouer et limiter les ressources des conteneurs
|
||||
:FR:- Définir des ressources par défaut
|
||||
:FR:- Gérer les quotas de ressources au niveau du cluster
|
||||
:FR:- Conseils pratiques
|
||||
|
||||
@@ -437,3 +437,12 @@ class: extra-details
|
||||
]
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Rolling updates
|
||||
:EN:- Rolling back a bad deployment
|
||||
|
||||
:FR:- Mettre à jour un déploiement
|
||||
:FR:- Concept de *rolling update* et *rollback*
|
||||
:FR:- Paramétrer la vitesse de déploiement
|
||||
|
||||
@@ -200,3 +200,8 @@ Now we can access the IP addresses of our services through `$HASHER` and `$RNG`.
|
||||
- `rng` is not (it should take about 700 milliseconds if there are 10 workers)
|
||||
|
||||
- Something is wrong with `rng`, but ... what?
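As a reminder, a minimal latency check could look like this (assuming `$HASHER` and `$RNG` hold the service addresses, as set up above):

```bash
# Three requests against each service; compare the reported round-trip times
httping -c 3 $HASHER
httping -c 3 $RNG
```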
|
||||
|
||||
???
|
||||
|
||||
:EN:- Scaling up our demo app
|
||||
:FR:- *Scale up* de l'application de démo
|
||||
|
||||
145
slides/k8s/setup-devel.md
Normal file
@@ -0,0 +1,145 @@
|
||||
# Running a local development cluster
|
||||
|
||||
- Let's review some options to run Kubernetes locally
|
||||
|
||||
- There is no "best option", it depends what you value:
|
||||
|
||||
- ability to run on all platforms (Linux, Mac, Windows, other?)
|
||||
|
||||
- ability to run clusters with multiple nodes
|
||||
|
||||
- ability to run multiple clusters side by side
|
||||
|
||||
- ability to run recent (or even, unreleased) versions of Kubernetes
|
||||
|
||||
- availability of plugins
|
||||
|
||||
- etc.
|
||||
|
||||
---
|
||||
|
||||
## Docker Desktop
|
||||
|
||||
- Available on Mac and Windows
|
||||
|
||||
- Gives you one cluster with one node
|
||||
|
||||
- Rather old version of Kubernetes
|
||||
|
||||
- Very easy to use if you are already using Docker Desktop:
|
||||
|
||||
go to Docker Desktop preferences and enable Kubernetes
|
||||
|
||||
- Ideal for Docker users who need good integration between both platforms
|
||||
|
||||
---
|
||||
|
||||
## [k3d](https://k3d.io/)
|
||||
|
||||
- Based on [K3s](https://k3s.io/) by Rancher Labs
|
||||
|
||||
- Requires Docker
|
||||
|
||||
- Runs Kubernetes nodes in Docker containers
|
||||
|
||||
- Can deploy multiple clusters, with multiple nodes, and multiple master nodes
|
||||
|
||||
- As of June 2020, two versions co-exist: stable (1.7) and beta (3.0)
|
||||
|
||||
- They have different syntax and options, this can be confusing
|
||||
|
||||
(but don't let that stop you!)
|
||||
|
||||
---
|
||||
|
||||
## k3d in action
|
||||
|
||||
- Get the `k3d` beta 3 binary from https://github.com/rancher/k3d/releases
|
||||
|
||||
- Create a simple cluster:
|
||||
```bash
|
||||
k3d create cluster petitcluster --update-kubeconfig
|
||||
```
|
||||
|
||||
- Use it:
|
||||
```bash
|
||||
kubectl config use-context k3d-petitcluster
|
||||
```
|
||||
|
||||
- Create a more complex cluster with a custom version:
|
||||
```bash
|
||||
k3d create cluster groscluster --update-kubeconfig \
|
||||
--image rancher/k3s:v1.18.3-k3s1 --masters 3 --workers 5 --api-port 6444
|
||||
```
|
||||
|
||||
(note: API port seems to be necessary when running multiple clusters)
|
||||
|
||||
---
|
||||
|
||||
## [KinD](https://kind.sigs.k8s.io/)
|
||||
|
||||
- Kubernetes-in-Docker
|
||||
|
||||
- Requires Docker (obviously!)
|
||||
|
||||
- Deploying a single node cluster using the latest version is simple:
|
||||
```bash
|
||||
kind create cluster
|
||||
```
|
||||
|
||||
- More advanced scenarios require writing a short [config file](https://kind.sigs.k8s.io/docs/user/quick-start#configuring-your-kind-cluster)
|
||||
|
||||
(to define multiple nodes, multiple master nodes, set Kubernetes versions ...; see the sketch below)
|
||||
|
||||
- Can deploy multiple clusters
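Here is a minimal sketch of such a config file (assuming the `v1alpha4` config API; adjust to the KinD version you are running):

```bash
# Write a three-node cluster definition, then create the cluster from it
cat > kind-multinode.yaml <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
EOF
kind create cluster --config kind-multinode.yaml
```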
|
||||
|
||||
---
|
||||
|
||||
## [Minikube](https://minikube.sigs.k8s.io/docs/)
|
||||
|
||||
- The "legacy" option!
|
||||
|
||||
(note: this is not a bad thing, it means that it's very stable, has lots of plugins, etc.)
|
||||
|
||||
- Supports many [drivers](https://minikube.sigs.k8s.io/docs/drivers/)
|
||||
|
||||
(HyperKit, Hyper-V, KVM, VirtualBox, but also Docker and many others)
|
||||
|
||||
- Can deploy a single cluster; recent versions can deploy multiple nodes
|
||||
|
||||
- Great option if you want a "Kubernetes first" experience
|
||||
|
||||
(i.e. if you don't already have Docker and/or don't want/need it)
|
||||
|
||||
---
|
||||
|
||||
## [MicroK8s](https://microk8s.io/)
|
||||
|
||||
- Available on Linux, and since recently, on Mac and Windows as well
|
||||
|
||||
- The Linux version is installed through Snap
|
||||
|
||||
(which is pre-installed on all recent versions of Ubuntu)
|
||||
|
||||
- Also supports clustering (as in, multiple machines running MicroK8s)
|
||||
|
||||
- DNS is not enabled by default; enable it with `microk8s enable dns`
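A few typical commands once MicroK8s is installed (a sketch; exact syntax can vary a bit between MicroK8s releases):

```bash
# Wait until the cluster is up, then enable DNS and check the node
microk8s status --wait-ready
microk8s enable dns
microk8s kubectl get nodes
```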
|
||||
|
||||
---
|
||||
|
||||
## VM with custom install
|
||||
|
||||
- Choose your own adventure!
|
||||
|
||||
- Pick any Linux distribution!
|
||||
|
||||
- Build your cluster from scratch or use a Kubernetes installer!
|
||||
|
||||
- Discover exotic CNI plugins and container runtimes!
|
||||
|
||||
- The only limit is yourself, and the time you are willing to sink in!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes options for local development
|
||||
:FR:- Installation de Kubernetes pour travailler en local
|
||||
@@ -1,94 +0,0 @@
|
||||
# Setting up Kubernetes
|
||||
|
||||
- How did we set up these Kubernetes clusters that we're using?
|
||||
|
||||
--
|
||||
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
- We used `kubeadm` on freshly installed VM instances running Ubuntu LTS
|
||||
|
||||
1. Install Docker
|
||||
|
||||
2. Install Kubernetes packages
|
||||
|
||||
3. Run `kubeadm init` on the first node (it deploys the control plane on that node)
|
||||
|
||||
4. Set up Weave (the overlay network)
|
||||
<br/>
|
||||
(that step is just one `kubectl apply` command; discussed later)
|
||||
|
||||
5. Run `kubeadm join` on the other nodes (with the token produced by `kubeadm init`)
|
||||
|
||||
6. Copy the configuration file generated by `kubeadm init`
|
||||
|
||||
- Check the [prepare VMs README](https://@@GITREPO@@/blob/master/prepare-vms/README.md) for more details
|
||||
|
||||
---
|
||||
|
||||
## `kubeadm` drawbacks
|
||||
|
||||
- Doesn't set up Docker or any other container engine
|
||||
|
||||
- Doesn't set up the overlay network
|
||||
|
||||
- Doesn't set up multi-master (no high availability)
|
||||
|
||||
--
|
||||
|
||||
(At least ... not yet! Though it's [experimental in 1.12](https://kubernetes.io/docs/setup/independent/high-availability/).)
|
||||
|
||||
--
|
||||
|
||||
- "It's still twice as many steps as setting up a Swarm cluster 😕" -- Jérôme
|
||||
|
||||
---
|
||||
|
||||
## Other deployment options
|
||||
|
||||
- [AKS](https://azure.microsoft.com/services/kubernetes-service/):
|
||||
managed Kubernetes on Azure
|
||||
|
||||
- [GKE](https://cloud.google.com/kubernetes-engine/):
|
||||
managed Kubernetes on Google Cloud
|
||||
|
||||
- [EKS](https://aws.amazon.com/eks/),
|
||||
[eksctl](https://eksctl.io/):
|
||||
managed Kubernetes on AWS
|
||||
|
||||
- [kops](https://github.com/kubernetes/kops):
|
||||
customizable deployments on AWS, Digital Ocean, GCE (beta), vSphere (alpha)
|
||||
|
||||
- [minikube](https://kubernetes.io/docs/setup/minikube/),
|
||||
[kubespawn](https://github.com/kinvolk/kube-spawn),
|
||||
[Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/),
|
||||
[kind](https://kind.sigs.k8s.io):
|
||||
for local development
|
||||
|
||||
- [kubicorn](https://github.com/kubicorn/kubicorn),
|
||||
the [Cluster API](https://blogs.vmware.com/cloudnative/2019/03/14/what-and-why-of-cluster-api/):
|
||||
deploy your clusters declaratively, "the Kubernetes way"
|
||||
|
||||
---
|
||||
|
||||
## Even more deployment options
|
||||
|
||||
- If you like Ansible:
|
||||
[kubespray](https://github.com/kubernetes-incubator/kubespray)
|
||||
|
||||
- If you like Terraform:
|
||||
[typhoon](https://github.com/poseidon/typhoon)
|
||||
|
||||
- If you like Terraform and Puppet:
|
||||
[tarmak](https://github.com/jetstack/tarmak)
|
||||
|
||||
- You can also learn how to install every component manually, with
|
||||
the excellent tutorial [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way)
|
||||
|
||||
*Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.*
|
||||
|
||||
- There are also many commercial options available!
|
||||
|
||||
- For a longer list, check the Kubernetes documentation:
|
||||
<br/>
|
||||
it has a great guide to [pick the right solution](https://kubernetes.io/docs/setup/#production-environment) to set up Kubernetes.
|
||||
@@ -1,4 +1,4 @@
|
||||
# Installing a managed cluster
|
||||
# Deploying a managed cluster
|
||||
|
||||
*"The easiest way to install Kubernetes is to get someone
|
||||
else to do it for you."
|
||||
@@ -11,6 +11,8 @@ else to do it for you."
|
||||
|
||||
(the goal is to show the actual steps to get started)
|
||||
|
||||
- The list is sorted alphabetically
|
||||
|
||||
- All the options mentioned here require an account
|
||||
with a cloud provider
|
||||
|
||||
@@ -18,123 +20,6 @@ with a cloud provider
|
||||
|
||||
---
|
||||
|
||||
## EKS (the old way)
|
||||
|
||||
- [Read the doc](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html)
|
||||
|
||||
- Create service roles, VPCs, and a bunch of other oddities
|
||||
|
||||
- Try to figure out why it doesn't work
|
||||
|
||||
- Start over, following an [official AWS blog post](https://aws.amazon.com/blogs/aws/amazon-eks-now-generally-available/)
|
||||
|
||||
- Try to find the missing Cloud Formation template
|
||||
|
||||
--
|
||||
|
||||
.footnote[(╯°□°)╯︵ ┻━┻]
|
||||
|
||||
---
|
||||
|
||||
## EKS (the new way)
|
||||
|
||||
- Install `eksctl`
|
||||
|
||||
- Set the usual environment variables
|
||||
|
||||
([AWS_DEFAULT_REGION](https://docs.aws.amazon.com/general/latest/gr/rande.html#eks_region), AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
|
||||
|
||||
- Create the cluster:
|
||||
```bash
|
||||
eksctl create cluster
|
||||
```
|
||||
|
||||
- Wait 15-20 minutes (yes, it's sloooooooooooooooooow)
|
||||
|
||||
- Add cluster add-ons
|
||||
|
||||
(by default, it doesn't come with metrics-server, logging, etc.)
|
||||
|
||||
---
|
||||
|
||||
## EKS (cleanup)
|
||||
|
||||
- Delete the cluster:
|
||||
```bash
|
||||
eksctl delete cluster <clustername>
|
||||
```
|
||||
|
||||
- If you need to find the name of the cluster:
|
||||
```bash
|
||||
eksctl get clusters
|
||||
```
|
||||
|
||||
.footnote[Note: the AWS documentation has been updated and now includes [eksctl instructions](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html).]
|
||||
|
||||
---
|
||||
|
||||
## GKE (initial setup)
|
||||
|
||||
- Install `gcloud`
|
||||
|
||||
- Login:
|
||||
```bash
|
||||
gcloud auth init
|
||||
```
|
||||
|
||||
- Create a "project":
|
||||
```bash
|
||||
gcloud projects create my-gke-project
|
||||
gcloud config set project my-gke-project
|
||||
```
|
||||
|
||||
- Pick a [region](https://cloud.google.com/compute/docs/regions-zones/)
|
||||
|
||||
(example: `europe-west1`, `us-west1`, ...)
|
||||
|
||||
---
|
||||
|
||||
## GKE (create cluster)
|
||||
|
||||
- Create the cluster:
|
||||
```bash
|
||||
gcloud container clusters create my-gke-cluster --region us-west1 --num-nodes=2
|
||||
```
|
||||
|
||||
(without `--num-nodes` you might exhaust your IP address quota!)
|
||||
|
||||
- The first time you try to create a cluster in a given project, you get an error
|
||||
|
||||
- you need to enable the Kubernetes Engine API
|
||||
- the error message gives you a link
|
||||
- follow the link and enable the API (and billing)
|
||||
<br/>(it's just a couple of clicks and it's instantaneous)
|
||||
|
||||
- Wait a couple of minutes (yes, it's faaaaaaaaast)
|
||||
|
||||
- The cluster comes with many add-ons
|
||||
|
||||
---
|
||||
|
||||
## GKE (cleanup)
|
||||
|
||||
- List clusters (if you forgot its name):
|
||||
```bash
|
||||
gcloud container clusters list
|
||||
```
|
||||
|
||||
- Delete the cluster:
|
||||
```bash
|
||||
gcloud container clusters delete my-gke-cluster --region us-west1
|
||||
```
|
||||
|
||||
- Delete the project (optional):
|
||||
```bash
|
||||
gcloud projects delete my-gke-project
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## AKS (initial setup)
|
||||
|
||||
- Install the Azure CLI
|
||||
@@ -168,8 +53,6 @@ with a cloud provider
|
||||
az aks get-credentials --resource-group my-aks-group --name my-aks-cluster
|
||||
```
|
||||
|
||||
- The cluster has useful components pre-installed, such as the metrics server
|
||||
|
||||
---
|
||||
|
||||
## AKS (cleanup)
|
||||
@@ -190,6 +73,95 @@ with a cloud provider
|
||||
|
||||
---
|
||||
|
||||
## AKS (notes)
|
||||
|
||||
- The cluster has useful components pre-installed, such as the metrics server
|
||||
|
||||
- There is also a product called [AKS Engine](https://github.com/Azure/aks-engine):
|
||||
|
||||
- leverages ARM (Azure Resource Manager) templates to deploy Kubernetes
|
||||
|
||||
- it's "the library used by AKS"
|
||||
|
||||
- fully customizable
|
||||
|
||||
- think of it as "half-managed" Kubernetes option
|
||||
|
||||
---
|
||||
|
||||
## Amazon EKS (the old way)
|
||||
|
||||
- [Read the doc](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html)
|
||||
|
||||
- Create service roles, VPCs, and a bunch of other oddities
|
||||
|
||||
- Try to figure out why it doesn't work
|
||||
|
||||
- Start over, following an [official AWS blog post](https://aws.amazon.com/blogs/aws/amazon-eks-now-generally-available/)
|
||||
|
||||
- Try to find the missing Cloud Formation template
|
||||
|
||||
--
|
||||
|
||||
.footnote[(╯°□°)╯︵ ┻━┻]
|
||||
|
||||
---
|
||||
|
||||
## Amazon EKS (the new way)
|
||||
|
||||
- Install `eksctl`
|
||||
|
||||
- Set the usual environment variables
|
||||
|
||||
([AWS_DEFAULT_REGION](https://docs.aws.amazon.com/general/latest/gr/rande.html#eks_region), AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
|
||||
|
||||
- Create the cluster:
|
||||
```bash
|
||||
eksctl create cluster
|
||||
```
|
||||
|
||||
- Cluster can take a long time to be ready (15-20 minutes is typical)
|
||||
|
||||
- Add cluster add-ons
|
||||
|
||||
(by default, it doesn't come with metrics-server, logging, etc.)
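For example, metrics-server can be added afterwards with its upstream manifest (an illustration; check the metrics-server documentation for the manifest matching your cluster version):

```bash
# Install metrics-server from its published components manifest
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# Confirm that it's running and serving node metrics
kubectl -n kube-system get deploy metrics-server
kubectl top nodes
```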
|
||||
|
||||
---
|
||||
|
||||
## Amazon EKS (cleanup)
|
||||
|
||||
- Delete the cluster:
|
||||
```bash
|
||||
eksctl delete cluster <clustername>
|
||||
```
|
||||
|
||||
- If you need to find the name of the cluster:
|
||||
```bash
|
||||
eksctl get clusters
|
||||
```
|
||||
|
||||
.footnote[Note: the AWS documentation has been updated and now includes [eksctl instructions](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html).]
|
||||
|
||||
---
|
||||
|
||||
## Amazon EKS (notes)
|
||||
|
||||
- Convenient if you *have to* use AWS
|
||||
|
||||
- Needs extra steps to be truly production-ready
|
||||
|
||||
- [Versions tend to be outdated](https://twitter.com/jpetazzo/status/1252948707680686081)
|
||||
|
||||
- The only officially supported pod network is the [Amazon VPC CNI plugin](https://docs.aws.amazon.com/eks/latest/userguide/pod-networking.html)
|
||||
|
||||
- integrates tightly with security groups and VPC networking
|
||||
|
||||
- not suitable for high density clusters (with many small pods on big nodes)
|
||||
|
||||
- other plugins [should still work](https://docs.aws.amazon.com/eks/latest/userguide/alternate-cni-plugins.html) but will require extra work
|
||||
|
||||
---
|
||||
|
||||
## Digital Ocean (initial setup)
|
||||
|
||||
- Install `doctl`
|
||||
@@ -242,14 +214,185 @@ with a cloud provider
|
||||
|
||||
---
|
||||
|
||||
## GKE (initial setup)
|
||||
|
||||
- Install `gcloud`
|
||||
|
||||
- Login:
|
||||
```bash
|
||||
gcloud init
|
||||
```
|
||||
|
||||
- Create a "project":
|
||||
```bash
|
||||
gcloud projects create my-gke-project
|
||||
gcloud config set project my-gke-project
|
||||
```
|
||||
|
||||
- Pick a [region](https://cloud.google.com/compute/docs/regions-zones/)
|
||||
|
||||
(example: `europe-west1`, `us-west1`, ...)
|
||||
|
||||
---
|
||||
|
||||
## GKE (create cluster)
|
||||
|
||||
- Create the cluster:
|
||||
```bash
|
||||
gcloud container clusters create my-gke-cluster --region us-west1 --num-nodes=2
|
||||
```
|
||||
|
||||
(without `--num-nodes` you might exhaust your IP address quota!)
|
||||
|
||||
- The first time you try to create a cluster in a given project, you get an error
|
||||
|
||||
- you need to enable the Kubernetes Engine API
|
||||
- the error message gives you a link
|
||||
- follow the link and enable the API (and billing)
|
||||
<br/>(it's just a couple of clicks and it's instantaneous)
|
||||
|
||||
- The cluster should be ready in a couple of minutes
|
||||
|
||||
---
|
||||
|
||||
## GKE (cleanup)
|
||||
|
||||
- List clusters (if you forgot its name):
|
||||
```bash
|
||||
gcloud container clusters list
|
||||
```
|
||||
|
||||
- Delete the cluster:
|
||||
```bash
|
||||
gcloud container clusters delete my-gke-cluster --region us-west1
|
||||
```
|
||||
|
||||
- Delete the project (optional):
|
||||
```bash
|
||||
gcloud projects delete my-gke-project
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## GKE (notes)
|
||||
|
||||
- Well-rounded product overall
|
||||
|
||||
(it used to be one of the best managed Kubernetes offerings available;
|
||||
now that many other providers entered the game, that title is debatable)
|
||||
|
||||
- The cluster comes with many add-ons
|
||||
|
||||
- Versions lag a bit:
|
||||
|
||||
- latest minor version (e.g. 1.18) tends to be unsupported
|
||||
|
||||
- previous minor version (e.g. 1.17) supported through alpha channel
|
||||
|
||||
- previous versions (e.g. 1.14-1.16) supported
|
||||
|
||||
---
|
||||
|
||||
## Scaleway (initial setup)
|
||||
|
||||
- After creating your account, make sure you set a password or get an API key
|
||||
|
||||
(by default, it uses email "magic links" to sign in)
|
||||
|
||||
- Install `scw`
|
||||
|
||||
(you need [CLI v2](https://github.com/scaleway/scaleway-cli/tree/v2#Installation), which is in beta as of May 2020)
|
||||
|
||||
- Generate the CLI configuration with `scw init`
|
||||
|
||||
(it will prompt for your API key, or email + password)
|
||||
|
||||
---
|
||||
|
||||
## Scaleway (create cluster)
|
||||
|
||||
- Create the cluster:
|
||||
```bash
|
||||
scw k8s cluster create name=my-kapsule-cluster version=1.18.3 cni=cilium \
|
||||
default-pool-config.node-type=DEV1-M default-pool-config.size=3
|
||||
```
|
||||
|
||||
- After less than 5 minutes, cluster state will be `ready`
|
||||
|
||||
(check cluster status with e.g. `scw k8s cluster list` on a wide terminal)
|
||||
|
||||
- Add connection information to your `.kube/config` file:
|
||||
```bash
|
||||
scw k8s kubeconfig install `CLUSTERID`
|
||||
```
|
||||
|
||||
(the cluster ID is shown by `scw k8s cluster list`)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Scaleway (automation)
|
||||
|
||||
- If you want to obtain the cluster ID programmatically, this will do it:
|
||||
|
||||
```bash
|
||||
scw k8s cluster list
|
||||
# or
|
||||
CLUSTERID=$(scw k8s cluster list -o json | \
|
||||
jq -r '.[] | select(.name=="my-kapsule-cluster") | .id')
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Scaleway (cleanup)
|
||||
|
||||
- Get cluster ID (e.g. with `scw k8s cluster list`)
|
||||
|
||||
- Delete the cluster:
|
||||
```bash
|
||||
scw k8s cluster delete cluster-id=$CLUSTERID
|
||||
```
|
||||
|
||||
- Warning: as of May 2020, load balancers have to be deleted separately!
|
||||
|
||||
---
|
||||
|
||||
## Scaleway (notes)
|
||||
|
||||
- The `create` command is a bit more complex than with other providers
|
||||
|
||||
(you must specify the Kubernetes version, CNI plugin, and node type)
|
||||
|
||||
- To see available versions and CNI plugins, run `scw k8s version list`
|
||||
|
||||
- As of May 2020, Kapsule supports:
|
||||
|
||||
- multiple CNI plugins, including: cilium, calico, weave, flannel
|
||||
|
||||
- Kubernetes versions 1.15 to 1.18
|
||||
|
||||
- multiple container runtimes, including: Docker, containerd, CRI-O
|
||||
|
||||
- To see available node types and their price, check their [pricing page](https://www.scaleway.com/en/pricing/)
|
||||
|
||||
---
|
||||
|
||||
## More options
|
||||
|
||||
- Alibaba Cloud
|
||||
|
||||
- [IBM Cloud](https://console.bluemix.net/docs/containers/cs_cli_install.html#cs_cli_install)
|
||||
|
||||
- OVH
|
||||
- [Linode Kubernetes Engine (LKE)](https://www.linode.com/products/kubernetes/)
|
||||
|
||||
- Scaleway (private beta)
|
||||
- OVHcloud [Managed Kubernetes Service](https://www.ovhcloud.com/en/public-cloud/kubernetes/)
|
||||
|
||||
- ...
|
||||
|
||||
???
|
||||
|
||||
:EN:- Installing a managed cluster
|
||||
:FR:- Installer un cluster infogéré
|
||||
|
||||
192
slides/k8s/setup-overview.md
Normal file
@@ -0,0 +1,192 @@
|
||||
# Setting up Kubernetes
|
||||
|
||||
- Kubernetes is made of many components that require careful configuration
|
||||
|
||||
- Secure operation typically requires TLS certificates and a local CA
|
||||
|
||||
(certificate authority)
|
||||
|
||||
- Setting up everything manually is possible, but rarely done
|
||||
|
||||
(except for learning purposes)
|
||||
|
||||
- Let's do a quick overview of available options!
|
||||
|
||||
---
|
||||
|
||||
## Local development
|
||||
|
||||
- Are you writing code that will eventually run on Kubernetes?
|
||||
|
||||
- Then it's a good idea to have a development cluster!
|
||||
|
||||
- Development clusters only need one node
|
||||
|
||||
- This simplifies their setup a lot:
|
||||
|
||||
- pod networking doesn't even need CNI plugins, overlay networks, etc.
|
||||
|
||||
- they can be fully contained (no pun intended) in an easy-to-ship VM image
|
||||
|
||||
- some of the security aspects may be simplified (different threat model)
|
||||
|
||||
- Examples: Docker Desktop, k3d, KinD, MicroK8s, Minikube
|
||||
|
||||
(some of these also support clusters with multiple nodes)
|
||||
|
||||
---
|
||||
|
||||
## Managed clusters
|
||||
|
||||
- Many cloud providers and hosting providers offer "managed Kubernetes"
|
||||
|
||||
- The deployment and maintenance of the cluster is entirely managed by the provider
|
||||
|
||||
(ideally, clusters can be spun up automatically through an API, CLI, or web interface)
|
||||
|
||||
- Given the complexity of Kubernetes, this approach is *strongly recommended*
|
||||
|
||||
(at least for your first production clusters)
|
||||
|
||||
- After working for a while with Kubernetes, you will be better equipped to decide:
|
||||
|
||||
- whether to operate it yourself or use a managed offering
|
||||
|
||||
- which offering or which distribution works best for you and your needs
|
||||
|
||||
---
|
||||
|
||||
## Managed clusters details
|
||||
|
||||
- Pricing models differ from one provider to another
|
||||
|
||||
- nodes are generally charged at their usual price
|
||||
|
||||
- control plane may be free or incur a small nominal fee
|
||||
|
||||
- Beyond pricing, there are *huge* differences in features between providers
|
||||
|
||||
- The "major" providers are not always the best ones!
|
||||
|
||||
---
|
||||
|
||||
## Managed clusters differences
|
||||
|
||||
- Most providers let you pick which Kubernetes version you want
|
||||
|
||||
- some providers offer up-to-date versions
|
||||
|
||||
- others lag significantly (sometimes by 2 or 3 minor versions)
|
||||
|
||||
- Some providers offer multiple networking or storage options
|
||||
|
||||
- Others will only support one, tied to their infrastructure
|
||||
|
||||
(changing that is in theory possible, but might be complex or unsupported)
|
||||
|
||||
- Some providers let you configure or customize the control plane
|
||||
|
||||
(generally through Kubernetes "feature gates")
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes distributions and installers
|
||||
|
||||
- If you want to run Kubernetes yourselves, there are many options
|
||||
|
||||
(free, commercial, proprietary, open source ...)
|
||||
|
||||
- Some of them are installers, while some are complete platforms
|
||||
|
||||
- Some of them leverage other well-known deployment tools
|
||||
|
||||
(like Puppet, Terraform ...)
|
||||
|
||||
- A good starting point to explore these options is this [guide](https://v1-16.docs.kubernetes.io/docs/setup/#production-environment)
|
||||
|
||||
(it defines categories like "managed", "turnkey" ...)
|
||||
|
||||
---
|
||||
|
||||
## kubeadm
|
||||
|
||||
- kubeadm is a tool that is part of Kubernetes and facilitates cluster setup
|
||||
|
||||
- Many other installers and distributions use it (but not all of them)
|
||||
|
||||
- It can also be used by itself
|
||||
|
||||
- Excellent starting point to install Kubernetes on your own machines
|
||||
|
||||
(virtual, physical, it doesn't matter)
|
||||
|
||||
- It even supports highly available control planes, or "multi-master"
|
||||
|
||||
(this is more complex, though, because it introduces the need for an API load balancer)
|
||||
|
||||
---
|
||||
|
||||
## Manual setup
|
||||
|
||||
- The resources below are mainly for educational purposes!
|
||||
|
||||
- [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) by Kelsey Hightower
|
||||
|
||||
- step by step guide to install Kubernetes on Google Cloud
|
||||
|
||||
- covers certificates, high availability ...
|
||||
|
||||
- *“Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.”*
|
||||
|
||||
- [Deep Dive into Kubernetes Internals for Builders and Operators](https://www.youtube.com/watch?v=3KtEAa7_duA)
|
||||
|
||||
- conference presentation showing step-by-step control plane setup
|
||||
|
||||
- emphasis on simplicity, not on security and availability
|
||||
|
||||
---
|
||||
|
||||
## About our training clusters
|
||||
|
||||
- How did we set up these Kubernetes clusters that we're using?
|
||||
|
||||
--
|
||||
|
||||
- We used `kubeadm` on freshly installed VM instances running Ubuntu LTS
|
||||
|
||||
1. Install Docker
|
||||
|
||||
2. Install Kubernetes packages
|
||||
|
||||
3. Run `kubeadm init` on the first node (it deploys the control plane on that node)
|
||||
|
||||
4. Set up Weave (the overlay network) with a single `kubectl apply` command
|
||||
|
||||
5. Run `kubeadm join` on the other nodes (with the token produced by `kubeadm init`)
|
||||
|
||||
6. Copy the configuration file generated by `kubeadm init`
|
||||
|
||||
- Check the [prepare VMs README](https://@@GITREPO@@/blob/master/prepare-vms/README.md) for more details
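To make steps 3-5 above more concrete, here is a rough sketch of the commands involved (placeholders in angle brackets; the exact invocations live in the repo mentioned above):

```bash
# Step 3, on the first node: deploy the control plane
sudo kubeadm init
# Make kubectl work for the current user (as suggested in kubeadm's output)
mkdir -p $HOME/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Step 4: set up the overlay network (one "kubectl apply" of the Weave Net manifest)
# kubectl apply -f <weave-net-manifest-url>
# Step 5, on each other node: join the cluster with the token printed by "kubeadm init"
sudo kubeadm join <control-plane-ip>:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>
```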
|
||||
|
||||
---
|
||||
|
||||
## `kubeadm` "drawbacks"
|
||||
|
||||
- Doesn't set up Docker or any other container engine
|
||||
|
||||
(this is by design, to give us choice)
|
||||
|
||||
- Doesn't set up the overlay network
|
||||
|
||||
(this is also by design, for the same reasons)
|
||||
|
||||
- HA control plane requires [some extra steps](https://kubernetes.io/docs/setup/independent/high-availability/)
|
||||
|
||||
- Note that HA control plane also requires setting up a specific API load balancer
|
||||
|
||||
(which is beyond the scope of kubeadm)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Various ways to install Kubernetes
|
||||
:FR:- Survol des techniques d'installation de Kubernetes
|
||||
@@ -1,5 +1,15 @@
|
||||
# Kubernetes distributions and installers
|
||||
|
||||
- Sometimes, we need to run Kubernetes ourselves
|
||||
|
||||
(as opposed to "use a managed offering")
|
||||
|
||||
- Beware: it takes *a lot of work* to set up and maintain Kubernetes
|
||||
|
||||
- It might be necessary if you have specific security or compliance requirements
|
||||
|
||||
(e.g. national security for states that don't have a suitable domestic cloud)
|
||||
|
||||
- There are [countless](https://kubernetes.io/docs/setup/pick-right-solution/) distributions available
|
||||
|
||||
- We can't review them all
|
||||
@@ -8,7 +18,7 @@
|
||||
|
||||
---
|
||||
|
||||
## kops
|
||||
## [kops](https://github.com/kubernetes/kops)
|
||||
|
||||
- Deploys Kubernetes using cloud infrastructure
|
||||
|
||||
@@ -32,7 +42,7 @@
|
||||
|
||||
---
|
||||
|
||||
## Kubespray
|
||||
## [kubespray](https://github.com/kubernetes-incubator/kubespray)
|
||||
|
||||
- Based on Ansible
|
||||
|
||||
@@ -78,15 +88,21 @@
|
||||
|
||||
## And many more ...
|
||||
|
||||
- [AKS Engine](https://github.com/Azure/aks-engine)
|
||||
|
||||
- Docker Enterprise Edition
|
||||
|
||||
- [AKS Engine](https://github.com/Azure/aks-engine)
|
||||
- [Lokomotive](https://github.com/kinvolk/lokomotive), leveraging Terraform and [Flatcar Linux](https://www.flatcar-linux.org/)
|
||||
|
||||
- Pivotal Container Service (PKS)
|
||||
|
||||
- Tectonic by CoreOS
|
||||
- [Tarmak](https://github.com/jetstack/tarmak), leveraging Puppet and Terraform
|
||||
|
||||
- etc.
|
||||
- Tectonic by CoreOS (now being integrated into Red Hat OpenShift)
|
||||
|
||||
- [Typhoon](https://typhoon.psdn.io/), leveraging Terraform
|
||||
|
||||
- VMware Tanzu Kubernetes Grid (TKG)
|
||||
|
||||
---
|
||||
|
||||
@@ -108,3 +124,8 @@
|
||||
<br/>(do they need training?)
|
||||
|
||||
- etc.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes distributions and installers
|
||||
:FR:- L'offre Kubernetes "on premises"
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.