mirror of
https://github.com/jpetazzo/container.training.git
synced 2026-03-02 17:30:20 +00:00
Compare commits
12 Commits
2021-06-sc
...
2021-03-lk
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1292168d4e | ||
|
|
220103f2fd | ||
|
|
372eb2e717 | ||
|
|
6185ad6ff3 | ||
|
|
ee9c114da0 | ||
|
|
edf496df13 | ||
|
|
018f06a409 | ||
|
|
c283d7e7d6 | ||
|
|
cd9f1cc645 | ||
|
|
9c3ab19918 | ||
|
|
a8ecffbaf0 | ||
|
|
fc170fe4a7 |
@@ -1,6 +1,3 @@
|
||||
# Note: hyperkube isn't available after Kubernetes 1.18.
|
||||
# So we'll have to update this for Kubernetes 1.19!
|
||||
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: fluentd
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: fluentd
|
||||
@@ -21,7 +21,7 @@ rules:
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: fluentd
|
||||
roleRef:
|
||||
|
||||
@@ -11,7 +11,7 @@ metadata:
|
||||
name: elasticsearch-operator
|
||||
namespace: elasticsearch-operator
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: elasticsearch-operator
|
||||
@@ -41,7 +41,7 @@ rules:
|
||||
resources: ["elasticsearchclusters"]
|
||||
verbs: ["*"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: elasticsearch-operator
|
||||
@@ -55,16 +55,13 @@ subjects:
|
||||
name: elasticsearch-operator
|
||||
namespace: elasticsearch-operator
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: elasticsearch-operator
|
||||
namespace: elasticsearch-operator
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: elasticsearch-operator
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
|
||||
@@ -131,7 +131,7 @@ spec:
|
||||
path: /var/lib/filebeat-data
|
||||
type: DirectoryOrCreate
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: filebeat
|
||||
@@ -144,7 +144,7 @@ roleRef:
|
||||
name: filebeat
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: filebeat
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
@@ -11,4 +11,4 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
namespace: kube-system
|
||||
@@ -1,34 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: hackthecluster
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: hackthecluster
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: hackthecluster
|
||||
spec:
|
||||
volumes:
|
||||
- name: slash
|
||||
hostPath:
|
||||
path: /
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: alpine
|
||||
image: alpine
|
||||
volumeMounts:
|
||||
- name: slash
|
||||
mountPath: /hostfs
|
||||
command:
|
||||
- sleep
|
||||
- infinity
|
||||
securityContext:
|
||||
#privileged: true
|
||||
capabilities:
|
||||
add:
|
||||
- SYS_CHROOT
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: whatever
|
||||
spec:
|
||||
#tls:
|
||||
#- secretName: whatever.A.B.C.D.nip.io
|
||||
# hosts:
|
||||
# - whatever.A.B.C.D.nip.io
|
||||
rules:
|
||||
- host: whatever.A.B.C.D.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: whatever
|
||||
port:
|
||||
number: 1234
|
||||
@@ -1,17 +0,0 @@
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: whatever
|
||||
spec:
|
||||
#tls:
|
||||
#- secretName: whatever.A.B.C.D.nip.io
|
||||
# hosts:
|
||||
# - whatever.A.B.C.D.nip.io
|
||||
rules:
|
||||
- host: whatever.A.B.C.D.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: whatever
|
||||
servicePort: 1234
|
||||
@@ -1 +0,0 @@
|
||||
ingress-v1beta1.yaml
|
||||
17
k8s/ingress.yaml
Normal file
17
k8s/ingress.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: whatever
|
||||
spec:
|
||||
#tls:
|
||||
#- secretName: whatever.A.B.C.D.nip.io
|
||||
# hosts:
|
||||
# - whatever.A.B.C.D.nip.io
|
||||
rules:
|
||||
- host: whatever.A.B.C.D.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: whatever
|
||||
servicePort: 1234
|
||||
@@ -1,50 +1,49 @@
|
||||
# This is a local copy of:
|
||||
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: local-path-storage
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: local-path-provisioner-service-account
|
||||
namespace: local-path-storage
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: local-path-provisioner-role
|
||||
namespace: local-path-storage
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
|
||||
verbs: [ "get", "list", "watch" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "endpoints", "persistentvolumes", "pods" ]
|
||||
verbs: [ "*" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "events" ]
|
||||
verbs: [ "create", "patch" ]
|
||||
- apiGroups: [ "storage.k8s.io" ]
|
||||
resources: [ "storageclasses" ]
|
||||
verbs: [ "get", "list", "watch" ]
|
||||
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes", "persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints", "persistentvolumes", "pods"]
|
||||
verbs: ["*"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: local-path-provisioner-bind
|
||||
namespace: local-path-storage
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: local-path-provisioner-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: local-path-provisioner-service-account
|
||||
namespace: local-path-storage
|
||||
|
||||
- kind: ServiceAccount
|
||||
name: local-path-provisioner-service-account
|
||||
namespace: local-path-storage
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -63,28 +62,27 @@ spec:
|
||||
spec:
|
||||
serviceAccountName: local-path-provisioner-service-account
|
||||
containers:
|
||||
- name: local-path-provisioner
|
||||
image: rancher/local-path-provisioner:v0.0.19
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- local-path-provisioner
|
||||
- --debug
|
||||
- start
|
||||
- --config
|
||||
- /etc/config/config.json
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/config/
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: local-path-provisioner
|
||||
image: rancher/local-path-provisioner:v0.0.8
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- local-path-provisioner
|
||||
- --debug
|
||||
- start
|
||||
- --config
|
||||
- /etc/config/config.json
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/config/
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: local-path-config
|
||||
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
@@ -93,7 +91,6 @@ metadata:
|
||||
provisioner: rancher.io/local-path
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
reclaimPolicy: Delete
|
||||
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
@@ -102,59 +99,12 @@ metadata:
|
||||
namespace: local-path-storage
|
||||
data:
|
||||
config.json: |-
|
||||
{
|
||||
"nodePathMap":[
|
||||
{
|
||||
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
|
||||
"paths":["/opt/local-path-provisioner"]
|
||||
}
|
||||
]
|
||||
}
|
||||
setup: |-
|
||||
#!/bin/sh
|
||||
while getopts "m:s:p:" opt
|
||||
do
|
||||
case $opt in
|
||||
p)
|
||||
absolutePath=$OPTARG
|
||||
;;
|
||||
s)
|
||||
sizeInBytes=$OPTARG
|
||||
;;
|
||||
m)
|
||||
volMode=$OPTARG
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
mkdir -m 0777 -p ${absolutePath}
|
||||
teardown: |-
|
||||
#!/bin/sh
|
||||
while getopts "m:s:p:" opt
|
||||
do
|
||||
case $opt in
|
||||
p)
|
||||
absolutePath=$OPTARG
|
||||
;;
|
||||
s)
|
||||
sizeInBytes=$OPTARG
|
||||
;;
|
||||
m)
|
||||
volMode=$OPTARG
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
rm -rf ${absolutePath}
|
||||
helperPod.yaml: |-
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: helper-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: helper-pod
|
||||
image: busybox
|
||||
|
||||
|
||||
{
|
||||
"nodePathMap":[
|
||||
{
|
||||
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
|
||||
"paths":["/opt/local-path-provisioner"]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
@@ -1,61 +1,32 @@
|
||||
# This file is https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
|
||||
# But with the following arguments added to metrics-server:
|
||||
# args:
|
||||
# - --kubelet-insecure-tls
|
||||
# - --metric-resolution=5s
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: system:aggregated-metrics-reader
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
rules:
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: metrics-server:system:auth-delegator
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:auth-delegator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
name: system:aggregated-metrics-reader
|
||||
rules:
|
||||
- apiGroups:
|
||||
- metrics.k8s.io
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
name: system:metrics-server
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
- nodes/stats
|
||||
- namespaces
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
name: metrics-server-auth-reader
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
@@ -67,127 +38,101 @@ subjects:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: apiregistration.k8s.io/v1beta1
|
||||
kind: APIService
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
name: metrics-server:system:auth-delegator
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:auth-delegator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
name: system:metrics-server
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:metrics-server
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
name: v1beta1.metrics.k8s.io
|
||||
spec:
|
||||
service:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
group: metrics.k8s.io
|
||||
version: v1beta1
|
||||
insecureSkipTLSVerify: true
|
||||
groupPriorityMinimum: 100
|
||||
versionPriority: 100
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
spec:
|
||||
ports:
|
||||
- name: https
|
||||
port: 443
|
||||
protocol: TCP
|
||||
targetPort: https
|
||||
selector:
|
||||
k8s-app: metrics-server
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: metrics-server
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
name: metrics-server
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --cert-dir=/tmp
|
||||
- --secure-port=4443
|
||||
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
|
||||
- --kubelet-use-node-status-port
|
||||
- --kubelet-insecure-tls
|
||||
- --metric-resolution=5s
|
||||
image: k8s.gcr.io/metrics-server/metrics-server:v0.4.3
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /livez
|
||||
port: https
|
||||
scheme: HTTPS
|
||||
periodSeconds: 10
|
||||
name: metrics-server
|
||||
ports:
|
||||
- containerPort: 4443
|
||||
name: https
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: https
|
||||
scheme: HTTPS
|
||||
periodSeconds: 10
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-dir
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
priorityClassName: system-cluster-critical
|
||||
serviceAccountName: metrics-server
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: tmp-dir
|
||||
# mount in tmp so we can safely use from-scratch images and/or read-only containers
|
||||
- name: tmp-dir
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: metrics-server
|
||||
image: k8s.gcr.io/metrics-server-amd64:v0.3.3
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: tmp-dir
|
||||
mountPath: /tmp
|
||||
args:
|
||||
- --kubelet-preferred-address-types=InternalIP
|
||||
- --kubelet-insecure-tls
|
||||
- --metric-resolution=5s
|
||||
|
||||
---
|
||||
apiVersion: apiregistration.k8s.io/v1
|
||||
kind: APIService
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
name: v1beta1.metrics.k8s.io
|
||||
kubernetes.io/name: "Metrics-server"
|
||||
spec:
|
||||
group: metrics.k8s.io
|
||||
groupPriorityMinimum: 100
|
||||
insecureSkipTLSVerify: true
|
||||
service:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
version: v1beta1
|
||||
versionPriority: 100
|
||||
selector:
|
||||
k8s-app: metrics-server
|
||||
ports:
|
||||
- port: 443
|
||||
protocol: TCP
|
||||
targetPort: 443
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: system:metrics-server
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
- nodes/stats
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:metrics-server
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:metrics-server
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
|
||||
@@ -49,8 +49,24 @@ spec:
|
||||
- --kubernetes
|
||||
- --logLevel=INFO
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: traefik-ingress-service
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: traefik-ingress-lb
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
name: web
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
name: admin
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
rules:
|
||||
@@ -74,7 +90,7 @@ rules:
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
roleRef:
|
||||
|
||||
@@ -55,8 +55,28 @@ spec:
|
||||
- --entrypoints.https.Address=:443
|
||||
- --entrypoints.https.http.tls.certResolver=default
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: traefik-ingress-service
|
||||
namespace: kube-system
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8080"
|
||||
prometheus.io/path: "/metrics"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: traefik-ingress-lb
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
name: web
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
name: admin
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
rules:
|
||||
@@ -89,7 +109,7 @@ rules:
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
roleRef:
|
||||
|
||||
@@ -4,11 +4,7 @@ These tools can help you to create VMs on:
|
||||
|
||||
- Azure
|
||||
- EC2
|
||||
- Hetzner
|
||||
- Linode
|
||||
- OpenStack
|
||||
- OVHcloud
|
||||
- Scaleway
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@@ -17,8 +13,7 @@ These tools can help you to create VMs on:
|
||||
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
|
||||
|
||||
Depending on the infrastructure that you want to use, you also need to install
|
||||
the CLI that is specific to that cloud. For OpenStack deployments, you will
|
||||
need Terraform.
|
||||
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).
|
||||
|
||||
And if you want to generate printable cards:
|
||||
|
||||
@@ -95,9 +90,6 @@ You're all set!
|
||||
|
||||
## `./workshopctl` Usage
|
||||
|
||||
If you run `./workshopctl` without arguments, it will show a list of
|
||||
available commands, looking like this:
|
||||
|
||||
```
|
||||
workshopctl - the orchestration workshop swiss army knife
|
||||
Commands:
|
||||
@@ -106,7 +98,32 @@ cards Generate ready-to-print cards for a group of VMs
|
||||
deploy Install Docker on a bunch of running VMs
|
||||
disableaddrchecks Disable source/destination IP address checks
|
||||
disabledocker Stop Docker Engine and don't restart it automatically
|
||||
...
|
||||
helmprom Install Helm and Prometheus
|
||||
help Show available commands
|
||||
ids (FIXME) List the instance IDs belonging to a given tag or token
|
||||
kubebins Install Kubernetes and CNI binaries but don't start anything
|
||||
kubereset Wipe out Kubernetes configuration on all nodes
|
||||
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
|
||||
kubetest Check that all nodes are reporting as Ready
|
||||
listall List VMs running on all configured infrastructures
|
||||
list List available groups for a given infrastructure
|
||||
netfix Disable GRO and run a pinger job on the VMs
|
||||
opensg Open the default security group to ALL ingress traffic
|
||||
ping Ping VMs in a given tag, to check that they have network access
|
||||
pssh Run an arbitrary command on all nodes
|
||||
pull_images Pre-pull a bunch of Docker images
|
||||
quotas Check our infrastructure quotas (max instances)
|
||||
remap_nodeports Remap NodePort range to 10000-10999
|
||||
retag (FIXME) Apply a new tag to a group of VMs
|
||||
ssh Open an SSH session to the first node of a tag
|
||||
start Start a group of VMs
|
||||
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
|
||||
tags List groups of VMs known locally
|
||||
test Run tests (pre-flight checks) on a group of VMs
|
||||
weavetest Check that weave seems properly setup
|
||||
webssh Install a WEB SSH server on the machines (port 1080)
|
||||
wrap Run this program in a container
|
||||
www Run a web server to access card HTML and PDF
|
||||
```
|
||||
|
||||
### Summary of What `./workshopctl` Does For You
|
||||
@@ -121,8 +138,7 @@ disabledocker Stop Docker Engine and don't restart it automatically
|
||||
|
||||
### Example Steps to Launch a group of AWS Instances for a Workshop
|
||||
|
||||
- Run `./workshopctl start --infra infra/aws-us-east-2 --settings/myworkshop.yaml --students 50` to create 50 clusters
|
||||
- The number of instances will be `students × clustersize`
|
||||
- Run `./workshopctl start --infra infra/aws-us-east-2 --settings/myworkshop.yaml --count 60` to create 60 EC2 instances
|
||||
- Your local SSH key will be synced to instances under `ubuntu` user
|
||||
- AWS instances will be created and tagged based on date, and IP's stored in `prepare-vms/tags/`
|
||||
- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh
|
||||
@@ -232,19 +248,12 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m
|
||||
|
||||
#### List tags
|
||||
|
||||
$ ./workshopctl list infra/some-infra-file
|
||||
|
||||
$ ./workshopctl listall
|
||||
|
||||
$ ./workshopctl tags
|
||||
|
||||
$ ./workshopctl inventory infra/some-infra-file
|
||||
|
||||
$ ./workshopctl inventory
|
||||
|
||||
Note: the `tags` command will show only the VMs that you have provisioned
|
||||
and deployed on the current machine (i.e. listed in the `tags` subdirectory).
|
||||
The `inventory` command will try to list all existing VMs (including the
|
||||
ones not listed in the `tags` directory, and including VMs provisioned
|
||||
through other mechanisms). It is not supported across all platforms,
|
||||
however.
|
||||
|
||||
#### Stop and destroy VMs
|
||||
|
||||
$ ./workshopctl stop TAG
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
INFRACLASS=hetzner
|
||||
if ! [ -f ~/.config/hcloud/cli.toml ]; then
|
||||
warning "~/.config/hcloud/cli.toml not found."
|
||||
warning "Make sure that the Hetzner CLI (hcloud) is installed and configured."
|
||||
warn "~/.config/hcloud/cli.toml not found."
|
||||
warn "Make sure that the Hetzner CLI (hcloud) is installed and configured."
|
||||
fi
|
||||
|
||||
@@ -66,7 +66,7 @@ need_infra() {
|
||||
|
||||
need_tag() {
|
||||
if [ -z "$TAG" ]; then
|
||||
die "Please specify a tag. To see available tags, run: $0 tags"
|
||||
die "Please specify a tag or token. To see available tags and tokens, run: $0 list"
|
||||
fi
|
||||
if [ ! -d "tags/$TAG" ]; then
|
||||
die "Tag $TAG not found (directory tags/$TAG does not exist)."
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
export AWS_DEFAULT_OUTPUT=text
|
||||
|
||||
# Ignore SSH key validation when connecting to these remote hosts.
|
||||
# (Otherwise, deployment scripts break when a VM IP address reuse.)
|
||||
SSHOPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR"
|
||||
|
||||
HELP=""
|
||||
_cmd() {
|
||||
HELP="$(printf "%s\n%-20s %s\n" "$HELP" "$1" "$2")"
|
||||
@@ -126,7 +122,7 @@ _cmd_deploy() {
|
||||
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
|
||||
pssh "
|
||||
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
|
||||
ssh $SSHOPTS \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
ssh -o StrictHostKeyChecking=no \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
sudo -u docker tar -C /home/docker -xf-"
|
||||
|
||||
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
|
||||
@@ -170,27 +166,24 @@ _cmd_kubebins() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
##VERSION##
|
||||
ETCD_VERSION=v3.4.13
|
||||
K8SBIN_VERSION=v1.19.11 # Can't go to 1.20 because it requires a serviceaccount signing key.
|
||||
CNI_VERSION=v0.8.7
|
||||
pssh --timeout 300 "
|
||||
set -e
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-amd64.tar.gz \
|
||||
##VERSION##
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
##VERSION##
|
||||
curl -L https://dl.k8s.io/$K8SBIN_VERSION/kubernetes-server-linux-amd64.tar.gz \
|
||||
curl -L https://dl.k8s.io/v1.18.10/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx \
|
||||
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
|
||||
fi
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
if ! [ -x bridge ]; then
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-amd64-$CNI_VERSION.tgz \
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz \
|
||||
| sudo tar -zx
|
||||
fi
|
||||
"
|
||||
@@ -259,7 +252,7 @@ _cmd_kube() {
|
||||
pssh --timeout 200 "
|
||||
if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
|
||||
FIRSTNODE=\$(cat /etc/name_of_first_node) &&
|
||||
TOKEN=\$(ssh $SSHOPTS \$FIRSTNODE cat /tmp/token) &&
|
||||
TOKEN=\$(ssh -o StrictHostKeyChecking=no \$FIRSTNODE cat /tmp/token) &&
|
||||
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
|
||||
fi"
|
||||
|
||||
@@ -330,7 +323,7 @@ EOF"
|
||||
# Install the AWS IAM authenticator
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
|
||||
##VERSION##
|
||||
##VERSION##
|
||||
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
fi"
|
||||
@@ -345,17 +338,13 @@ EOF"
|
||||
echo export PATH=/home/docker/.krew/bin:\\\$PATH | sudo -u docker tee -a /home/docker/.bashrc
|
||||
fi"
|
||||
|
||||
# Install k9s
|
||||
# Install k9s and popeye
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/k9s ]; then
|
||||
VERSION=v0.24.10 &&
|
||||
FILENAME=k9s_\${VERSION}_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/k9s/releases/download/\$VERSION/\$FILENAME |
|
||||
FILENAME=k9s_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin k9s
|
||||
fi"
|
||||
|
||||
# Install popeye
|
||||
pssh "
|
||||
fi
|
||||
if [ ! -x /usr/local/bin/popeye ]; then
|
||||
FILENAME=popeye_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
|
||||
@@ -430,12 +419,12 @@ _cmd_ips() {
|
||||
done < tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
_cmd inventory "List all VMs on a given infrastructure (or all infras if no arg given)"
|
||||
_cmd_inventory() {
|
||||
_cmd list "List all VMs on a given infrastructure (or all infras if no arg given)"
|
||||
_cmd_list() {
|
||||
case "$1" in
|
||||
"")
|
||||
for INFRA in infra/*; do
|
||||
$0 inventory $INFRA
|
||||
$0 list $INFRA
|
||||
done
|
||||
;;
|
||||
*/example.*)
|
||||
@@ -448,6 +437,21 @@ _cmd_inventory() {
|
||||
esac
|
||||
}
|
||||
|
||||
_cmd listall "List VMs running on all configured infrastructures"
|
||||
_cmd_listall() {
|
||||
for infra in infra/*; do
|
||||
case $infra in
|
||||
infra/example.*)
|
||||
;;
|
||||
*)
|
||||
info "Listing infrastructure $infra:"
|
||||
need_infra $infra
|
||||
infra_list
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
_cmd maketag "Generate a quasi-unique tag for a group of instances"
|
||||
_cmd_maketag() {
|
||||
if [ -z $USER ]; then
|
||||
@@ -585,8 +589,7 @@ _cmd_ssh() {
|
||||
need_tag
|
||||
IP=$(head -1 tags/$TAG/ips.txt)
|
||||
info "Logging into $IP"
|
||||
ssh $SSHOPTS docker@$IP
|
||||
|
||||
ssh docker@$IP
|
||||
}
|
||||
|
||||
_cmd start "Start a group of VMs"
|
||||
@@ -725,7 +728,7 @@ _cmd_tmux() {
|
||||
IP=$(head -1 tags/$TAG/ips.txt)
|
||||
info "Opening ssh+tmux with $IP"
|
||||
rm -f /tmp/tmux-$UID/default
|
||||
ssh $SSHOPTS -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
|
||||
ssh -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
|
||||
}
|
||||
|
||||
_cmd helmprom "Install Helm and Prometheus"
|
||||
@@ -744,31 +747,6 @@ _cmd_helmprom() {
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd passwords "Set individual passwords for each cluster"
|
||||
_cmd_passwords() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
PASSWORDS_FILE="tags/$TAG/passwords"
|
||||
if ! [ -f "$PASSWORDS_FILE" ]; then
|
||||
error "File $PASSWORDS_FILE not found. Please create it first."
|
||||
error "It should contain one password per line."
|
||||
error "It should have as many lines as there are clusters."
|
||||
die "Aborting."
|
||||
fi
|
||||
N_CLUSTERS=$($0 ips "$TAG" | wc -l)
|
||||
N_PASSWORDS=$(wc -l < "$PASSWORDS_FILE")
|
||||
if [ "$N_CLUSTERS" != "$N_PASSWORDS" ]; then
|
||||
die "Found $N_CLUSTERS clusters and $N_PASSWORDS passwords. Aborting."
|
||||
fi
|
||||
$0 ips "$TAG" | paste "$PASSWORDS_FILE" - | while read password nodes; do
|
||||
info "Setting password for $nodes..."
|
||||
for node in $nodes; do
|
||||
echo docker:$password | ssh $SSHOPTS ubuntu@$node sudo chpasswd
|
||||
done
|
||||
done
|
||||
info "Done."
|
||||
}
|
||||
|
||||
# Sometimes, weave fails to come up on some nodes.
|
||||
# Symptom: the pods on a node are unreachable (they don't even ping).
|
||||
# Remedy: wipe out Weave state and delete weave pod on that node.
|
||||
@@ -897,7 +875,10 @@ test_vm() {
|
||||
"ls -la /home/docker/.ssh"; do
|
||||
sep "$cmd"
|
||||
echo "$cmd" \
|
||||
| ssh -A $SSHOPTS $user@$ip sudo -u docker -i \
|
||||
| ssh -A -q \
|
||||
-o "UserKnownHostsFile /dev/null" \
|
||||
-o "StrictHostKeyChecking=no" \
|
||||
$user@$ip sudo -u docker -i \
|
||||
|| {
|
||||
status=$?
|
||||
error "$cmd exit status: $status"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
if ! command -v aws >/dev/null; then
|
||||
warning "AWS CLI (aws) not found."
|
||||
warn "AWS CLI (aws) not found."
|
||||
fi
|
||||
|
||||
infra_list() {
|
||||
@@ -217,7 +217,7 @@ aws_tag_instances() {
|
||||
|
||||
aws_get_ami() {
|
||||
##VERSION##
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a ${AWS_ARCHITECTURE-amd64} -v 18.04 -t hvm:ebs -N -q
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
|
||||
}
|
||||
|
||||
aws_greet() {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
if ! command -v hcloud >/dev/null; then
|
||||
warning "Hetzner CLI (hcloud) not found."
|
||||
warn "Hetzner CLI (hcloud) not found."
|
||||
fi
|
||||
if ! [ -f ~/.config/hcloud/cli.toml ]; then
|
||||
warning "~/.config/hcloud/cli.toml not found."
|
||||
warn "~/.config/hcloud/cli.toml not found."
|
||||
fi
|
||||
|
||||
infra_list() {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
if ! command -v linode-cli >/dev/null; then
|
||||
warning "Linode CLI (linode-cli) not found."
|
||||
warn "Linode CLI (linode-cli) not found."
|
||||
fi
|
||||
if ! [ -f ~/.config/linode-cli ]; then
|
||||
warning "~/.config/linode-cli not found."
|
||||
warn "~/.config/linode-cli not found."
|
||||
fi
|
||||
|
||||
# To view available regions: "linode-cli regions list"
|
||||
|
||||
@@ -1,28 +1,20 @@
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
COUNT=$1
|
||||
|
||||
cp terraform/*.tf tags/$TAG
|
||||
(
|
||||
cd tags/$TAG
|
||||
if ! terraform init; then
|
||||
error "'terraform init' failed."
|
||||
error "If it mentions the following error message:"
|
||||
error "openpgp: signature made by unknown entity."
|
||||
error "Then you need to upgrade Terraform to 0.11.15"
|
||||
error "to upgrade its signing keys following the"
|
||||
error "codecov breach."
|
||||
die "Aborting."
|
||||
fi
|
||||
echo prefix = \"$TAG\" >> terraform.tfvars
|
||||
echo count = \"$COUNT\" >> terraform.tfvars
|
||||
terraform apply -auto-approve
|
||||
terraform output ip_addresses > ips.txt
|
||||
)
|
||||
cp terraform/*.tf tags/$TAG
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform init
|
||||
echo prefix = \"$TAG\" >> terraform.tfvars
|
||||
echo count = \"$COUNT\" >> terraform.tfvars
|
||||
terraform apply -auto-approve
|
||||
terraform output ip_addresses > ips.txt
|
||||
)
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform destroy -auto-approve
|
||||
)
|
||||
}
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform destroy -auto-approve
|
||||
)
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
if ! command -v scw >/dev/null; then
|
||||
warning "Scaleway CLI (scw) not found."
|
||||
warn "Scaleway CLI (scw) not found."
|
||||
fi
|
||||
if ! [ -f ~/.config/scw/config.yaml ]; then
|
||||
warning "~/.config/scw/config.yaml not found."
|
||||
warn "~/.config/scw/config.yaml not found."
|
||||
fi
|
||||
|
||||
SCW_INSTANCE_TYPE=${SCW_INSTANCE_TYPE-DEV1-M}
|
||||
|
||||
24
prepare-vms/settings/kube101.yaml
Normal file
24
prepare-vms/settings/kube101.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# 3 nodes for k8s 101 workshops
|
||||
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -30,7 +30,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
--count $STUDENTS
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl disabledocker $TAG
|
||||
@@ -45,7 +45,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
@@ -60,7 +60,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
@@ -79,9 +79,10 @@ TAG=$PREFIX-$SETTINGS
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kube $TAG 1.19.11
|
||||
retry 5 ./workshopctl kube $TAG 1.17.13
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
@@ -17,7 +17,6 @@ done
|
||||
DEPENDENCIES="
|
||||
ssh
|
||||
curl
|
||||
fping
|
||||
jq
|
||||
pssh
|
||||
wkhtmltopdf
|
||||
|
||||
103
slides/2.yml
103
slides/2.yml
@@ -1,103 +0,0 @@
|
||||
title: |
|
||||
Fondamentaux Kubernetes
|
||||
|
||||
chat: "[Slack](https://scaleway.slack.com/archives/C024GSPUGG1)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-06-scaleway.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- shared/toc.md
|
||||
- # 1
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- # 2
|
||||
- k8s/kubectl-run.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
- # 3
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/yamldeploy.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- # 4
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
- # 5
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- # 6
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- # 7
|
||||
- k8s/ingress.md
|
||||
- k8s/ingress-tls.md
|
||||
- # 8
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/batch-jobs.md
|
||||
#- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/statefulsets.md
|
||||
#- k8s/local-persistent-volumes.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
#- k8s/whatsnext.md
|
||||
#- k8s/lastwords.md
|
||||
- shared/thankyou.md
|
||||
- k8s/links.md
|
||||
#-
|
||||
# - |
|
||||
# # (Bonus)
|
||||
# - k8s/record.md
|
||||
# - k8s/dryrun.md
|
||||
52
slides/4.yml
52
slides/4.yml
@@ -1,52 +0,0 @@
|
||||
title: |
|
||||
Kubernetes Avancé
|
||||
|
||||
chat: "[Slack](https://scaleway.slack.com/archives/C024GSPUGG1)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-06-scaleway.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- shared/toc.md
|
||||
- #1
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- #2
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/sealed-secrets.md
|
||||
- k8s/crd.md
|
||||
- k8s/exercise-sealed-secrets.md
|
||||
- #3
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- #4
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/hpa-v2.md
|
||||
- #5
|
||||
- k8s/admission.md
|
||||
- k8s/kyverno.md
|
||||
- #6
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/eck.md
|
||||
#- k8s/portworx.md
|
||||
- k8s/openebs.md
|
||||
@@ -2,6 +2,7 @@
|
||||
#/ /kube-halfday.yml.html 200!
|
||||
#/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-twodays.yml.html 200!
|
||||
/ /lke.yml.html 200!
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
@@ -21,5 +22,3 @@
|
||||
|
||||
# Survey form
|
||||
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform
|
||||
|
||||
/ /highfive.html 200!
|
||||
|
||||
1132
slides/autopilot/package-lock.json
generated
1132
slides/autopilot/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -44,64 +44,6 @@ Fri Feb 20 00:28:55 UTC 2015
|
||||
|
||||
---
|
||||
|
||||
## When `^C` doesn't work...
|
||||
|
||||
Sometimes, `^C` won't be enough.
|
||||
|
||||
Why? And how can we stop the container in that case?
|
||||
|
||||
---
|
||||
|
||||
## What happens when we hit `^C`
|
||||
|
||||
`SIGINT` gets sent to the container, which means:
|
||||
|
||||
- `SIGINT` gets sent to PID 1 (default case)
|
||||
|
||||
- `SIGINT` gets sent to *foreground processes* when running with `-ti`
|
||||
|
||||
But there is a special case for PID 1: it ignores all signals!
|
||||
|
||||
- except `SIGKILL` and `SIGSTOP`
|
||||
|
||||
- except signals handled explicitly
|
||||
|
||||
TL,DR: there are many circumstances when `^C` won't stop the container.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Why is PID 1 special?
|
||||
|
||||
- PID 1 has some extra responsibilities:
|
||||
|
||||
- it starts (directly or indirectly) every other process
|
||||
|
||||
- when a process exits, its processes are "reparented" under PID 1
|
||||
|
||||
- When PID 1 exits, everything stops:
|
||||
|
||||
- on a "regular" machine, it causes a kernel panic
|
||||
|
||||
- in a container, it kills all the processes
|
||||
|
||||
- We don't want PID 1 to stop accidentally
|
||||
|
||||
- That's why it has these extra protections
|
||||
|
||||
---
|
||||
|
||||
## How to stop these containers, then?
|
||||
|
||||
- Start another terminal and forget about them
|
||||
|
||||
(for now!)
|
||||
|
||||
- We'll shortly learn about `docker kill`
|
||||
|
||||
---
|
||||
|
||||
## Run a container in the background
|
||||
|
||||
Containers can be started in the background, with the `-d` flag (daemon mode):
|
||||
|
||||
@@ -131,7 +131,7 @@ root@fcfb62f0bfde:/# figlet hello
|
||||
|_| |_|\___|_|_|\___/
|
||||
```
|
||||
|
||||
It works! 🎉
|
||||
It works! .emoji[🎉]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -89,44 +89,6 @@ To keep things simple for now: this is the directory where our Dockerfile is loc
|
||||
|
||||
## What happens when we build the image?
|
||||
|
||||
It depends if we're using BuildKit or not!
|
||||
|
||||
If there are lots of blue lines and the first line looks like this:
|
||||
```
|
||||
[+] Building 1.8s (4/6)
|
||||
```
|
||||
... then we're using BuildKit.
|
||||
|
||||
If the output is mostly black-and-white and the first line looks like this:
|
||||
```
|
||||
Sending build context to Docker daemon 2.048kB
|
||||
```
|
||||
... then we're using the "classic" or "old-style" builder.
|
||||
|
||||
---
|
||||
|
||||
## To BuildKit or Not To BuildKit
|
||||
|
||||
Classic builder:
|
||||
|
||||
- copies the whole "build context" to the Docker Engine
|
||||
|
||||
- linear (processes lines one after the other)
|
||||
|
||||
- requires a full Docker Engine
|
||||
|
||||
BuildKit:
|
||||
|
||||
- only transfers parts of the "build context" when needed
|
||||
|
||||
- will parallelize operations (when possible)
|
||||
|
||||
- can run in non-privileged containers (e.g. on Kubernetes)
|
||||
|
||||
---
|
||||
|
||||
## With the classic builder
|
||||
|
||||
The output of `docker build` looks like this:
|
||||
|
||||
.small[
|
||||
@@ -169,7 +131,7 @@ Sending build context to Docker daemon 2.048 kB
|
||||
|
||||
* Be careful (or patient) if that directory is big and your link is slow.
|
||||
|
||||
* You can speed up the process with a [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file
|
||||
* You can speed up the process with a [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file
|
||||
|
||||
* It tells docker to ignore specific files in the directory
|
||||
|
||||
@@ -199,64 +161,6 @@ Removing intermediate container e01b294dbffd
|
||||
|
||||
---
|
||||
|
||||
## With BuildKit
|
||||
|
||||
.small[
|
||||
```bash
|
||||
[+] Building 7.9s (7/7) FINISHED
|
||||
=> [internal] load build definition from Dockerfile 0.0s
|
||||
=> => transferring dockerfile: 98B 0.0s
|
||||
=> [internal] load .dockerignore 0.0s
|
||||
=> => transferring context: 2B 0.0s
|
||||
=> [internal] load metadata for docker.io/library/ubuntu:latest 1.2s
|
||||
=> [1/3] FROM docker.io/library/ubuntu@sha256:cf31af331f38d1d7158470e095b132acd126a7180a54f263d386 3.2s
|
||||
=> => resolve docker.io/library/ubuntu@sha256:cf31af331f38d1d7158470e095b132acd126a7180a54f263d386 0.0s
|
||||
=> => sha256:cf31af331f38d1d7158470e095b132acd126a7180a54f263d386da88eb681d93 1.20kB / 1.20kB 0.0s
|
||||
=> => sha256:1de4c5e2d8954bf5fa9855f8b4c9d3c3b97d1d380efe19f60f3e4107a66f5cae 943B / 943B 0.0s
|
||||
=> => sha256:6a98cbe39225dadebcaa04e21dbe5900ad604739b07a9fa351dd10a6ebad4c1b 3.31kB / 3.31kB 0.0s
|
||||
=> => sha256:80bc30679ac1fd798f3241208c14accd6a364cb8a6224d1127dfb1577d10554f 27.14MB / 27.14MB 2.3s
|
||||
=> => sha256:9bf18fab4cfbf479fa9f8409ad47e2702c63241304c2cdd4c33f2a1633c5f85e 850B / 850B 0.5s
|
||||
=> => sha256:5979309c983a2adeff352538937475cf961d49c34194fa2aab142effe19ed9c1 189B / 189B 0.4s
|
||||
=> => extracting sha256:80bc30679ac1fd798f3241208c14accd6a364cb8a6224d1127dfb1577d10554f 0.7s
|
||||
=> => extracting sha256:9bf18fab4cfbf479fa9f8409ad47e2702c63241304c2cdd4c33f2a1633c5f85e 0.0s
|
||||
=> => extracting sha256:5979309c983a2adeff352538937475cf961d49c34194fa2aab142effe19ed9c1 0.0s
|
||||
=> [2/3] RUN apt-get update 2.5s
|
||||
=> [3/3] RUN apt-get install figlet 0.9s
|
||||
=> exporting to image 0.1s
|
||||
=> => exporting layers 0.1s
|
||||
=> => writing image sha256:3b8aee7b444ab775975dfba691a72d8ac24af2756e0a024e056e3858d5a23f7c 0.0s
|
||||
=> => naming to docker.io/library/figlet 0.0s
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Understanding BuildKit output
|
||||
|
||||
- BuildKit transfers the Dockerfile and the *build context*
|
||||
|
||||
(these are the first two `[internal]` stages)
|
||||
|
||||
- Then it executes the steps defined in the Dockerfile
|
||||
|
||||
(`[1/3]`, `[2/3]`, `[3/3]`)
|
||||
|
||||
- Finally, it exports the result of the build
|
||||
|
||||
(image definition + collection of layers)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## BuildKit plain output
|
||||
|
||||
- When running BuildKit in e.g. a CI pipeline, its output will be different
|
||||
|
||||
- We can see the same output format by using `--progress=plain`
|
||||
|
||||
---
|
||||
|
||||
## The caching system
|
||||
|
||||
If you run the same build again, it will be instantaneous. Why?
|
||||
@@ -267,10 +171,10 @@ If you run the same build again, it will be instantaneous. Why?
|
||||
|
||||
* Docker uses the exact strings defined in your Dockerfile, so:
|
||||
|
||||
* `RUN apt-get install figlet cowsay`
|
||||
* `RUN apt-get install figlet cowsay `
|
||||
<br/> is different from
|
||||
<br/> `RUN apt-get install cowsay figlet`
|
||||
|
||||
|
||||
* `RUN apt-get update` is not re-executed when the mirrors are updated
|
||||
|
||||
You can force a rebuild with `docker build --no-cache ...`.
|
||||
@@ -292,7 +196,7 @@ root@91f3c974c9a1:/# figlet hello
|
||||
```
|
||||
|
||||
|
||||
Yay! 🎉
|
||||
Yay! .emoji[🎉]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -272,45 +272,6 @@ $ docker run -it --entrypoint bash myfiglet
|
||||
root@6027e44e2955:/#
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## `CMD` and `ENTRYPOINT` recap
|
||||
|
||||
- `docker run myimage` executes `ENTRYPOINT` + `CMD`
|
||||
|
||||
- `docker run myimage args` executes `ENTRYPOINT` + `args` (overriding `CMD`)
|
||||
|
||||
- `docker run --entrypoint prog myimage` executes `prog` (overriding both)
|
||||
|
||||
.small[
|
||||
| Command | `ENTRYPOINT` | `CMD` | Result
|
||||
|---------------------------------|--------------------|---------|-------
|
||||
| `docker run figlet` | none | none | Use values from base image (`bash`)
|
||||
| `docker run figlet hola` | none | none | Error (executable `hola` not found)
|
||||
| `docker run figlet` | `figlet -f script` | none | `figlet -f script`
|
||||
| `docker run figlet hola` | `figlet -f script` | none | `figlet -f script hola`
|
||||
| `docker run figlet` | none | `figlet -f script` | `figlet -f script`
|
||||
| `docker run figlet hola` | none | `figlet -f script` | Error (executable `hola` not found)
|
||||
| `docker run figlet` | `figlet -f script` | `hello` | `figlet -f script hello`
|
||||
| `docker run figlet hola` | `figlet -f script` | `hello` | `figlet -f script hola`
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## When to use `ENTRYPOINT` vs `CMD`
|
||||
|
||||
`ENTRYPOINT` is great for "containerized binaries".
|
||||
|
||||
Example: `docker run consul --help`
|
||||
|
||||
(Pretend that the `docker run` part isn't there!)
|
||||
|
||||
`CMD` is great for images with multiple binaries.
|
||||
|
||||
Example: `docker run busybox ifconfig`
|
||||
|
||||
(It makes sense to indicate *which* program we want to run!)
|
||||
|
||||
???
|
||||
|
||||
:EN:- CMD and ENTRYPOINT
|
||||
|
||||
@@ -1,40 +1,51 @@
|
||||
# Compose for development stacks
|
||||
|
||||
Dockerfile = great to build *one* container image.
|
||||
Dockerfiles are great to build container images.
|
||||
|
||||
What if we have multiple containers?
|
||||
But what if we work with a complex stack made of multiple containers?
|
||||
|
||||
What if some of them require particular `docker run` parameters?
|
||||
Eventually, we will want to write some custom scripts and automation to build, run, and connect
|
||||
our containers together.
|
||||
|
||||
How do we connect them all together?
|
||||
There is a better way: using Docker Compose.
|
||||
|
||||
... Compose solves these use-cases (and a few more).
|
||||
In this section, you will use Compose to bootstrap a development environment.
|
||||
|
||||
---
|
||||
|
||||
## Life before Compose
|
||||
## What is Docker Compose?
|
||||
|
||||
Before we had Compose, we would typically write custom scripts to:
|
||||
Docker Compose (formerly known as `fig`) is an external tool.
|
||||
|
||||
- build container images,
|
||||
Unlike the Docker Engine, it is written in Python. It's open source as well.
|
||||
|
||||
- run containers using these images,
|
||||
The general idea of Compose is to enable a very simple, powerful onboarding workflow:
|
||||
|
||||
- connect the containers together,
|
||||
|
||||
- rebuild, restart, update these images and containers.
|
||||
|
||||
---
|
||||
|
||||
## Life with Compose
|
||||
|
||||
Compose enables a simple, powerful onboarding workflow:
|
||||
|
||||
1. Checkout our code.
|
||||
1. Checkout your code.
|
||||
|
||||
2. Run `docker-compose up`.
|
||||
|
||||
3. Our app is up and running!
|
||||
3. Your app is up and running!
|
||||
|
||||
---
|
||||
|
||||
## Compose overview
|
||||
|
||||
This is how you work with Compose:
|
||||
|
||||
* You describe a set (or stack) of containers in a YAML file called `docker-compose.yml`.
|
||||
|
||||
* You run `docker-compose up`.
|
||||
|
||||
* Compose automatically pulls images, builds containers, and starts them.
|
||||
|
||||
* Compose can set up links, volumes, and other Docker options for you.
|
||||
|
||||
* Compose can run the containers in the background, or in the foreground.
|
||||
|
||||
* When containers are running in the foreground, their aggregated output is shown.
|
||||
|
||||
Before diving in, let's see a small example of Compose in action.
|
||||
|
||||
---
|
||||
|
||||
@@ -44,61 +55,20 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
## Life after Compose
|
||||
## Checking if Compose is installed
|
||||
|
||||
(Or: when do we need something else?)
|
||||
If you are using the official training virtual machines, Compose has been
|
||||
pre-installed.
|
||||
|
||||
- Compose is *not* an orchestrator
|
||||
If you are using Docker for Mac/Windows or the Docker Toolbox, Compose comes with them.
|
||||
|
||||
- It isn't designed to need to run containers on multiple nodes
|
||||
If you are on Linux (desktop or server environment), you will need to install Compose from its [release page](https://github.com/docker/compose/releases) or with `pip install docker-compose`.
|
||||
|
||||
(it can, however, work with Docker Swarm Mode)
|
||||
You can always check that it is installed by running:
|
||||
|
||||
- Compose isn't ideal if we want to run containers on Kubernetes
|
||||
|
||||
- it uses different concepts (Compose services ≠ Kubernetes services)
|
||||
|
||||
- it needs a Docker Engine (althought containerd support might be coming)
|
||||
|
||||
---
|
||||
|
||||
## First rodeo with Compose
|
||||
|
||||
1. Write Dockerfiles
|
||||
|
||||
2. Describe our stack of containers in a YAML file called `docker-compose.yml`
|
||||
|
||||
3. `docker-compose up` (or `docker-compose up -d` to run in the background)
|
||||
|
||||
4. Compose pulls and builds the required images, and starts the containers
|
||||
|
||||
5. Compose shows the combined logs of all the containers
|
||||
|
||||
(if running in the background, use `docker-compose logs`)
|
||||
|
||||
6. Hit Ctrl-C to stop the whole stack
|
||||
|
||||
(if running in the background, use `docker-compose stop`)
|
||||
|
||||
---
|
||||
|
||||
## Iterating
|
||||
|
||||
After making changes to our source code, we can:
|
||||
|
||||
1. `docker-compose build` to rebuild container images
|
||||
|
||||
2. `docker-compose up` to restart the stack with the new images
|
||||
|
||||
We can also combine both with `docker-compose up --build`
|
||||
|
||||
Compose will be smart, and only recreate the containers that have changed.
|
||||
|
||||
When working with interpreted languages:
|
||||
|
||||
- dont' rebuild each time
|
||||
|
||||
- leverage a `volumes` section instead
|
||||
```bash
|
||||
$ docker-compose --version
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -107,37 +77,38 @@ When working with interpreted languages:
|
||||
First step: clone the source code for the app we will be working on.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/jpetazzo/trainingwheels
|
||||
cd trainingwheels
|
||||
$ cd
|
||||
$ git clone https://github.com/jpetazzo/trainingwheels
|
||||
...
|
||||
$ cd trainingwheels
|
||||
```
|
||||
|
||||
Second step: start the app.
|
||||
|
||||
Second step: start your app.
|
||||
|
||||
```bash
|
||||
docker-compose up
|
||||
$ docker-compose up
|
||||
```
|
||||
|
||||
Watch Compose build and run the app.
|
||||
|
||||
That Compose stack exposes a web server on port 8000; try connecting to it.
|
||||
Watch Compose build and run your app with the correct parameters,
|
||||
including linking the relevant containers together.
|
||||
|
||||
---
|
||||
|
||||
## Launching Our First Stack with Compose
|
||||
|
||||
We should see a web page like this:
|
||||
Verify that the app is running at `http://<yourHostIP>:8000`.
|
||||
|
||||

|
||||
|
||||
Each time we reload, the counter should increase.
|
||||
|
||||
---
|
||||
|
||||
## Stopping the app
|
||||
|
||||
When we hit Ctrl-C, Compose tries to gracefully terminate all of the containers.
|
||||
When you hit `^C`, Compose tries to gracefully terminate all of the containers.
|
||||
|
||||
After ten seconds (or if we press `^C` again) it will forcibly kill them.
|
||||
After ten seconds (or if you press `^C` again) it will forcibly kill
|
||||
them.
|
||||
|
||||
---
|
||||
|
||||
@@ -147,13 +118,13 @@ Here is the file used in the demo:
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
version: "3"
|
||||
version: "2"
|
||||
|
||||
services:
|
||||
www:
|
||||
build: www
|
||||
ports:
|
||||
- ${PORT-8000}:5000
|
||||
- 8000:5000
|
||||
user: nobody
|
||||
environment:
|
||||
DEBUG: 1
|
||||
@@ -172,9 +143,9 @@ services:
|
||||
|
||||
A Compose file has multiple sections:
|
||||
|
||||
* `version` is mandatory. (Typically use "3".)
|
||||
* `version` is mandatory. (We should use `"2"` or later; version 1 is deprecated.)
|
||||
|
||||
* `services` is mandatory. Each service corresponds to a container.
|
||||
* `services` is mandatory. A service is one or more replicas of the same image running as containers.
|
||||
|
||||
* `networks` is optional and indicates to which networks containers should be connected.
|
||||
<br/>(By default, containers will be connected on a private, per-compose-file network.)
|
||||
@@ -193,8 +164,6 @@ A Compose file has multiple sections:
|
||||
|
||||
* Version 3 added support for deployment options (scaling, rolling updates, etc).
|
||||
|
||||
* Typically use `version: "3"`.
|
||||
|
||||
The [Docker documentation](https://docs.docker.com/compose/compose-file/)
|
||||
has excellent information about the Compose file format if you need to know more about versions.
|
||||
|
||||
@@ -232,45 +201,34 @@ For the full list, check: https://docs.docker.com/compose/compose-file/
|
||||
|
||||
---
|
||||
|
||||
## Environment variables
|
||||
## Compose commands
|
||||
|
||||
- We can use environment variables in Compose files
|
||||
We already saw `docker-compose up`, but another one is `docker-compose build`.
|
||||
|
||||
(like `$THIS` or `${THAT}`)
|
||||
It will execute `docker build` for all containers mentioning a `build` path.
|
||||
|
||||
- We can provide default values, e.g. `${PORT-8000}`
|
||||
It can also be invoked automatically when starting the application:
|
||||
|
||||
- Compose will also automatically load the environment file `.env`
|
||||
```bash
|
||||
docker-compose up --build
|
||||
```
|
||||
|
||||
(it should contain `VAR=value`, one per line)
|
||||
Another common option is to start containers in the background:
|
||||
|
||||
- This is a great way to customize build and run parameters
|
||||
|
||||
(base image versions to use, build and run secrets, port numbers...)
|
||||
```bash
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Running multiple copies of a stack
|
||||
## Check container status
|
||||
|
||||
- Copy the stack in two different directories, e.g. `front` and `frontcopy`
|
||||
It can be tedious to check the status of your containers with `docker ps`,
|
||||
especially when running multiple apps at the same time.
|
||||
|
||||
- Compose prefixes images and containers with the directory name:
|
||||
Compose makes it easier; with `docker-compose ps` you will see only the status of the
|
||||
containers of the current stack:
|
||||
|
||||
`front_www`, `front_www_1`, `front_db_1`
|
||||
|
||||
`frontcopy_www`, `frontcopy_www_1`, `frontcopy_db_1`
|
||||
|
||||
- Alternatively, use `docker-compose -p frontcopy`
|
||||
|
||||
(to set the `--project-name` of a stack, which default to the dir name)
|
||||
|
||||
- Each copy is isolated from the others (runs on a different network)
|
||||
|
||||
---
|
||||
|
||||
## Checking stack status
|
||||
|
||||
We have `ps`, `docker ps`, and similarly, `docker-compose ps`:
|
||||
|
||||
```bash
|
||||
$ docker-compose ps
|
||||
@@ -280,10 +238,6 @@ trainingwheels_redis_1 /entrypoint.sh red Up 6379/tcp
|
||||
trainingwheels_www_1 python counter.py Up 0.0.0.0:8000->5000/tcp
|
||||
```
|
||||
|
||||
Shows the status of all the containers of our stack.
|
||||
|
||||
Doesn't show the other containers.
|
||||
|
||||
---
|
||||
|
||||
## Cleaning up (1)
|
||||
@@ -327,39 +281,47 @@ Use `docker-compose down -v` to remove everything including volumes.
|
||||
|
||||
## Special handling of volumes
|
||||
|
||||
- When an image gets updated, Compose automatically creates a new container
|
||||
Compose is smart. If your container uses volumes, when you restart your
|
||||
application, Compose will create a new container, but carefully re-use
|
||||
the volumes it was using previously.
|
||||
|
||||
- The data in the old container is lost...
|
||||
|
||||
- ... Except if the container is using a *volume*
|
||||
|
||||
- Compose will then re-attach that volume to the new container
|
||||
|
||||
(and data is then retained across database upgrades)
|
||||
|
||||
- All good database images use volumes
|
||||
|
||||
(e.g. all official images)
|
||||
This makes it easy to upgrade a stateful service, by pulling its
|
||||
new image and just restarting your stack with Compose.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
## Compose project name
|
||||
|
||||
## A bit of history and trivia
|
||||
* When you run a Compose command, Compose infers the "project name" of your app.
|
||||
|
||||
- Compose was initially named "Fig"
|
||||
* By default, the "project name" is the name of the current directory.
|
||||
|
||||
- Compose is one of the only components of Docker written in Python
|
||||
* For instance, if you are in `/home/zelda/src/ocarina`, the project name is `ocarina`.
|
||||
|
||||
(almost everything else is in Go)
|
||||
* All resources created by Compose are tagged with this project name.
|
||||
|
||||
- In 2020, Docker introduced "Compose CLI":
|
||||
* The project name also appears as a prefix of the names of the resources.
|
||||
|
||||
- `docker compose` command to deploy Compose stacks to some clouds
|
||||
E.g. in the previous example, service `www` will create a container `ocarina_www_1`.
|
||||
|
||||
- progressively getting feature parity with `docker-compose`
|
||||
* The project name can be overridden with `docker-compose -p`.
|
||||
|
||||
- also provides numerous improvements (e.g. leverages BuildKit by default)
|
||||
---
|
||||
|
||||
## Running two copies of the same app
|
||||
|
||||
If you want to run two copies of the same app simultaneously, all you have to do is to
|
||||
make sure that each copy has a different project name.
|
||||
|
||||
You can:
|
||||
|
||||
* copy your code in a directory with a different name
|
||||
|
||||
* start each copy with `docker-compose -p myprojname up`
|
||||
|
||||
Each copy will run in a different network, totally isolated from the other.
|
||||
|
||||
This is ideal to debug regressions, do side-by-side comparisons, etc.
|
||||
|
||||
???
|
||||
|
||||
|
||||
@@ -27,9 +27,9 @@ We will also explain the principle of overlay networks and network plugins.
|
||||
|
||||
## The Container Network Model
|
||||
|
||||
Docker has "networks".
|
||||
The CNM was introduced in Engine 1.9.0 (November 2015).
|
||||
|
||||
We can manage them with the `docker network` commands; for instance:
|
||||
The CNM adds the notion of a *network*, and a new top-level command to manipulate and see those networks: `docker network`.
|
||||
|
||||
```bash
|
||||
$ docker network ls
|
||||
@@ -41,79 +41,59 @@ eb0eeab782f4 host host
|
||||
228a4355d548 blog-prod overlay
|
||||
```
|
||||
|
||||
New networks can be created (with `docker network create`).
|
||||
---
|
||||
|
||||
(Note: networks `none` and `host` are special; let's set them aside for now.)
|
||||
## What's in a network?
|
||||
|
||||
* Conceptually, a network is a virtual switch.
|
||||
|
||||
* It can be local (to a single Engine) or global (spanning multiple hosts).
|
||||
|
||||
* A network has an IP subnet associated to it.
|
||||
|
||||
* Docker will allocate IP addresses to the containers connected to a network.
|
||||
|
||||
* Containers can be connected to multiple networks.
|
||||
|
||||
* Containers can be given per-network names and aliases.
|
||||
|
||||
* The names and aliases can be resolved via an embedded DNS server.
|
||||
|
||||
---
|
||||
|
||||
## What's a network?
|
||||
## Network implementation details
|
||||
|
||||
- Conceptually, a Docker "network" is a virtual switch
|
||||
* A network is managed by a *driver*.
|
||||
|
||||
(we can also think about it like a VLAN, or a WiFi SSID, for instance)
|
||||
* The built-in drivers include:
|
||||
|
||||
- By default, containers are connected to a single network
|
||||
* `bridge` (default)
|
||||
|
||||
(but they can be connected to zero, or many networks, even dynamically)
|
||||
* `none`
|
||||
|
||||
- Each network has its own subnet (IP address range)
|
||||
* `host`
|
||||
|
||||
- A network can be local (to a single Docker Engine) or global (span multiple hosts)
|
||||
* `macvlan`
|
||||
|
||||
- Containers can have *network aliases* providing DNS-based service discovery
|
||||
* A multi-host driver, *overlay*, is available out of the box (for Swarm clusters).
|
||||
|
||||
(and each network has its own "domain", "zone", or "scope")
|
||||
* More drivers can be provided by plugins (OVS, VLAN...)
|
||||
|
||||
* A network can have a custom IPAM (IP allocator).
|
||||
|
||||
---
|
||||
|
||||
## Service discovery
|
||||
class: extra-details
|
||||
|
||||
- A container can be given a network alias
|
||||
## Differences with the CNI
|
||||
|
||||
(e.g. with `docker run --net some-network --net-alias db ...`)
|
||||
* CNI = Container Network Interface
|
||||
|
||||
- The containers running in the same network can resolve that network alias
|
||||
* CNI is used notably by Kubernetes
|
||||
|
||||
(i.e. if they do a DNS lookup on `db`, it will give the container's address)
|
||||
* With CNI, all the nodes and containers are on a single IP network
|
||||
|
||||
- We can have a different `db` container in each network
|
||||
|
||||
(this avoids naming conflicts between different stacks)
|
||||
|
||||
- When we name a container, it automatically adds the name as a network alias
|
||||
|
||||
(i.e. `docker run --name xyz ...` is like `docker run --net-alias xyz ...`
|
||||
|
||||
---
|
||||
|
||||
## Network isolation
|
||||
|
||||
- Networks are isolated
|
||||
|
||||
- By default, containers in network A cannot reach those in network B
|
||||
|
||||
- A container connected to both networks A and B can act as a router or proxy
|
||||
|
||||
- Published ports are always reachable through the Docker host address
|
||||
|
||||
(`docker run -P ...` makes a container port available to everyone)
|
||||
|
||||
---
|
||||
|
||||
## How to use networks
|
||||
|
||||
- We typically create one network per "stack" or app that we deploy
|
||||
|
||||
- More complex apps or stacks might require multiple networks
|
||||
|
||||
(e.g. `frontend`, `backend`, ...)
|
||||
|
||||
- Networks allow us to deploy multiple copies of the same stack
|
||||
|
||||
(e.g. `prod`, `dev`, `pr-442`, ....)
|
||||
|
||||
- If we use Docker Compose, this is managed automatically for us
|
||||
* Both CNI and CNM offer the same functionality, but with very different methods
|
||||
|
||||
---
|
||||
|
||||
@@ -141,30 +121,6 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## CNM vs CNI
|
||||
|
||||
- CNM is the model used by Docker
|
||||
|
||||
- Kubernetes uses a different model, architectured around CNI
|
||||
|
||||
(CNI is a kind of API between a container engine and *CNI plugins*)
|
||||
|
||||
- Docker model:
|
||||
|
||||
- multiple isolated networks
|
||||
- per-network service discovery
|
||||
- network interconnection requires extra steps
|
||||
|
||||
- Kubernetes model:
|
||||
|
||||
- single flat network
|
||||
- per-namespace service discovery
|
||||
- network isolation requires extra steps (Network Policies)
|
||||
|
||||
---
|
||||
|
||||
## Creating a network
|
||||
|
||||
Let's create a network called `dev`.
|
||||
@@ -234,12 +190,8 @@ class: extra-details
|
||||
|
||||
## Resolving container addresses
|
||||
|
||||
Since Docker Engine 1.10, name resolution is implemented by a dynamic resolver.
|
||||
|
||||
Archeological note: when CNM was intoduced (in Docker Engine 1.9, November 2015)
|
||||
name resolution was implemented with `/etc/hosts`, and it was updated each time
|
||||
CONTAINERs were added/removed. This could cause interesting race conditions
|
||||
since `/etc/hosts` was a bind-mount (and couldn't be updated atomically).
|
||||
In Docker Engine 1.9, name resolution is implemented with `/etc/hosts`, and
|
||||
updating it each time containers are added/removed.
|
||||
|
||||
.small[
|
||||
```bash
|
||||
@@ -256,6 +208,10 @@ ff02::2 ip6-allrouters
|
||||
```
|
||||
]
|
||||
|
||||
In Docker Engine 1.10, this has been replaced by a dynamic resolver.
|
||||
|
||||
(This avoids race conditions when updating `/etc/hosts`.)
|
||||
|
||||
---
|
||||
|
||||
# Service discovery with containers
|
||||
@@ -309,12 +265,12 @@ Note: we're not using a FQDN or an IP address here; just `redis`.
|
||||
|
||||
* That container must be on the same network as the web server.
|
||||
|
||||
* It must have the right network alias (`redis`) so the application can find it.
|
||||
* It must have the right name (`redis`) so the application can find it.
|
||||
|
||||
Start the container:
|
||||
|
||||
```bash
|
||||
$ docker run --net dev --net-alias redis -d redis
|
||||
$ docker run --net dev --name redis -d redis
|
||||
```
|
||||
|
||||
---
|
||||
@@ -331,19 +287,36 @@ $ docker run --net dev --net-alias redis -d redis
|
||||
|
||||
## A few words on *scope*
|
||||
|
||||
- Container names are unique (there can be only one `--name redis`)
|
||||
* What if we want to run multiple copies of our application?
|
||||
|
||||
- Network aliases are not unique
|
||||
* Since names are unique, there can be only one container named `redis` at a time.
|
||||
|
||||
- We can have the same network alias in different networks:
|
||||
```bash
|
||||
docker run --net dev --net-alias redis ...
|
||||
docker run --net prod --net-alias redis ...
|
||||
```
|
||||
* However, we can specify the network name of our container with `--net-alias`.
|
||||
|
||||
- We can even have multiple containers with the same alias in the same network
|
||||
* `--net-alias` is scoped per network, and independent from the container name.
|
||||
|
||||
(in that case, we get multiple DNS entries, aka "DNS round robin")
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using a network alias instead of a name
|
||||
|
||||
Let's remove the `redis` container:
|
||||
|
||||
```bash
|
||||
$ docker rm -f redis
|
||||
```
|
||||
|
||||
* `-f`: Force the removal of a running container (uses SIGKILL)
|
||||
|
||||
And create one that doesn't block the `redis` name:
|
||||
|
||||
```bash
|
||||
$ docker run --net dev --net-alias redis -d redis
|
||||
```
|
||||
|
||||
Check that the app still works (but the counter is back to 1,
|
||||
since we wiped out the old Redis container).
|
||||
|
||||
---
|
||||
|
||||
@@ -376,9 +349,7 @@ A container can have multiple network aliases.
|
||||
|
||||
Network aliases are *local* to a given network (only exist in this network).
|
||||
|
||||
Multiple containers can have the same network alias (even on the same network).
|
||||
|
||||
Since Docker Engine 1.11, resolving a network alias yields the IP addresses of all containers holding this alias.
|
||||
Multiple containers can have the same network alias (even on the same network). In Docker Engine 1.11, resolving a network alias yields the IP addresses of all containers holding this alias.
|
||||
|
||||
---
|
||||
|
||||
@@ -531,24 +502,6 @@ b2887adeb5578a01fd9c55c435cad56bbbe802350711d2743691f95743680b09
|
||||
|
||||
---
|
||||
|
||||
## Network drivers
|
||||
|
||||
* A network is managed by a *driver*.
|
||||
|
||||
* The built-in drivers include:
|
||||
|
||||
* `bridge` (default)
|
||||
* `none`
|
||||
* `host`
|
||||
* `macvlan`
|
||||
* `overlay` (for Swarm clusters)
|
||||
|
||||
* More drivers can be provided by plugins (OVS, VLAN...)
|
||||
|
||||
* A network can have a custom IPAM (IP allocator).
|
||||
|
||||
---
|
||||
|
||||
## Overlay networks
|
||||
|
||||
* The features we've seen so far only work when all containers are on a single host.
|
||||
|
||||
@@ -15,84 +15,53 @@ At the end of this section, you will be able to:
|
||||
|
||||
* Run a network service in a container.
|
||||
|
||||
* Connect to that network service.
|
||||
* Manipulate container networking basics.
|
||||
|
||||
* Find a container's IP address.
|
||||
|
||||
---
|
||||
|
||||
## Running a very simple service
|
||||
|
||||
- We need something small, simple, easy to configure
|
||||
|
||||
(or, even better, that doesn't require any configuration at all)
|
||||
|
||||
- Let's use the official NGINX image (named `nginx`)
|
||||
|
||||
- It runs a static web server listening on port 80
|
||||
|
||||
- It serves a default "Welcome to nginx!" page
|
||||
We will also explain the different network models used by Docker.
|
||||
|
||||
---
|
||||
|
||||
## Runing an NGINX server
|
||||
## A simple, static web server
|
||||
|
||||
Run the Docker Hub image `nginx`, which contains a basic web server:
|
||||
|
||||
```bash
|
||||
$ docker run -d -P nginx
|
||||
66b1ce719198711292c8f34f84a7b68c3876cf9f67015e752b94e189d35a204e
|
||||
```
|
||||
|
||||
- Docker will automatically pull the `nginx` image from the Docker Hub
|
||||
* Docker will download the image from the Docker Hub.
|
||||
|
||||
- `-d` / `--detach` tells Docker to run it in the background
|
||||
* `-d` tells Docker to run the image in the background.
|
||||
|
||||
- `P` / `--publish-all` tells Docker to publish all ports
|
||||
* `-P` tells Docker to make this service reachable from other computers.
|
||||
<br/>(`-P` is the short version of `--publish-all`.)
|
||||
|
||||
(publish = make them reachable from other computers)
|
||||
|
||||
- ...OK, how do we connect to our web server now?
|
||||
But, how do we connect to our web server now?
|
||||
|
||||
---
|
||||
|
||||
## Finding our web server port
|
||||
|
||||
- First, we need to find the *port number* used by Docker
|
||||
We will use `docker ps`:
|
||||
|
||||
(the NGINX container listens on port 80, but this port will be *mapped*)
|
||||
```bash
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE ... PORTS ...
|
||||
e40ffb406c9e nginx ... 0.0.0.0:32768->80/tcp ...
|
||||
```
|
||||
|
||||
- We can use `docker ps`:
|
||||
```bash
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE ... PORTS ...
|
||||
e40ffb406c9e nginx ... 0.0.0.0:`12345`->80/tcp ...
|
||||
```
|
||||
|
||||
- This means:
|
||||
* The web server is running on port 80 inside the container.
|
||||
|
||||
*port 12345 on the Docker host is mapped to port 80 in the container*
|
||||
* This port is mapped to port 32768 on our Docker host.
|
||||
|
||||
- Now we need to connect to the Docker host!
|
||||
We will explain the whys and hows of this port mapping.
|
||||
|
||||
---
|
||||
But first, let's make sure that everything works properly.
|
||||
|
||||
## Finding the address of the Docker host
|
||||
|
||||
- When running Docker on your Linux workstation:
|
||||
|
||||
*use `localhost`, or any IP address of your machine*
|
||||
|
||||
- When running Docker on a remote Linux server:
|
||||
|
||||
*use any IP address of the remote machine*
|
||||
|
||||
- When running Docker Desktop on Mac or Windows:
|
||||
|
||||
*use `localhost`*
|
||||
|
||||
- In other scenarios (`docker-machine`, local VM...):
|
||||
|
||||
*use the IP address of the Docker VM*
|
||||
|
||||
---
|
||||
|
||||
## Connecting to our web server (GUI)
|
||||
@@ -112,7 +81,7 @@ Make sure to use the right port number if it is different
|
||||
from the example below:
|
||||
|
||||
```bash
|
||||
$ curl localhost:12345
|
||||
$ curl localhost:32768
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
@@ -147,41 +116,17 @@ IMAGE CREATED CREATED BY
|
||||
|
||||
---
|
||||
|
||||
## Why can't we just connect to port 80?
|
||||
## Why are we mapping ports?
|
||||
|
||||
- Our Docker host has only one port 80
|
||||
* We are out of IPv4 addresses.
|
||||
|
||||
- Therefore, we can only have one container at a time on port 80
|
||||
* Containers cannot have public IPv4 addresses.
|
||||
|
||||
- Therefore, if multiple containers want port 80, only one can get it
|
||||
* They have private addresses.
|
||||
|
||||
- By default, containers *do not* get "their" port number, but a random one
|
||||
* Services have to be exposed port by port.
|
||||
|
||||
(not "random" as "crypto random", but as "it depends on various factors")
|
||||
|
||||
- We'll see later how to force a port number (including port 80!)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using multiple IP addresses
|
||||
|
||||
*Hey, my network-fu is strong, and I have questions...*
|
||||
|
||||
- Can I publish one container on 127.0.0.2:80, and another on 127.0.0.3:80?
|
||||
|
||||
- My machine has multiple (public) IP addresses, let's say A.A.A.A and B.B.B.B.
|
||||
<br/>
|
||||
Can I have one container on A.A.A.A:80 and another on B.B.B.B:80?
|
||||
|
||||
- I have a whole IPV4 subnet, can I allocate it to my containers?
|
||||
|
||||
- What about IPV6?
|
||||
|
||||
You can do all these things when running Docker directly on Linux.
|
||||
|
||||
(On other platforms, *generally not*, but there are some exceptions.)
|
||||
* Ports have to be mapped to avoid conflicts.
|
||||
|
||||
---
|
||||
|
||||
@@ -193,7 +138,7 @@ There is a command to help us:
|
||||
|
||||
```bash
|
||||
$ docker port <containerID> 80
|
||||
0.0.0.0:12345
|
||||
32768
|
||||
```
|
||||
|
||||
---
|
||||
@@ -227,11 +172,13 @@ There are many ways to integrate containers in your network.
|
||||
* Pick a fixed port number in advance, when you generate your configuration.
|
||||
<br/>Then start your container by setting the port numbers manually.
|
||||
|
||||
* Use an orchestrator like Kubernetes or Swarm.
|
||||
<br/>The orchestrator will provide its own networking facilities.
|
||||
* Use a network plugin, connecting your containers with e.g. VLANs, tunnels...
|
||||
|
||||
Orchestrators typically provide mechanisms to enable direct container-to-container
|
||||
communication across hosts, and publishing/load balancing for inbound traffic.
|
||||
* Enable *Swarm Mode* to deploy across a cluster.
|
||||
<br/>The container will then be reachable through any node of the cluster.
|
||||
|
||||
When using Docker through an extra management layer like Mesos or Kubernetes,
|
||||
these will usually provide their own mechanism to expose containers.
|
||||
|
||||
---
|
||||
|
||||
@@ -255,34 +202,16 @@ $ docker inspect --format '{{ .NetworkSettings.IPAddress }}' <yourContainerID>
|
||||
|
||||
## Pinging our container
|
||||
|
||||
Let's try to ping our container *from another container.*
|
||||
We can test connectivity to the container using the IP address we've
|
||||
just discovered. Let's see this now by using the `ping` tool.
|
||||
|
||||
```bash
|
||||
docker run alpine ping `<ipaddress>`
|
||||
PING 172.17.0.X (172.17.0.X): 56 data bytes
|
||||
64 bytes from 172.17.0.X: seq=0 ttl=64 time=0.106 ms
|
||||
64 bytes from 172.17.0.X: seq=1 ttl=64 time=0.250 ms
|
||||
64 bytes from 172.17.0.X: seq=2 ttl=64 time=0.188 ms
|
||||
$ ping <ipAddress>
|
||||
64 bytes from <ipAddress>: icmp_req=1 ttl=64 time=0.085 ms
|
||||
64 bytes from <ipAddress>: icmp_req=2 ttl=64 time=0.085 ms
|
||||
64 bytes from <ipAddress>: icmp_req=3 ttl=64 time=0.085 ms
|
||||
```
|
||||
|
||||
When running on Linux, we can even ping that IP address directly!
|
||||
|
||||
(And connect to a container's ports even if they aren't published.)
|
||||
|
||||
---
|
||||
|
||||
## How often do we use `-p` and `-P` ?
|
||||
|
||||
- When running a stack of containers, we will often use Compose
|
||||
|
||||
- Compose will take care of exposing containers
|
||||
|
||||
(through a `ports:` section in the `docker-compose.yml` file)
|
||||
|
||||
- It is, however, fairly common to use `docker run -P` for a quick test
|
||||
|
||||
- Or `docker run -p ...` when an image doesn't `EXPOSE` a port correctly
|
||||
|
||||
---
|
||||
|
||||
## Section summary
|
||||
@@ -291,10 +220,13 @@ We've learned how to:
|
||||
|
||||
* Expose a network port.
|
||||
|
||||
* Connect to an application running in a container.
|
||||
* Manipulate container networking basics.
|
||||
|
||||
* Find a container's IP address.
|
||||
|
||||
In the next chapter, we will see how to connect
|
||||
containers together without exposing their ports.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Exposing single containers
|
||||
|
||||
@@ -88,43 +88,16 @@ Success!
|
||||
|
||||
## Details
|
||||
|
||||
* We can `COPY` whole directories recursively
|
||||
* You can `COPY` whole directories recursively.
|
||||
|
||||
* It is possible to do e.g. `COPY . .`
|
||||
|
||||
(but it might require some extra precautions to avoid copying too much)
|
||||
|
||||
* In older Dockerfiles, you might see the `ADD` command; consider it deprecated
|
||||
|
||||
(it is similar to `COPY` but can automatically extract archives)
|
||||
* Older Dockerfiles also have the `ADD` instruction.
|
||||
<br/>It is similar but can automatically extract archives.
|
||||
|
||||
* If we really wanted to compile C code in a container, we would:
|
||||
|
||||
* place it in a different directory, with the `WORKDIR` instruction
|
||||
* Place it in a different directory, with the `WORKDIR` instruction.
|
||||
|
||||
* even better, use the `gcc` official image
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `.dockerignore`
|
||||
|
||||
- We can create a file named `.dockerignore`
|
||||
|
||||
(at the top-level of the build context)
|
||||
|
||||
- It can contain file names and globs to ignore
|
||||
|
||||
- They won't be sent to the builder
|
||||
|
||||
(and won't end up in the resulting image)
|
||||
|
||||
- See the [documentation] for the little details
|
||||
|
||||
(exceptions can be made with `!`, multiple directory levels with `**`...)
|
||||
|
||||
[documentation]: https://docs.docker.com/engine/reference/builder/#dockerignore-file
|
||||
* Even better, use the `gcc` official image.
|
||||
|
||||
???
|
||||
|
||||
|
||||
@@ -66,9 +66,9 @@ Adding the dependencies as a separate step means that Docker can cache more effi
|
||||
|
||||
```bash
|
||||
FROM python
|
||||
COPY requirements.txt /tmp/requirements.txt
|
||||
RUN pip install -qr /tmp/requirements.txt
|
||||
WORKDIR /src
|
||||
COPY requirements.txt .
|
||||
RUN pip install -qr requirements.txt
|
||||
COPY . .
|
||||
EXPOSE 5000
|
||||
CMD ["python", "app.py"]
|
||||
|
||||
@@ -2,99 +2,4 @@
|
||||
|
||||
Let's write Dockerfiles for an existing application!
|
||||
|
||||
1. Check out the code repository
|
||||
|
||||
2. Read all the instructions
|
||||
|
||||
3. Write Dockerfiles
|
||||
|
||||
4. Build and test them individually
|
||||
|
||||
<!--
|
||||
5. Test them together with the provided Compose file
|
||||
-->
|
||||
|
||||
---
|
||||
|
||||
## Code repository
|
||||
|
||||
Clone the repository available at:
|
||||
|
||||
https://github.com/jpetazzo/wordsmith
|
||||
|
||||
It should look like this:
|
||||
```
|
||||
├── LICENSE
|
||||
├── README
|
||||
├── db/
|
||||
│ └── words.sql
|
||||
├── web/
|
||||
│ ├── dispatcher.go
|
||||
│ └── static/
|
||||
└── words/
|
||||
├── pom.xml
|
||||
└── src/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Instructions
|
||||
|
||||
The repository contains instructions in English and French.
|
||||
<br/>
|
||||
For now, we only care about the first part (about writing Dockerfiles).
|
||||
<br/>
|
||||
Place each Dockerfile in its own directory, like this:
|
||||
```
|
||||
├── LICENSE
|
||||
├── README
|
||||
├── db/
|
||||
│ ├── `Dockerfile`
|
||||
│ └── words.sql
|
||||
├── web/
|
||||
│ ├── `Dockerfile`
|
||||
│ ├── dispatcher.go
|
||||
│ └── static/
|
||||
└── words/
|
||||
├── `Dockerfile`
|
||||
├── pom.xml
|
||||
└── src/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Build and test
|
||||
|
||||
Build and run each Dockerfile individually.
|
||||
|
||||
For `db`, we should be able to see some messages confirming that the data set
|
||||
was loaded successfully (some `INSERT` lines in the container output).
|
||||
|
||||
For `web` and `words`, we should be able to see some message looking like
|
||||
"server started successfully".
|
||||
|
||||
That's all we care about for now!
|
||||
|
||||
Bonus question: make sure that each container stops correctly when hitting Ctrl-C.
|
||||
|
||||
???
|
||||
|
||||
## Test with a Compose file
|
||||
|
||||
Place the following Compose file at the root of the repository:
|
||||
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
services:
|
||||
db:
|
||||
build: db
|
||||
words:
|
||||
build: words
|
||||
web:
|
||||
build: web
|
||||
ports:
|
||||
- 8888:80
|
||||
```
|
||||
|
||||
Test the whole app by bringin up the stack and connecting to port 8888.
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
@@ -106,7 +106,7 @@ root@04c0bb0a6c07:/# figlet hello
|
||||
|_| |_|\___|_|_|\___/
|
||||
```
|
||||
|
||||
Beautiful! 😍
|
||||
Beautiful! .emoji[😍]
|
||||
|
||||
---
|
||||
|
||||
@@ -118,7 +118,7 @@ Let's check how many packages are installed there.
|
||||
|
||||
```bash
|
||||
root@04c0bb0a6c07:/# dpkg -l | wc -l
|
||||
97
|
||||
190
|
||||
```
|
||||
|
||||
* `dpkg -l` lists the packages installed in our container
|
||||
@@ -175,7 +175,7 @@ Now try to run `figlet`. Does that work?
|
||||
|
||||
* We can run *any container* on *any host*.
|
||||
|
||||
(One exception: Windows containers can only run on Windows hosts; at least for now.)
|
||||
(One exception: Windows containers cannot run on Linux machines; at least not yet.)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -56,8 +56,6 @@ Each of the following items will correspond to one layer:
|
||||
* Our application code and assets
|
||||
* Our application configuration
|
||||
|
||||
(Note: app config is generally added by orchestration facilities.)
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
@@ -369,44 +367,6 @@ This is similar to what we would do with `pip install`, `npm install`, etc.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Multi-arch images
|
||||
|
||||
- An image can support multiple architectures
|
||||
|
||||
- More precisely, a specific *tag* in a given *repository* can have either:
|
||||
|
||||
- a single *manifest* referencing an image for a single architecture
|
||||
|
||||
- a *manifest list* (or *fat manifest*) referencing multiple images
|
||||
|
||||
- In a *manifest list*, each image is identified by a combination of:
|
||||
|
||||
- `os` (linux, windows)
|
||||
|
||||
- `architecture` (amd64, arm, arm64...)
|
||||
|
||||
- optional fields like `variant` (for arm and arm64), `os.version` (for windows)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Working with multi-arch images
|
||||
|
||||
- The Docker Engine will pull "native" images when available
|
||||
|
||||
(images matching its own os/architecture/variant)
|
||||
|
||||
- We can ask for a specific image platform with `--platform`
|
||||
|
||||
- The Docker Engine can run non-native images thanks to QEMU+binfmt
|
||||
|
||||
(automatically on Docker Desktop; with a bit of setup on Linux)
|
||||
|
||||
---
|
||||
|
||||
## Section summary
|
||||
|
||||
We've learned how to:
|
||||
|
||||
@@ -154,7 +154,7 @@ Option 2:
|
||||
|
||||
Option 3:
|
||||
|
||||
* Use a *bind mount* to share local files with the container
|
||||
* Use a *volume* to mount local files into the container
|
||||
* Make changes locally
|
||||
* Changes are reflected in the container
|
||||
|
||||
@@ -199,28 +199,7 @@ The flag structure is:
|
||||
|
||||
* If you don't specify `rw` or `ro`, it will be `rw` by default.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Hold your horses... and your mounts
|
||||
|
||||
- The `-v /path/on/host:/path/in/container` syntax is the "old" syntax
|
||||
|
||||
- The modern syntax looks like this:
|
||||
|
||||
`--mount type=bind,source=/path/on/host,target=/path/in/container`
|
||||
|
||||
- `--mount` is more explicit, but `-v` is quicker to type
|
||||
|
||||
- `--mount` supports all mount types; `-v` doesn't support `tmpfs` mounts
|
||||
|
||||
- `--mount` fails if the path on the host doesn't exist; `-v` creates it
|
||||
|
||||
With the new syntax, our command becomes:
|
||||
```bash
|
||||
docker run --mount=type=bind,source=$(pwd),target=/src -dP namer
|
||||
```
|
||||
There will be a full chapter about volumes!
|
||||
|
||||
---
|
||||
|
||||
@@ -274,43 +253,15 @@ color: red;
|
||||
|
||||
## Understanding volumes
|
||||
|
||||
- Volumes are *not* copying or synchronizing files between the host and the container
|
||||
* Volumes are *not* copying or synchronizing files between the host and the container.
|
||||
|
||||
- Changes made in the host are immediately visible in the container (and vice versa)
|
||||
* Volumes are *bind mounts*: a kernel mechanism associating one path with another.
|
||||
|
||||
- When running on Linux:
|
||||
* Bind mounts are *kind of* similar to symbolic links, but at a very different level.
|
||||
|
||||
- volumes and bind mounts correspond to directories on the host
|
||||
* Changes made on the host or on the container will be visible on the other side.
|
||||
|
||||
- if Docker runs in a Linux VM, these directories are in the Linux VM
|
||||
|
||||
- When running on Docker Desktop:
|
||||
|
||||
- volumes correspond to directories in a small Linux VM running Docker
|
||||
|
||||
- access to bind mounts is translated to host filesystem access
|
||||
<br/>
|
||||
(a bit like a network filesystem)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Docker Desktop caveats
|
||||
|
||||
- When running Docker natively on Linux, accessing a mount = native I/O
|
||||
|
||||
- When running Docker Desktop, accessing a bind mount = file access translation
|
||||
|
||||
- That file access translation has relatively good performance *in general*
|
||||
|
||||
(watch out, however, for that big `npm install` working on a bind mount!)
|
||||
|
||||
- There are some corner cases when watching files (with mechanisms like inotify)
|
||||
|
||||
- Features like "live reload" or programs like `entr` don't always behave properly
|
||||
|
||||
(due to e.g. file attribute caching, and other interesting details!)
|
||||
(Under the hood, it's the same file anyway.)
|
||||
|
||||
---
|
||||
|
||||
@@ -446,4 +397,4 @@ We've learned how to:
|
||||
:EN:- “Containerize” a development environment
|
||||
|
||||
:FR:Développer au jour le jour
|
||||
:FR:- « Containeriser » son environnement de développement
|
||||
:FR:- « Containeriser » son environnement de développement
|
||||
@@ -298,20 +298,21 @@ virtually "free."
|
||||
|
||||
## Build targets
|
||||
|
||||
* We can also tag an intermediary stage with the following command:
|
||||
```bash
|
||||
docker build --target STAGE --tag NAME
|
||||
```
|
||||
* We can also tag an intermediary stage with `docker build --target STAGE --tag NAME`
|
||||
|
||||
* This will create an image (named `NAME`) corresponding to stage `STAGE`
|
||||
|
||||
* This can be used to easily access an intermediary stage for inspection
|
||||
|
||||
(instead of parsing the output of `docker build` to find out the image ID)
|
||||
(Instead of parsing the output of `docker build` to find out the image ID)
|
||||
|
||||
* This can also be used to describe multiple images from a single Dockerfile
|
||||
|
||||
(instead of using multiple Dockerfiles, which could go out of sync)
|
||||
(Instead of using multiple Dockerfiles, which could go out of sync)
|
||||
|
||||
* Sometimes, we want to inspect a specific intermediary build stage.
|
||||
|
||||
* Or, we want to describe multiple images using a single Dockerfile.
|
||||
|
||||
???
|
||||
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
# Container network drivers
|
||||
|
||||
The Docker Engine supports different network drivers.
|
||||
The Docker Engine supports many different network drivers.
|
||||
|
||||
The built-in drivers include:
|
||||
|
||||
* `bridge` (default)
|
||||
|
||||
* `null` (for the special network called `none`)
|
||||
* `none`
|
||||
|
||||
* `host` (for the special network called `host`)
|
||||
* `host`
|
||||
|
||||
* `container` (that one is a bit magic!)
|
||||
* `container`
|
||||
|
||||
The network is selected with `docker run --net ...`.
|
||||
|
||||
Each network is managed by a driver.
|
||||
The driver is selected with `docker run --net ...`.
|
||||
|
||||
The different drivers are explained with more details on the following slides.
|
||||
|
||||
|
||||
@@ -11,10 +11,10 @@ class State(object):
|
||||
self.section_title = None
|
||||
self.section_start = 0
|
||||
self.section_slides = 0
|
||||
self.parts = {}
|
||||
self.modules = {}
|
||||
self.sections = {}
|
||||
def show(self):
|
||||
if self.section_title.startswith("part-"):
|
||||
if self.section_title.startswith("module-"):
|
||||
return
|
||||
print("{0.section_title}\t{0.section_start}\t{0.section_slides}".format(self))
|
||||
self.sections[self.section_title] = self.section_slides
|
||||
@@ -38,10 +38,10 @@ for line in open(sys.argv[1]):
|
||||
if line == "--":
|
||||
state.current_slide += 1
|
||||
toc_links = re.findall("\(#toc-(.*)\)", line)
|
||||
if toc_links and state.section_title.startswith("part-"):
|
||||
if state.section_title not in state.parts:
|
||||
state.parts[state.section_title] = []
|
||||
state.parts[state.section_title].append(toc_links[0])
|
||||
if toc_links and state.section_title.startswith("module-"):
|
||||
if state.section_title not in state.modules:
|
||||
state.modules[state.section_title] = []
|
||||
state.modules[state.section_title].append(toc_links[0])
|
||||
# This is really hackish
|
||||
if line.startswith("class:"):
|
||||
for klass in EXCLUDED:
|
||||
@@ -51,7 +51,7 @@ for line in open(sys.argv[1]):
|
||||
|
||||
state.show()
|
||||
|
||||
for part in sorted(state.parts, key=lambda f: int(f.split("-")[1])):
|
||||
part_size = sum(state.sections[s] for s in state.parts[part])
|
||||
print("{}\t{}\t{}".format("total size for", part, part_size))
|
||||
for module in sorted(state.modules, key=lambda f: int(f.split("-")[1])):
|
||||
module_size = sum(state.sections[s] for s in state.modules[module])
|
||||
print("{}\t{}\t{}".format("total size for", module, module_size))
|
||||
|
||||
|
||||
@@ -1,55 +0,0 @@
|
||||
<?xml version="1.0"?>
|
||||
<html>
|
||||
<head>
|
||||
<style>
|
||||
td {
|
||||
background: #ccc;
|
||||
padding: 1em;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<table>
|
||||
<td>Mardi 8 juin 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mercredi 9 juin 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Jeudi 10 juin 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Vendredi 11 juin 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<td>Mercredi 23 juin 2021</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
</tr>
|
||||
<td>Jeudi 24 juin 2021</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
</tr>
|
||||
<td>Vendredi 25 juin 2021</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
</body>
|
||||
</html>
|
||||
@@ -112,10 +112,7 @@ TEMPLATE="""<html>
|
||||
{% for item in all_past_workshops %}
|
||||
<tr>
|
||||
<td>{{ item.title }}</td>
|
||||
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />
|
||||
{% else %}
|
||||
<p class="details">{{ item.status }}</p>
|
||||
{% endif %}</td>
|
||||
<td><a class="slides" href="{{ item.slides }}" /></td>
|
||||
{% if item.video %}
|
||||
<td><a class="video" href="{{ item.video }}" /></td>
|
||||
{% endif %}
|
||||
|
||||
@@ -1,103 +1,3 @@
|
||||
- date: [2021-09-27, 2021-09-29]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2021-10-04, 2021-10-07]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/2.yml.html
|
||||
|
||||
- date: [2021-10-11, 2021-10-12]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging et CI/CD pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2021-11-08, 2021-11-16]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2021-11-18, 2021-11-19]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/5.yml.html
|
||||
|
||||
- date: [2021-05-10, 2021-05-12]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2021-05-17, 2021-05-20]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/2.yml.html
|
||||
|
||||
- date: [2021-05-24, 2021-05-25]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging et CI/CD pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2021-05-26, 2021-05-28]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2021-05-31, 2021-06-01]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/5.yml.html
|
||||
|
||||
- date: [2021-02-08, 2021-02-10]
|
||||
country: www
|
||||
city: streaming
|
||||
@@ -106,7 +6,6 @@
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2021-02-15, 2021-02-18]
|
||||
country: www
|
||||
@@ -116,7 +15,6 @@
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/2.yml.html
|
||||
|
||||
- date: [2021-02-22, 2021-02-23]
|
||||
country: www
|
||||
@@ -126,7 +24,6 @@
|
||||
title: Packaging et CI/CD pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2021-02-24, 2021-02-26]
|
||||
country: www
|
||||
@@ -136,7 +33,6 @@
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2021-03-01, 2021-03-02]
|
||||
country: www
|
||||
@@ -146,7 +42,6 @@
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/5.yml.html
|
||||
|
||||
- date: [2020-10-05, 2020-10-06]
|
||||
country: www
|
||||
@@ -156,7 +51,6 @@
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2020-10-07, 2020-10-09]
|
||||
country: www
|
||||
@@ -166,7 +60,6 @@
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/2.yml.html
|
||||
|
||||
- date: 2020-10-12
|
||||
country: www
|
||||
@@ -176,7 +69,6 @@
|
||||
title: Packaging pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2020-10-13, 2020-10-14]
|
||||
country: www
|
||||
@@ -186,7 +78,6 @@
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2020-10-19, 2020-10-20]
|
||||
country: www
|
||||
@@ -196,7 +87,6 @@
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/5.yml.html
|
||||
|
||||
- date: [2020-09-28, 2020-10-01]
|
||||
country: www
|
||||
@@ -205,7 +95,6 @@
|
||||
speaker: jpetazzo
|
||||
title: Advanced Kubernetes Concepts
|
||||
attend: https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
|
||||
slides: https://2020-09-skillsmatter.container.training/
|
||||
|
||||
- date: [2020-08-29, 2020-08-30]
|
||||
country: www
|
||||
@@ -241,7 +130,6 @@
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2020-06-17, 2020-06-19]
|
||||
country: www
|
||||
@@ -251,7 +139,6 @@
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/2.yml.html
|
||||
|
||||
- date: 2020-06-22
|
||||
country: www
|
||||
@@ -261,7 +148,6 @@
|
||||
title: Packaging pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2020-06-23, 2020-06-24]
|
||||
country: www
|
||||
@@ -271,7 +157,6 @@
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2020-06-25, 2020-06-26]
|
||||
country: www
|
||||
@@ -281,8 +166,6 @@
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/5.yml.html
|
||||
|
||||
|
||||
- date: [2020-06-09, 2020-06-11]
|
||||
country: www
|
||||
|
||||
@@ -1,70 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
#- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
#- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
#- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Container_Networking_Basics.md
|
||||
#- containers/Network_Drivers.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Container_Network_Model.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Multi_Stage_Builds.md
|
||||
#- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Advanced_Dockerfiles.md
|
||||
#- containers/Init_Systems.md
|
||||
#- containers/Application_Configuration.md
|
||||
#- containers/Logging.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Container_Engines.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
#- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,71 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
# - shared/logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - containers/Docker_Overview.md
|
||||
- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Start_And_Attach.md
|
||||
- - containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- - containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
#- containers/Connecting_Containers_With_Links.md
|
||||
- containers/Ambassadors.md
|
||||
- - containers/Local_Development_Workflow.md
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Resource_Limits.md
|
||||
- - containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
- - containers/Container_Engines.md
|
||||
- containers/Pods_Anatomy.md
|
||||
- containers/Ecosystem.md
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,79 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
-
|
||||
- containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Start_And_Attach.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Resource_Limits.md
|
||||
- # DAY 2
|
||||
- containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
-
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Installing_Docker.md
|
||||
- containers/Container_Engines.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
-
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Orchestration_Overview.md
|
||||
-
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
#-
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Ambassadors.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
@@ -1,41 +1,13 @@
|
||||
# Accessing internal services
|
||||
|
||||
- When we are logged in on a cluster node, we can access internal services
|
||||
|
||||
(by virtue of the Kubernetes network model: all nodes can reach all pods and services)
|
||||
|
||||
- When we are accessing a remote cluster, things are different
|
||||
|
||||
(generally, our local machine won't have access to the cluster's internal subnet)
|
||||
|
||||
- How can we temporarily access a service without exposing it to everyone?
|
||||
|
||||
--
|
||||
|
||||
- `kubectl proxy`: gives us access to the API, which includes a proxy for HTTP resources
|
||||
|
||||
- `kubectl port-forward`: allows forwarding of TCP ports to arbitrary pods, services, ...
|
||||
|
||||
---
|
||||
|
||||
## Suspension of disbelief
|
||||
|
||||
The exercises in this section assume that we have set up `kubectl` on our
|
||||
local machine in order to access a remote cluster.
|
||||
|
||||
We will therefore show how to access services and pods of the remote cluster,
|
||||
from our local machine.
|
||||
|
||||
You can also run these exercises directly on the cluster (if you haven't
|
||||
installed and set up `kubectl` locally).
|
||||
|
||||
Running commands locally will be less useful
|
||||
(since you could access services and pods directly),
|
||||
but keep in mind that these commands will work anywhere as long as you have
|
||||
installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
---
|
||||
|
||||
## `kubectl proxy` in theory
|
||||
|
||||
- Running `kubectl proxy` gives us access to the entire Kubernetes API
|
||||
@@ -56,7 +28,7 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
## `kubectl proxy` in practice
|
||||
|
||||
- Let's access the `webui` service through `kubectl proxy`
|
||||
- Let's access the `web` service through `kubectl proxy`
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -65,9 +37,9 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
kubectl proxy &
|
||||
```
|
||||
|
||||
- Access the `webui` service:
|
||||
- Access the `web` service:
|
||||
```bash
|
||||
curl localhost:8001/api/v1/namespaces/default/services/webui/proxy/index.html
|
||||
curl localhost:8001/api/v1/namespaces/default/services/web/proxy/
|
||||
```
|
||||
|
||||
- Terminate the proxy:
|
||||
@@ -99,22 +71,20 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
## `kubectl port-forward` in practice
|
||||
|
||||
- Let's access our remote Redis server
|
||||
- Let's access our remote NGINX server
|
||||
|
||||
.exercise[
|
||||
|
||||
- Forward connections from local port 10000 to remote port 6379:
|
||||
- Forward connections from local port 1234 to remote port 80:
|
||||
```bash
|
||||
kubectl port-forward svc/redis 10000:6379 &
|
||||
kubectl port-forward svc/web 1234:80 &
|
||||
```
|
||||
|
||||
- Connect to the Redis server:
|
||||
- Connect to the NGINX server:
|
||||
```bash
|
||||
telnet localhost 10000
|
||||
curl localhost:1234
|
||||
```
|
||||
|
||||
- Issue a few commands, e.g. `INFO server` then `QUIT`
|
||||
|
||||
<!--
|
||||
```wait Connected to localhost```
|
||||
```keys INFO server```
|
||||
|
||||
@@ -84,13 +84,15 @@
|
||||
|
||||
## Creating the ClusterIssuer
|
||||
|
||||
- The manifest shown on the previous slide is in @@LINK[k8s/cm-clusterissuer.yaml]
|
||||
- Download the file @@LINK[k8s/cm-clusterissuer.yaml]
|
||||
|
||||
(or copy-paste from the previous slide)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the ClusterIssuer:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/cm-clusterissuer.yaml
|
||||
kubectl apply cm-clusterissuer.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -113,7 +115,9 @@
|
||||
|
||||
## Creating the Certificate
|
||||
|
||||
- The manifest shown on the previous slide is in @@LINK[k8s/cm-certificate.yaml]
|
||||
- Download the file @@LINK[k8s/cm-certificate.yaml]
|
||||
|
||||
(or copy-paste from the previous slide)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -123,7 +127,7 @@
|
||||
|
||||
- Create the Certificate:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/cm-certificate.yaml
|
||||
kubectl apply -f cm-certificate.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -170,25 +174,14 @@
|
||||
|
||||
---
|
||||
|
||||
## What's missing ?
|
||||
## And then...
|
||||
|
||||
--
|
||||
|
||||
An Ingress Controller! 😅
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install an Ingress Controller:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/traefik-v2.yaml
|
||||
```
|
||||
|
||||
- Wait a little bit, and check that we now have a `kubernetes.io/tls` Secret:
|
||||
- A little bit later, we will have a `kubernetes.io/tls` Secret:
|
||||
```bash
|
||||
kubectl get secrets
|
||||
```
|
||||
|
||||
]
|
||||
- Note that this might take a few minutes, because of the DNS integration!
|
||||
|
||||
---
|
||||
|
||||
@@ -232,25 +225,6 @@ class: extra-details
|
||||
|
||||
- Note: the Ingress still needs the `tls` section with `secretName` and `hosts`
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Let's Encrypt and nip.io
|
||||
|
||||
- Let's Encrypt has [rate limits](https://letsencrypt.org/docs/rate-limits/) per domain
|
||||
|
||||
(the limits only apply to the production environment, not staging)
|
||||
|
||||
- There is a limit of 50 certificates per registered domain
|
||||
|
||||
- If we try to use the production environment, we will probably hit the limit
|
||||
|
||||
- It's fine to use the staging environment for these experiments
|
||||
|
||||
(our certs won't validate in a browser, but we can always check
|
||||
the details of the cert to verify that it was issued by Let's Encrypt!)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Obtaining certificates with cert-manager
|
||||
|
||||
@@ -187,7 +187,7 @@ Note: we can update a CRD without having to re-create the corresponding resource
|
||||
|
||||
---
|
||||
|
||||
## OpenAPI v3 schema example
|
||||
## OpenAPI v3 scheme exapmle
|
||||
|
||||
This is what we have in @@LINK[k8s/coffee-3.yaml]:
|
||||
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
# Exercise — sealed secrets
|
||||
|
||||
This is a "combo exercise" to practice the following concepts:
|
||||
|
||||
- Secrets (mounting them in containers)
|
||||
|
||||
- RBAC (granting specific permissions to specific users)
|
||||
|
||||
- Operators (specifically, sealed secrets)
|
||||
|
||||
- Migrations (copying/transferring resources from a cluster to another)
|
||||
|
||||
For this exercise, you will need two clusters.
|
||||
|
||||
(It can be two local clusters.)
|
||||
|
||||
We will call them "source cluster" and "target cluster".
|
||||
|
||||
---
|
||||
|
||||
## Step 1 (easy)
|
||||
|
||||
- Install the sealed secrets operator on both clusters
|
||||
|
||||
- On source cluster, create a Namespace called `dev`
|
||||
|
||||
- Create two sealed secrets, `verysecure` and `veryverysecure`
|
||||
|
||||
(the content doesn't matter; put a random string of your choice)
|
||||
|
||||
- Create a Deployment called `app` using both secrets
|
||||
|
||||
(use a mount or environment variables; whatever you prefer!)
|
||||
|
||||
- Verify that the secrets are available to the Deployment
|
||||
|
||||
---
|
||||
|
||||
## Step 2 (medium)
|
||||
|
||||
- Create another Namespace called `prod`
|
||||
|
||||
(on the source cluster)
|
||||
|
||||
- Create the same Deployment `app` using both secrets
|
||||
|
||||
- Verify that the secrets are available to the Deployment
|
||||
|
||||
---
|
||||
|
||||
## Step 3 (hard)
|
||||
|
||||
- On the target cluster, create a Namespace called `prod`
|
||||
|
||||
- Create the `app` Deployment and both sealed secrets
|
||||
|
||||
(do not copy the Secrets; only the sealed secrets)
|
||||
|
||||
- Check the next slide if you need a hint!
|
||||
|
||||
--
|
||||
|
||||
- You will have to copy the Sealed Secret private key
|
||||
|
||||
---
|
||||
|
||||
## Step 4 (medium)
|
||||
|
||||
On the target cluster, create the Namespace `dev`.
|
||||
|
||||
Let's say that user `alice` has access to the target cluster.
|
||||
|
||||
(You can use `kubectl --as=alice` to impersonate her.)
|
||||
|
||||
We want Alice to be able to:
|
||||
|
||||
- deploy the whole application
|
||||
|
||||
- access the `verysecure` secret
|
||||
|
||||
- but *not* the `veryverysecure` secret
|
||||
|
||||
---
|
||||
|
||||
## Step 5 (hard)
|
||||
|
||||
- Make sure that Alice can view the logs of the Deployment
|
||||
|
||||
- Can you think of a way for Alice to access the `veryverysecure` Secret?
|
||||
|
||||
(check next slide for a hint)
|
||||
|
||||
--
|
||||
|
||||
- `kubectl exec`, maybe?
|
||||
|
||||
--
|
||||
|
||||
- Can you think of a way to prevent that?
|
||||
@@ -164,154 +164,493 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Ingress
|
||||
|
||||
- We will assume that we have a domain name pointing to our cluster
|
||||
|
||||
(i.e. with a wildcard record pointing to at least one node of the cluster)
|
||||
|
||||
- We will get traffic in the cluster by leveraging `ExternalIPs` services
|
||||
|
||||
(but it would be easy to use `LoadBalancer` services instead)
|
||||
|
||||
- We will use Traefik as the ingress controller
|
||||
|
||||
(but any other one should work too)
|
||||
|
||||
- We will use cert-manager to obtain certificates with Let's Encrypt
|
||||
|
||||
---
|
||||
|
||||
## Other details
|
||||
## Install GitLab itself
|
||||
|
||||
- We will deploy GitLab with its official Helm chart
|
||||
|
||||
- It will still require a bunch of parameters and customization
|
||||
|
||||
- We also need a Storage Class
|
||||
|
||||
(unless our cluster already has one, of course)
|
||||
|
||||
- We suggest the [Rancher local path provisioner](https://github.com/rancher/local-path-provisioner)
|
||||
- Brace!
|
||||
|
||||
---
|
||||
|
||||
## Setting everything up
|
||||
## Installing the GitLab chart
|
||||
|
||||
1. `git clone https://github.com/jpetazzo/kubecoin`
|
||||
```bash
|
||||
helm repo add gitlab https://charts.gitlab.io/
|
||||
DOMAIN=`cloudnative.party`
|
||||
ISSUER=letsencrypt-production
|
||||
helm upgrade --install gitlab gitlab/gitlab \
|
||||
--create-namespace --namespace gitlab \
|
||||
--set global.hosts.domain=$DOMAIN \
|
||||
--set certmanager.install=false \
|
||||
--set nginx-ingress.enabled=false \
|
||||
--set global.ingress.class=traefik \
|
||||
--set global.ingress.provider=traefik \
|
||||
--set global.ingress.configureCertmanager=false \
|
||||
--set global.ingress.annotations."cert-manager\.io/cluster-issuer"=$ISSUER \
|
||||
--set gitlab.webservice.ingress.tls.secretName=gitlab-gitlab-tls \
|
||||
--set registry.ingress.tls.secretName=gitlab-registry-tls \
|
||||
--set minio.ingress.tls.secretName=gitlab-minio-tls
|
||||
```
|
||||
|
||||
2. `export EMAIL=xxx@example.com DOMAIN=awesome-kube-ci.io`
|
||||
|
||||
(we need a real email address and a domain pointing to the cluster!)
|
||||
|
||||
3. `. setup-gitlab-on-k8s.rc`
|
||||
|
||||
(this doesn't do anything, but defines a number of helper functions)
|
||||
|
||||
4. Execute each helper function, one after another
|
||||
|
||||
(try `do_[TAB]` to see these functions)
|
||||
😰 Can we talk about all these parameters?
|
||||
|
||||
---
|
||||
|
||||
## Local Storage
|
||||
## Breaking down all these parameters
|
||||
|
||||
`do_1_localstorage`
|
||||
- `certmanager.install=false`
|
||||
|
||||
Applies the YAML directly from Rancher's repository.
|
||||
do not install cert-manager, we already have it
|
||||
|
||||
Annotate the Storage Class so that it becomes the default one.
|
||||
- `nginx-ingress.enabled=false`
|
||||
|
||||
do not install the NGINX ingress controller, we already have Traefik
|
||||
|
||||
- `global.ingress.class=traefik`, `global.ingress.provider=traefik`
|
||||
|
||||
these merely enable creation of Ingress resources
|
||||
|
||||
- `global.ingress.configureCertmanager=false`
|
||||
|
||||
do not create a cert-manager Issuer or ClusterIssuer, we have ours
|
||||
|
||||
---
|
||||
|
||||
## Traefik
|
||||
## More parameters
|
||||
|
||||
`do_2_traefik_with_externalips`
|
||||
- `global.ingress.annotations."cert-manager\.io/cluster-issuer"=$ISSUER`
|
||||
|
||||
Install the official Traefik Helm chart.
|
||||
this annotation tells cert-manager to automatically issue certs
|
||||
|
||||
Instead of a `LoadBalancer` service, use a `ClusterIP` with `ExternalIPs`.
|
||||
- `gitlab.webservice.ingress.tls.secretName=gitlab-gitlab-tls`,
|
||||
<br/>
|
||||
`registry.ingress.tls.secretName=gitlab-registry-tls`,
|
||||
<br/>
|
||||
`minio.ingress.tls.secretName=gitlab-minio-tls`
|
||||
|
||||
Automatically infer the `ExternalIPs` from `kubectl get nodes`.
|
||||
|
||||
Enable TLS.
|
||||
these annotations enable TLS in the Ingress controller
|
||||
|
||||
---
|
||||
|
||||
## cert-manager
|
||||
## Wait for GitLab to come up
|
||||
|
||||
`do_3_certmanager`
|
||||
- Let's watch what's happening in the GitLab namespace:
|
||||
```bash
|
||||
watch kubectl get all --namespace gitlab
|
||||
```
|
||||
|
||||
Install cert-manager using their official YAML.
|
||||
- We want to wait for all the Pods to be "Running" or "Completed"
|
||||
|
||||
Easy-peasy.
|
||||
- This will take a few minutes (10-15 minutes for me)
|
||||
|
||||
- Don't worry if you see Pods crashing and restarting
|
||||
|
||||
(it happens when they are waiting on a dependency which isn't up yet)
|
||||
|
||||
---
|
||||
|
||||
## Certificate issuers
|
||||
## Things that could go wrong
|
||||
|
||||
`do_4_issuers`
|
||||
- Symptom: Pods remain "Pending" or "ContainerCreating" for a while
|
||||
|
||||
Create a couple of `ClusterIssuer` resources for cert-manager.
|
||||
- Investigate these pods (with `kubectl describe pod ...`)
|
||||
|
||||
(One for the staging Let's Encrypt environment, one for production.)
|
||||
- Also look at events:
|
||||
```bash
|
||||
kubectl get events \
|
||||
--field-selector=type=Warning --sort-by=metadata.creationTimestamp
|
||||
```
|
||||
|
||||
Note: this requires to specify a valid `$EMAIL` address!
|
||||
- Make sure your cluster is big enough
|
||||
|
||||
Note: if this fails, wait a bit and try again (cert-manager needs to be up).
|
||||
(I use 3 `g6-standard-4` nodes)
|
||||
|
||||
---
|
||||
|
||||
## GitLab
|
||||
## Log into GitLab
|
||||
|
||||
`do_5_gitlab`
|
||||
- First, let's check that we can connect to GitLab (with TLS):
|
||||
|
||||
Deploy GitLab using their official Helm chart.
|
||||
`https://gitlab.$DOMAIN`
|
||||
|
||||
We pass a lot of parameters to this chart:
|
||||
- the domain name to use
|
||||
- disable GitLab's own ingress and cert-manager
|
||||
- annotate the ingress resources so that cert-manager kicks in
|
||||
- bind the shell service (git over SSH) to port 222 to avoid conflict
|
||||
- use ExternalIPs for that shell service
|
||||
- It's asking us for a login and password!
|
||||
|
||||
Note: on modest cloud instances, it can take 10 minutes for GitLab to come up.
|
||||
|
||||
We can check the status with `kubectl get pods --namespace=gitlab`
|
||||
- The login is `root`, and the password is stored in a Secret:
|
||||
```bash
|
||||
kubectl get secrets --namespace=gitlab gitlab-gitlab-initial-root-password \
|
||||
-o jsonpath={.data.password} | base64 -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Log into GitLab and configure it
|
||||
## Configure GitLab
|
||||
|
||||
`do_6_showlogin`
|
||||
- For simplicity, we're going to use that "root" user
|
||||
|
||||
This will get the GitLab root password (stored in a Secret).
|
||||
(but later, you can create multiple users, teams, etc.)
|
||||
|
||||
Then we need to:
|
||||
- log into GitLab
|
||||
- add our SSH key (top-right user menu → settings, then SSH keys on the left)
|
||||
- create a project (using the + menu next to the search bar on top)
|
||||
- go to project configuration (on the left, settings → CI/CD)
|
||||
- add a `KUBECONFIG` file variable with the content of our `.kube/config` file
|
||||
- go to settings → access tokens to create a read-only registry token
|
||||
- add variables `REGISTRY_USER` and `REGISTRY_PASSWORD` with that token
|
||||
- push our repo (`git remote add gitlab ...` then `git push gitlab ...`)
|
||||
- First, let's add our SSH key
|
||||
|
||||
(top-right user menu → settings, then SSH keys on the left)
|
||||
|
||||
- Then, create a project
|
||||
|
||||
(using the + menu next to the search bar on top)
|
||||
|
||||
- Let's call it `kubecoin`
|
||||
|
||||
(you can change it, but you'll have to adjust Git paths later on)
|
||||
|
||||
---
|
||||
|
||||
## Monitoring progress and troubleshooting
|
||||
## Try to push our repository
|
||||
|
||||
- Click on "CI/CD" in the left bar to view pipelines
|
||||
- This is the repository that we're going to use:
|
||||
|
||||
- If you see a permission issue mentioning `system:serviceaccount:gitlab:...`:
|
||||
https://github.com/jpetazzo/kubecoin
|
||||
|
||||
*make sure you did set `KUBECONFIG` correctly!*
|
||||
- Let's clone that repository locally first:
|
||||
```bash
|
||||
git clone https://github.com/jpetazzo/kubecoin
|
||||
```
|
||||
|
||||
- GitLab will create namespaces named `gl-<user>-<project>`
|
||||
- Add our GitLab instance as a remote:
|
||||
```bash
|
||||
git remote add gitlab git@gitlab.$DOMAIN:root/kubecoin.git
|
||||
```
|
||||
|
||||
- At the end of the deployment, the web UI will be available on some unique URL
|
||||
- Try to push:
|
||||
```bash
|
||||
git push -u gitlab
|
||||
```
|
||||
|
||||
(`http://<user>-<project>-<githash>-gitlab.<domain>`)
|
||||
---
|
||||
|
||||
## Connection refused?
|
||||
|
||||
- Normally, we get the following error:
|
||||
|
||||
`port 22: Connection refused`
|
||||
|
||||
- Why? 🤔
|
||||
|
||||
--
|
||||
|
||||
- What does `gitlab.$DOMAIN` point to?
|
||||
|
||||
--
|
||||
|
||||
- Our Ingress Controller! (i.e. Traefik) 💡
|
||||
|
||||
- Our Ingress Controller has nothing to do with port 22
|
||||
|
||||
- So how do we solve this?
|
||||
|
||||
---
|
||||
|
||||
## Routing port 22
|
||||
|
||||
- Whatever is on `gitlab.$DOMAIN` needs to have the following "routing":
|
||||
|
||||
- port 80 → GitLab web service
|
||||
|
||||
- port 443 → GitLab web service, with TLS
|
||||
|
||||
- port 22 → GitLab shell service
|
||||
|
||||
- Currently, Traefik is managing `gitlab.$DOMAIN`
|
||||
|
||||
- We are going to tell Traefik to:
|
||||
|
||||
- accept connections on port 22
|
||||
|
||||
- send them to GitLab
|
||||
|
||||
---
|
||||
|
||||
## TCP routing
|
||||
|
||||
- The technique that we are going to use is specific to Traefik
|
||||
|
||||
- Other Ingress Controllers may or may not have similar features
|
||||
|
||||
- When they have similar features, they will be enabled very differently
|
||||
|
||||
---
|
||||
|
||||
## Telling Traefik to open port 22
|
||||
|
||||
- Let's reconfigure Traefik:
|
||||
```bash
|
||||
helm upgrade --install traefik traefik/traefik \
|
||||
--create-namespace --namespace traefik \
|
||||
--set "ports.websecure.tls.enabled=true" \
|
||||
--set "providers.kubernetesIngress.publishedService.enabled=true" \
|
||||
--set "ports.ssh.port=2222" \
|
||||
--set "ports.ssh.exposedPort=22" \
|
||||
--set "ports.ssh.expose=true" \
|
||||
--set "ports.ssh.protocol=TCP"
|
||||
```
|
||||
|
||||
- This creates a new "port" on Traefik, called "ssh", listening on port 22
|
||||
|
||||
- Internally, Traefik listens on port 2222 (for permission reasons)
|
||||
|
||||
- Note: Traefik docs also call these ports "entrypoints"
|
||||
|
||||
(these entrypoints are totally unrelated to the `ENTRYPOINT` in Dockerfiles)
|
||||
|
||||
---
|
||||
|
||||
## Knocking on port 22
|
||||
|
||||
- What happens if we try to connect to that port 22 right now?
|
||||
```bash
|
||||
curl gitlab.$DOMAIN:22
|
||||
```
|
||||
|
||||
- We hit GitLab's web service!
|
||||
|
||||
- We need to tell Traefik what to do with connections to that port 22
|
||||
|
||||
- For that, we will create a "TCP route"
|
||||
|
||||
---
|
||||
|
||||
## Traefik TCP route
|
||||
|
||||
The following custom resource tells Traefik to route the `ssh` port that we
|
||||
created earlier, to the `gitlab-gitlab-shell` service belonging to GitLab.
|
||||
|
||||
```yaml
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRouteTCP
|
||||
metadata:
|
||||
name: gitlab-shell
|
||||
namespace: gitlab
|
||||
spec:
|
||||
entryPoints:
|
||||
- ssh
|
||||
routes:
|
||||
- match: HostSNI(\`*\`)
|
||||
services:
|
||||
- name: gitlab-gitlab-shell
|
||||
port: 22
|
||||
```
|
||||
|
||||
The `HostSNI` wildcard is the magic option to define a "default route".
|
||||
|
||||
---
|
||||
|
||||
## Creating the TCP route
|
||||
|
||||
Since our manifest has backticks, we must pay attention to quoting:
|
||||
|
||||
```bash
|
||||
kubectl apply -f- << "EOF"
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRouteTCP
|
||||
metadata:
|
||||
name: gitlab-shell
|
||||
namespace: gitlab
|
||||
spec:
|
||||
entryPoints:
|
||||
- ssh
|
||||
routes:
|
||||
- match: HostSNI(\`*\`)
|
||||
services:
|
||||
- name: gitlab-gitlab-shell
|
||||
port: 22
|
||||
EOF
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Knocking on port 22, again
|
||||
|
||||
- Let's see what happens if we try port 22 now:
|
||||
```bash
|
||||
curl gitlab.$DOMAIN:22
|
||||
```
|
||||
|
||||
- This should tell us something like `Received HTTP/0.9 when not allowed`
|
||||
|
||||
(because we're no longer talking to an HTTP server, but to SSH!)
|
||||
|
||||
- Try with SSH:
|
||||
```bash
|
||||
ssh git@gitlab.$DOMAIN
|
||||
```
|
||||
|
||||
- After accepting the key fingerprint, we should see `Welcome to GitLab, @root!`
|
||||
|
||||
---
|
||||
|
||||
## Pushing again
|
||||
|
||||
- Now we can try to push our repository again:
|
||||
```bash
|
||||
git push -u gitlab
|
||||
```
|
||||
|
||||
- Reload the project page in GitLab
|
||||
|
||||
- We should see our repository!
|
||||
|
||||
---
|
||||
|
||||
## CI/CD
|
||||
|
||||
- Click on the CI/CD tab on the left
|
||||
|
||||
(the one with the shuttle / space rocket icon)
|
||||
|
||||
- Our pipeline was detected...
|
||||
|
||||
- But it failed 😕
|
||||
|
||||
- Let's click on one of the failed jobs
|
||||
|
||||
- This is a permission issue!
|
||||
|
||||
---
|
||||
|
||||
## Fixing permissions
|
||||
|
||||
- GitLab needs to do a few things in our cluster:
|
||||
|
||||
- create Pods to build our container images with BuildKit
|
||||
|
||||
- create Namespaces to deploy staging and production versions of our app
|
||||
|
||||
- create and update resources in these Namespaces
|
||||
|
||||
- For the time being, we're going to grant broad permissions
|
||||
|
||||
(and we will revisit and discuss what to do later)
|
||||
|
||||
---
|
||||
|
||||
## Granting permissions
|
||||
|
||||
- Let's give `cluster-admin` permissions to the GitLab ServiceAccount:
|
||||
```bash
|
||||
kubectl create clusterrolebinding gitlab \
|
||||
--clusterrole=cluster-admin --serviceaccount=gitlab:default
|
||||
```
|
||||
|
||||
- Then retry the CI/CD pipeline
|
||||
|
||||
- The build steps will now succeed; but the deploy steps will fail
|
||||
|
||||
- We need to set the `REGISTRY_USER` and `REGISTRY_PASSWORD` variables
|
||||
|
||||
- Let's explain what this is about!
|
||||
|
||||
---
|
||||
|
||||
## GitLab container registry access
|
||||
|
||||
- A registry access token is created for the duration of the CI/CD pipeline
|
||||
|
||||
(it is exposed through the `$CI_JOB_TOKEN` environment variable)
|
||||
|
||||
- This token gives access only to a specific repository in the registry
|
||||
|
||||
- It is valid only during the execution of the CI/CD pipeline
|
||||
|
||||
- We can (and we do!) use it to *push* images to the registry
|
||||
|
||||
- We cannot use it to *pull* images when running in staging or production
|
||||
|
||||
(because Kubernetes might need to pull images *after* the token expires)
|
||||
|
||||
- We need to create a separate read-only registry access token
|
||||
|
||||
---
|
||||
|
||||
## Creating the registry access token
|
||||
|
||||
- Let's go to "Settings" (the cog wheel on the left) / "Access Tokens"
|
||||
|
||||
- Create a token with `read_registry` permission
|
||||
|
||||
- Save the token name and the token value
|
||||
|
||||
- Then go to "Settings" / "CI/CD"
|
||||
|
||||
- In the "Variables" section, add two variables:
|
||||
|
||||
- `REGISTRY_USER` → token name
|
||||
- `REGISTRY_PASSWORD` → token value
|
||||
|
||||
- Make sure that they are **not** protected!
|
||||
|
||||
(otherwise, they won't be available in non-default tags and branches)
|
||||
|
||||
---
|
||||
|
||||
## Trying again
|
||||
|
||||
- Go back to the CI/CD pipeline view, and hit "Retry"
|
||||
|
||||
- The deploy stage should now work correctly! 🎉
|
||||
|
||||
---
|
||||
|
||||
## Our CI/CD pipeline
|
||||
|
||||
- Let's have a look at the [.gitlab-ci.yml](https://github.com/jpetazzo/kubecoin/blob/107dac5066087c52747e557babc97e57f42dd71d/.gitlab-ci.yml) file
|
||||
|
||||
- We have multiple *stages*:
|
||||
|
||||
- lint (currently doesn't do much, it's mostly as an example)
|
||||
|
||||
- build (currently uses BuildKit)
|
||||
|
||||
- deploy
|
||||
|
||||
- "Deploy" behaves differently in staging and production
|
||||
|
||||
- Let's investigate that!
|
||||
|
||||
---
|
||||
|
||||
## Staging vs production
|
||||
|
||||
- In our pipeline, "production" means "a tag or branch named `production`"
|
||||
|
||||
(see the `except:` and `only:` sections)
|
||||
|
||||
- Everything else is "staging"
|
||||
|
||||
- In "staging":
|
||||
|
||||
- we build and push images
|
||||
- we create a staging Namespace and deploy a copy of the app there
|
||||
|
||||
- In "production":
|
||||
|
||||
- we do not build anything
|
||||
- we deploy (or update) a copy of the app in the production Namespace
|
||||
|
||||
---
|
||||
|
||||
## Namespace naming
|
||||
|
||||
- GitLab will create Namespaces named `gl-<user>-<project>-<hash>`
|
||||
|
||||
- At the end of the deployment, the web UI will be available at:
|
||||
|
||||
`http://<user>-<project>-<githash>-gitlab.<domain>`
|
||||
|
||||
- The "production" Namespace will be `<user>-<project>`
|
||||
|
||||
- And it will be available on its own domain as well:
|
||||
|
||||
`http://<project>-<githash>-gitlab.<domain>`
|
||||
|
||||
---
|
||||
|
||||
@@ -325,7 +664,7 @@ Then we need to:
|
||||
|
||||
- It will do it *only* if that same git commit was pushed to staging first
|
||||
|
||||
(look in the pipeline configuration file to see how it's done!)
|
||||
(because the "production" pipeline skips the build phase)
|
||||
|
||||
---
|
||||
|
||||
@@ -411,35 +750,15 @@ Then we need to:
|
||||
|
||||
---
|
||||
|
||||
## Pros
|
||||
## Why not use GitLab's Kubernetes integration?
|
||||
|
||||
- GitLab is an amazing, open source, all-in-one platform
|
||||
- "All-in-one" approach
|
||||
|
||||
- Available as hosted, community, or enterprise editions
|
||||
(deploys its own Ingress, cert-manager, Prometheus, and much more)
|
||||
|
||||
- Rich ecosystem, very customizable
|
||||
- I wanted to show you something flexible and customizable instead
|
||||
|
||||
- Can run on Kubernetes, or somewhere else
|
||||
|
||||
---
|
||||
|
||||
## Cons
|
||||
|
||||
- It can be difficult to use components separately
|
||||
|
||||
(e.g. use a different registry, or a different job runner)
|
||||
|
||||
- More than one way to configure it
|
||||
|
||||
(it's not an opinionated platform)
|
||||
|
||||
- Not "Kubernetes-native"
|
||||
|
||||
(for instance, jobs are not Kubernetes jobs)
|
||||
|
||||
- Job latency could be improved
|
||||
|
||||
*Note: most of these drawbacks are the flip side of the "pros" on the previous slide!*
|
||||
- But feel free to explore it now that we have shown the basics!
|
||||
|
||||
???
|
||||
|
||||
|
||||
@@ -134,7 +134,7 @@ use Bitnami's Redis chart.
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: ">=11, <12"
|
||||
version: ">=11 <12"
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
|
||||
@@ -86,7 +86,7 @@
|
||||
|
||||
- On April 30th 2020, Helm was the 10th project to *graduate* within the CNCF
|
||||
|
||||
🎉
|
||||
.emoji[🎉]
|
||||
|
||||
(alongside Containerd, Prometheus, and Kubernetes itself)
|
||||
|
||||
@@ -504,8 +504,7 @@ The `readme` may or may not have (accurate) explanations for the values.
|
||||
|
||||
- Update `my-juice-shop`:
|
||||
```bash
|
||||
helm upgrade my-juice-shop juice/my-juice-shop \
|
||||
--set service.type=NodePort
|
||||
helm upgrade my-juice-shop juice/juice-shop --set service.type=NodePort
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
@@ -160,8 +160,3 @@ class: extra-details
|
||||
- The problem was fixed in Kubernetes 1.13
|
||||
|
||||
*See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details.*
|
||||
|
||||
???
|
||||
|
||||
:EN:- Viewing logs with "kubectl logs"
|
||||
:FR:- Consulter les logs avec "kubectl logs"
|
||||
|
||||
@@ -69,7 +69,7 @@ Without further ado, let's start this application!
|
||||
|
||||
--
|
||||
|
||||
- It is a DockerCoin miner! 💰🐳📦🚢
|
||||
- It is a DockerCoin miner! .emoji[💰🐳📦🚢]
|
||||
|
||||
--
|
||||
|
||||
|
||||
@@ -1,60 +1,38 @@
|
||||
# Kustomize
|
||||
|
||||
- Kustomize lets us transform Kubernetes resources:
|
||||
- Kustomize lets us transform YAML files representing Kubernetes resources
|
||||
|
||||
*YAML + kustomize → new YAML*
|
||||
- The original YAML files are valid resource files
|
||||
|
||||
- Starting point = valid resource files
|
||||
(e.g. they can be loaded with `kubectl apply -f`)
|
||||
|
||||
(i.e. something that we could load with `kubectl apply -f`)
|
||||
- They are left untouched by Kustomize
|
||||
|
||||
- Recipe = a *kustomization* file
|
||||
- Kustomize lets us define *kustomizations*
|
||||
|
||||
(describing how to transform the resources)
|
||||
- A *kustomization* is conceptually similar to a *layer*
|
||||
|
||||
- Result = new resource files
|
||||
- Technically, a *kustomization* is a file named `kustomization.yaml`
|
||||
|
||||
(that we can load with `kubectl apply -f`)
|
||||
(or a directory containing that file + additional files)
|
||||
|
||||
---
|
||||
|
||||
## Pros and cons
|
||||
## What's in a kustomization
|
||||
|
||||
- Relatively easy to get started
|
||||
- A kustomization can do any combination of the following:
|
||||
|
||||
(just get some existing YAML files)
|
||||
- include other kustomizations
|
||||
|
||||
- Easy to leverage existing "upstream" YAML files
|
||||
- include Kubernetes resources defined in YAML files
|
||||
|
||||
(or other *kustomizations*)
|
||||
- patch Kubernetes resources (change values)
|
||||
|
||||
- Somewhat integrated with `kubectl`
|
||||
- add labels or annotations to all resources
|
||||
|
||||
(but only "somewhat" because of version discrepancies)
|
||||
- specify ConfigMaps and Secrets from literal values or local files
|
||||
|
||||
- Less complex than e.g. Helm, but also less powerful
|
||||
|
||||
- No central index like the Artifact Hub (but is there a need for it?)
|
||||
|
||||
---
|
||||
|
||||
## Kustomize in a nutshell
|
||||
|
||||
- Get some valid YAML (our "resources")
|
||||
|
||||
- Write a *kustomization* (technically, a file named `kustomization.yaml`)
|
||||
|
||||
- reference our resources
|
||||
|
||||
- reference other kustomizations
|
||||
|
||||
- add some *patches*
|
||||
|
||||
- ...
|
||||
|
||||
- Use that kustomization either with `kustomize build` or `kubectl apply -k`
|
||||
|
||||
- Write new kustomizations referencing the first one to handle minor differences
|
||||
(... And a few more advanced features that we won't cover today!)
|
||||
|
||||
---
|
||||
|
||||
@@ -80,17 +58,11 @@ On the next slide, let's see a more complex example ...
|
||||
|
||||
---
|
||||
|
||||
## A more complex Kustomization
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
commonAnnotations:
|
||||
mood: 😎
|
||||
commonLabels:
|
||||
add-this-to-all-my-resources: please
|
||||
namePrefix: prod-
|
||||
patchesStrategicMerge:
|
||||
- prod-scaling.yaml
|
||||
- prod-healthchecks.yaml
|
||||
@@ -108,7 +80,6 @@ configMapGenerator:
|
||||
- global.conf
|
||||
- local.conf=prod.conf
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -168,7 +139,7 @@ configMapGenerator:
|
||||
|
||||
## Remote bases
|
||||
|
||||
- Kustomize can also use bases that are remote git repositories
|
||||
- Kustomize can fetch remote bases using Hashicorp go-getter library
|
||||
|
||||
- Examples:
|
||||
|
||||
@@ -176,31 +147,11 @@ configMapGenerator:
|
||||
|
||||
github.com/jpetazzo/kubercoins?ref=kustomize (specific tag or branch)
|
||||
|
||||
- Note that this only works for kustomizations, not individual resources
|
||||
|
||||
(the specified repository or directory must contain a `kustomization.yaml` file)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Hashicorp go-getter
|
||||
|
||||
- Some versions of Kustomize support additional forms for remote resources
|
||||
|
||||
- Examples:
|
||||
|
||||
https://releases.hello.io/k/1.0.zip (remote archive)
|
||||
|
||||
https://releases.hello.io/k/1.0.zip//some-subdir (subdirectory in archive)
|
||||
|
||||
- This relies on [hashicorp/go-getter](https://github.com/hashicorp/go-getter#url-format)
|
||||
|
||||
- ... But it prevents Kustomize inclusion in `kubectl`
|
||||
|
||||
- Avoid them!
|
||||
|
||||
- See [kustomize#3578](https://github.com/kubernetes-sigs/kustomize/issues/3578) for details
|
||||
- See [hashicorp/go-getter URL format docs](https://github.com/hashicorp/go-getter#url-format) for more examples
|
||||
|
||||
---
|
||||
|
||||
@@ -332,7 +283,7 @@ class: extra-details
|
||||
kubectl apply -f rendered.yaml --namespace=kustomcoins
|
||||
```
|
||||
|
||||
- Or, with Kubernetes 1.14, we can also do this:
|
||||
- Or, with Kubernetes 1.14, you can also do this:
|
||||
```bash
|
||||
kubectl apply -k overlays/ship --namespace=kustomcoins
|
||||
```
|
||||
@@ -386,163 +337,39 @@ Note: it might take a minute or two for the worker to start.
|
||||
|
||||
---
|
||||
|
||||
## `kubectl` integration
|
||||
## `kubectl apply -k`
|
||||
|
||||
- Kustomize has been integrated in `kubectl` (since Kubernetes 1.14)
|
||||
|
||||
- `kubectl kustomize` can apply a kustomization
|
||||
|
||||
- commands that use `-f` can also use `-k` (`kubectl apply`/`delete`/...)
|
||||
- Kustomize has been integrated in `kubectl`
|
||||
|
||||
- The `kustomize` tool is still needed if we want to use `create`, `edit`, ...
|
||||
|
||||
- Kubernetes 1.14 to 1.20 uses Kustomize 2.0.3
|
||||
- Also, warning: `kubectl apply -k` is a slightly older version than `kustomize`!
|
||||
|
||||
- Kubernetes 1.21 jumps to Kustomize 4.1.2
|
||||
- In recent versions of `kustomize`, bases can be listed in `resources`
|
||||
|
||||
- Future versions should track Kustomize updates more closely
|
||||
(and `kustomize edit add base` will add its arguments to `resources`)
|
||||
|
||||
---
|
||||
- `kubectl apply -k` requires bases to be listed in `bases`
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Differences between 2.0.3 and later
|
||||
|
||||
- Kustomize 2.1 / 3.0 deprecates `bases` (they should be listed in `resources`)
|
||||
|
||||
(this means that "modern" `kustomize edit add resource` won't work with "old" `kubectl apply -k`)
|
||||
|
||||
- Kustomize 2.1 introduces `replicas` and `envs`
|
||||
|
||||
- Kustomize 3.1 introduces multipatches
|
||||
|
||||
- Kustomize 3.2 introduce inline patches in `kustomization.yaml`
|
||||
|
||||
- Kustomize 3.3 to 3.10 is mostly internal refactoring
|
||||
|
||||
- Kustomize 4.0 drops go-getter again
|
||||
|
||||
- Kustomize 4.1 allows patching kind and name
|
||||
|
||||
---
|
||||
|
||||
## Scaling
|
||||
|
||||
Instead of using a patch, scaling can be done like this:
|
||||
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
...
|
||||
replicas:
|
||||
- name: worker
|
||||
count: 5
|
||||
```
|
||||
|
||||
It will automatically work with Deployments, ReplicaSets, StatefulSets.
|
||||
|
||||
(For other resource types, fall back to a patch.)
|
||||
|
||||
---
|
||||
|
||||
## Updating images
|
||||
|
||||
Instead of using patches, images can be changed like this:
|
||||
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
...
|
||||
images:
|
||||
- name: postgres
|
||||
newName: harbor.enix.io/my-postgres
|
||||
- name: dockercoins/worker
|
||||
newTag: v0.2
|
||||
- name: dockercoins/hasher
|
||||
newName: registry.dockercoins.io/hasher
|
||||
newTag: v0.2
|
||||
- name: alpine
|
||||
digest: sha256:24a0c4b4a4c0eb97a1aabb8e29f18e917d05abfe1b7a7c07857230879ce7d3d3
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Updating images, pros and cons
|
||||
|
||||
- Very convenient when the same image appears multiple times
|
||||
|
||||
- Very convenient to define tags (or pin to hashes) outside of the main YAML
|
||||
|
||||
- Doesn't support wildcard or generic substitutions:
|
||||
|
||||
- cannot "replace `dockercoins/*` with `ghcr.io/dockercoins/*`"
|
||||
|
||||
- cannot "tag all `dockercoins/*` with `v0.2`"
|
||||
|
||||
- Only patches "well-known" image fields (won't work with CRDs referencing images)
|
||||
|
||||
- Helm can deal with these scenarios, for instance:
|
||||
```yaml
|
||||
image: {{ .Values.registry }}/worker:{{ .Values.version }}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Advanced resource patching
|
||||
|
||||
The example below shows how to:
|
||||
|
||||
- patch multiple resources with a selector (new in Kustomize 3.1)
|
||||
- use an inline patch instead of a separate patch file (new in Kustomize 3.2)
|
||||
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
...
|
||||
patches:
|
||||
- patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/image
|
||||
value: alpine
|
||||
target:
|
||||
kind: Deployment
|
||||
labelSelector: "app"
|
||||
```
|
||||
|
||||
(This replaces all images of Deployments matching the `app` selector with `alpine`.)
|
||||
|
||||
---
|
||||
|
||||
## Advanced resource patching, pros and cons
|
||||
|
||||
- Very convenient to patch an arbitrary number of resources
|
||||
|
||||
- Very convenient to patch any kind of resource, including CRDs
|
||||
|
||||
- Doesn't support "fine-grained" patching (e.g. image registry or tag)
|
||||
|
||||
- Once again, Helm can do it:
|
||||
```yaml
|
||||
image: {{ .Values.registry }}/worker:{{ .Values.version }}
|
||||
```
|
||||
(so after using `kustomize edit add base`, we need to fix `kustomization.yaml`)
|
||||
|
||||
---
|
||||
|
||||
## Differences with Helm
|
||||
|
||||
- Helm charts generally require more upfront work
|
||||
- Helm charts use placeholders `{{ like.this }}`
|
||||
|
||||
(while kustomize "bases" are standard Kubernetes YAML)
|
||||
- Kustomize "bases" are standard Kubernetes YAML
|
||||
|
||||
- ... But Helm charts are also more powerful; their templating language can:
|
||||
- It is possible to use an existing set of YAML as a Kustomize base
|
||||
|
||||
- conditionally include/exclude resources or blocks within resources
|
||||
- As a result, writing a Helm chart is more work ...
|
||||
|
||||
- generate values by concatenating, hashing, transforming parameters
|
||||
- ... But Helm charts are also more powerful; e.g. they can:
|
||||
|
||||
- generate values or resources by iteration (`{{ range ... }}`)
|
||||
- use flags to conditionally include resources or blocks
|
||||
|
||||
- access the Kubernetes API during template evaluation
|
||||
- check if a given Kubernetes API group is supported
|
||||
|
||||
- [and much more](https://helm.sh/docs/chart_template_guide/)
|
||||
|
||||
@@ -550,3 +377,4 @@ patches:
|
||||
|
||||
:EN:- Packaging and running apps with Kustomize
|
||||
:FR:- *Packaging* d'applications avec Kustomize
|
||||
|
||||
|
||||
@@ -1,182 +1,69 @@
|
||||
# Checking Node and Pod resource usage
|
||||
# Checking pod and node resource usage
|
||||
|
||||
- We've installed a few things on our cluster so far
|
||||
- Since Kubernetes 1.8, metrics are collected by the [resource metrics pipeline](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-metrics-pipeline/)
|
||||
|
||||
- How much resources (CPU, RAM) are we using?
|
||||
- The resource metrics pipeline is:
|
||||
|
||||
- We need metrics!
|
||||
- optional (Kubernetes can function without it)
|
||||
|
||||
- necessary for some features (like the Horizontal Pod Autoscaler)
|
||||
|
||||
- exposed through the Kubernetes API using the [aggregation layer](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)
|
||||
|
||||
- usually implemented by the "metrics server"
|
||||
|
||||
---
|
||||
|
||||
## How to know if the metrics server is running?
|
||||
|
||||
- The easiest way to know is to run `kubectl top`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's try the following command:
|
||||
- Check if the core metrics pipeline is available:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If it shows our nodes and their CPU and memory load, we're good!
|
||||
|
||||
---
|
||||
|
||||
## Installing metrics server
|
||||
|
||||
- The metrics server doesn't have any particular requirements
|
||||
|
||||
(it doesn't need persistence, as it doesn't *store* metrics)
|
||||
|
||||
- It has its own repository, [kubernetes-incubator/metrics-server](https://github.com/kubernetes-incubator/metrics-server)
|
||||
|
||||
- The repository comes with [YAML files for deployment](https://github.com/kubernetes-incubator/metrics-server/tree/master/deploy/1.8%2B)
|
||||
|
||||
- These files may not work on some clusters
|
||||
|
||||
(e.g. if your node names are not in DNS)
|
||||
|
||||
- The container.training repository has a [metrics-server.yaml](https://github.com/jpetazzo/container.training/blob/master/k8s/metrics-server.yaml#L90) file to help with that
|
||||
|
||||
(we can `kubectl apply -f` that file if needed)
|
||||
|
||||
---
|
||||
|
||||
## Showing container resource usage
|
||||
|
||||
- Once the metrics server is running, we can check container resource usage
|
||||
|
||||
.exercise[
|
||||
|
||||
- Show resource usage across all containers:
|
||||
```bash
|
||||
kubectl top pods --containers --all-namespaces
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Is metrics-server installed?
|
||||
|
||||
- If we see a list of nodes, with CPU and RAM usage:
|
||||
|
||||
*great, metrics-server is installed!*
|
||||
|
||||
- If we see `error: Metrics API not available`:
|
||||
|
||||
*metrics-server isn't installed, so we'll install it!*
|
||||
|
||||
---
|
||||
|
||||
## The resource metrics pipeline
|
||||
|
||||
- The `kubectl top` command relies on the Metrics API
|
||||
|
||||
- The Metrics API is part of the "[resource metrics pipeline]"
|
||||
|
||||
- The Metrics API isn't served (built into) the Kubernetes API server
|
||||
|
||||
- It is made available through the [aggregation layer]
|
||||
|
||||
- It is usually served by a component called metrics-server
|
||||
|
||||
- It is optional (Kubernetes can function without it)
|
||||
|
||||
- It is necessary for some features (like the Horizontal Pod Autoscaler)
|
||||
|
||||
[resource metrics pipeline]: https://kubernetes.io/docs/tasks/debug-application-cluster/resource-metrics-pipeline/
|
||||
[aggregation layer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
|
||||
|
||||
---
|
||||
|
||||
## Other ways to get metrics
|
||||
|
||||
- We could use a SAAS like Datadog, New Relic...
|
||||
|
||||
- We could use a self-hosted solution like Prometheus
|
||||
|
||||
- Or we could use metrics-server
|
||||
|
||||
- What's special about metrics-server?
|
||||
|
||||
---
|
||||
|
||||
## Pros/cons
|
||||
|
||||
Cons:
|
||||
|
||||
- no data retention (no history data, just instant numbers)
|
||||
|
||||
- only CPU and RAM of nodes and pods (no disk or network usage or I/O...)
|
||||
|
||||
Pros:
|
||||
|
||||
- very lightweight
|
||||
|
||||
- doesn't require storage
|
||||
|
||||
- used by Kubernetes autoscaling
|
||||
|
||||
---
|
||||
|
||||
## Why metrics-server
|
||||
|
||||
- We may install something fancier later
|
||||
|
||||
(think: Prometheus with Grafana)
|
||||
|
||||
- But metrics-server will work in *minutes*
|
||||
|
||||
- It will barely use resources on our cluster
|
||||
|
||||
- It's required for autoscaling anyway
|
||||
|
||||
---
|
||||
|
||||
## How metric-server works
|
||||
|
||||
- It runs a single Pod
|
||||
|
||||
- That Pod will fetch metrics from all our Nodes
|
||||
|
||||
- It will expose them through the Kubernetes API agregation layer
|
||||
|
||||
(we won't say much more about that agregation layer; that's fairly advanced stuff!)
|
||||
|
||||
---
|
||||
|
||||
## Installing metrics-server
|
||||
|
||||
- In a lot of places, this is done with a little bit of custom YAML
|
||||
|
||||
(derived from the [official installation instructions](https://github.com/kubernetes-sigs/metrics-server#installation))
|
||||
|
||||
- We're going to use Helm one more time:
|
||||
```bash
|
||||
helm upgrade --install metrics-server bitnami/metrics-server \
|
||||
--create-namespace --namespace metrics-server \
|
||||
--set apiService.create=true \
|
||||
--set extraArgs.kubelet-insecure-tls=true \
|
||||
--set extraArgs.kubelet-preferred-address-types=InternalIP
|
||||
```
|
||||
|
||||
- What are these options for?
|
||||
|
||||
---
|
||||
|
||||
## Installation options
|
||||
|
||||
- `apiService.create=true`
|
||||
|
||||
register `metrics-server` with the Kubernetes agregation layer
|
||||
|
||||
(create an entry that will show up in `kubectl get apiservices`)
|
||||
|
||||
- `extraArgs.kubelet-insecure-tls=true`
|
||||
|
||||
when connecting to nodes to collect their metrics, don't check kubelet TLS certs
|
||||
|
||||
(because most kubelet certs include the node name, but not its IP address)
|
||||
|
||||
- `extraArgs.kubelet-preferred-address-types=InternalIP`
|
||||
|
||||
when connecting to nodes, use their internal IP address instead of node name
|
||||
|
||||
(because the latter requires an internal DNS, which is rarely configured)
|
||||
|
||||
---
|
||||
|
||||
## Testing metrics-server
|
||||
|
||||
- After a minute or two, metrics-server should be up
|
||||
|
||||
- We should now be able to check Nodes resource usage:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
- And Pods resource usage, too:
|
||||
```bash
|
||||
kubectl top pods --all-namespaces
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Keep some padding
|
||||
|
||||
- The RAM usage that we see should correspond more or less to the Resident Set Size
|
||||
|
||||
- Our pods also need some extra space for buffers, caches...
|
||||
|
||||
- Do not aim for 100% memory usage!
|
||||
|
||||
- Some more realistic targets:
|
||||
|
||||
50% (for workloads with disk I/O and leveraging caching)
|
||||
|
||||
90% (on very big nodes with mostly CPU-bound workloads)
|
||||
|
||||
75% (anywhere in between!)
|
||||
- We can also use selectors (`-l app=...`)
|
||||
|
||||
---
|
||||
|
||||
@@ -196,8 +83,5 @@ Pros:
|
||||
|
||||
???
|
||||
|
||||
:EN:- The resource metrics pipeline
|
||||
:EN:- Installing metrics-server
|
||||
|
||||
:EN:- Le *resource metrics pipeline*
|
||||
:FR:- Installtion de metrics-server
|
||||
:EN:- The *core metrics pipeline*
|
||||
:FR:- Le *core metrics pipeline*
|
||||
|
||||
@@ -519,11 +519,3 @@ class: extra-details
|
||||
- The Pod will then be able to start
|
||||
|
||||
- Failover is complete!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Understanding Container Attached Storage (CAS)
|
||||
:EN:- Deploying stateful apps with OpenEBS
|
||||
|
||||
:FR:- Comprendre le "Container Attached Storage" (CAS)
|
||||
:FR:- Déployer une application "stateful" avec OpenEBS
|
||||
@@ -66,7 +66,7 @@ class: extra-details
|
||||
|
||||
- Each request takes 1 second of CPU
|
||||
|
||||
- Average load: 1.66%
|
||||
- Average load: 0.16%
|
||||
|
||||
- Let's say we set a CPU limit of 10%
|
||||
|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for Admins and Ops
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- static-pods-exercise
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
#- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
-
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
- k8s/interco.md
|
||||
-
|
||||
- k8s/apilb.md
|
||||
#- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/staticpods.md
|
||||
-
|
||||
#- k8s/cloud-controller-manager.md
|
||||
#- k8s/bootstrap.md
|
||||
- k8s/control-plane-auth.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
-
|
||||
#- k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,85 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for administrators
|
||||
and operators
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
# DAY 1
|
||||
- - k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
- - k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
- k8s/interco.md
|
||||
- - k8s/apilb.md
|
||||
- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/staticpods.md
|
||||
- - k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
# DAY 2
|
||||
- - k8s/kubercoins.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- - k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
###- k8s/bootstrap.md
|
||||
- k8s/netpol.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- - k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- - k8s/prometheus.md
|
||||
#- k8s/prometheus-stack.md
|
||||
- k8s/extending-api.md
|
||||
- k8s/crd.md
|
||||
- k8s/operators.md
|
||||
- k8s/eck.md
|
||||
###- k8s/operators-design.md
|
||||
# CONCLUSION
|
||||
- - k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
- |
|
||||
# (All content after this slide is bonus material)
|
||||
# EXTRA
|
||||
- - k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
@@ -1,86 +0,0 @@
|
||||
title: |
|
||||
Advanced
|
||||
Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- #1
|
||||
- k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
- #2
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/interco.md
|
||||
- #3
|
||||
- k8s/cni-internals.md
|
||||
- k8s/apilb.md
|
||||
- k8s/control-plane-auth.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/staticpods.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- #4
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-dependencies.md
|
||||
- k8s/helm-values-schema-validation.md
|
||||
- k8s/helm-secrets.md
|
||||
- #5
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/sealed-secrets.md
|
||||
- k8s/crd.md
|
||||
#- k8s/exercise-sealed-secrets.md
|
||||
- #6
|
||||
- k8s/ingress-tls.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/eck.md
|
||||
- #7
|
||||
- k8s/admission.md
|
||||
- k8s/kyverno.md
|
||||
- #8
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/prometheus-stack.md
|
||||
- k8s/hpa-v2.md
|
||||
- #9
|
||||
- k8s/operators-design.md
|
||||
- k8s/kubebuilder.md
|
||||
- k8s/events.md
|
||||
- k8s/finalizers.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/owners-and-dependents.md
|
||||
- k8s/apiserver-deepdive.md
|
||||
#- k8s/record.md
|
||||
- shared/thankyou.md
|
||||
|
||||
@@ -1,126 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
-
|
||||
- k8s/kubectl-run.md
|
||||
#- k8s/batch-jobs.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
-
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
-
|
||||
- k8s/dashboard.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/openebs.md
|
||||
#- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
#- k8s/scalingdockercoins.md
|
||||
#- shared/hastyconclusions.md
|
||||
#- k8s/daemonset.md
|
||||
#- k8s/authoring-yaml.md
|
||||
#- k8s/exercise-yaml.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
#- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
#- k8s/healthchecks-more.md
|
||||
#- k8s/record.md
|
||||
#- k8s/ingress-tls.md
|
||||
#- k8s/kustomize.md
|
||||
#- k8s/helm-intro.md
|
||||
#- k8s/helm-chart-format.md
|
||||
#- k8s/helm-create-basic-chart.md
|
||||
#- k8s/helm-create-better-chart.md
|
||||
#- k8s/helm-dependencies.md
|
||||
#- k8s/helm-values-schema-validation.md
|
||||
#- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
#- k8s/gitlab.md
|
||||
#- k8s/create-chart.md
|
||||
#- k8s/create-more-charts.md
|
||||
#- k8s/netpol.md
|
||||
#- k8s/authn-authz.md
|
||||
#- k8s/user-cert.md
|
||||
#- k8s/csr-api.md
|
||||
#- k8s/openid-connect.md
|
||||
#- k8s/podsecuritypolicy.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
#- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/prometheus-stack.md
|
||||
#- k8s/statefulsets.md
|
||||
#- k8s/local-persistent-volumes.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/crd.md
|
||||
#- k8s/admission.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/finalizers.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
-
|
||||
#- k8s/whatsnext.md
|
||||
- k8s/lastwords.md
|
||||
#- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,88 +0,0 @@
|
||||
title: |
|
||||
Kubernetes 101
|
||||
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- logistics.md
|
||||
# Bridget-specific; others use logistics.md
|
||||
- logistics-bridget.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
# Bridget doesn't go into as much depth with compose
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- - k8s/kubectl-run.md
|
||||
#- k8s/batch-jobs.md
|
||||
#- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
#- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- - k8s/dashboard.md
|
||||
#- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
#- k8s/record.md
|
||||
- - k8s/logs-cli.md
|
||||
# Bridget hasn't added EFK yet
|
||||
#- k8s/logs-centralized.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/helm-intro.md
|
||||
#- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
#- k8s/helm-create-better-chart.md
|
||||
#- k8s/helm-dependencies.md
|
||||
#- k8s/helm-values-schema-validation.md
|
||||
#- k8s/helm-secrets.md
|
||||
#- k8s/kustomize.md
|
||||
#- k8s/netpol.md
|
||||
- k8s/whatsnext.md
|
||||
# - k8s/links.md
|
||||
# Bridget-specific
|
||||
- k8s/links-bridget.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,156 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Docker and Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
-
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
-
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
- k8s/yamldeploy.md
|
||||
-
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/authoring-yaml.md
|
||||
#- k8s/exercise-yaml.md
|
||||
-
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
-
|
||||
- k8s/ingress.md
|
||||
- k8s/ingress-tls.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-dependencies.md
|
||||
- k8s/helm-values-schema-validation.md
|
||||
- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
- k8s/gitlab.md
|
||||
-
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
- k8s/build-with-docker.md
|
||||
- k8s/build-with-kaniko.md
|
||||
-
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
- k8s/openebs.md
|
||||
-
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/prometheus-stack.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/hpa-v2.md
|
||||
-
|
||||
- k8s/extending-api.md
|
||||
- k8s/apiserver-deepdive.md
|
||||
- k8s/crd.md
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/admission.md
|
||||
- k8s/operators.md
|
||||
- k8s/operators-design.md
|
||||
- k8s/kubebuilder.md
|
||||
- k8s/sealed-secrets.md
|
||||
#- k8s/exercise-sealed-secrets.md
|
||||
- k8s/kyverno.md
|
||||
- k8s/eck.md
|
||||
- k8s/finalizers.md
|
||||
- k8s/owners-and-dependents.md
|
||||
- k8s/events.md
|
||||
-
|
||||
- k8s/dmuc.md
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
- k8s/apilb.md
|
||||
- k8s/staticpods.md
|
||||
-
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/gitworkflows.md
|
||||
-
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,125 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
-
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
-
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/authoring-yaml.md
|
||||
#- k8s/exercise-yaml.md
|
||||
-
|
||||
- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/ingress-tls.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-dependencies.md
|
||||
- k8s/helm-values-schema-validation.md
|
||||
- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
- k8s/gitlab.md
|
||||
-
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
#- k8s/csr-api.md
|
||||
#- k8s/openid-connect.md
|
||||
#- k8s/podsecuritypolicy.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/prometheus-stack.md
|
||||
-
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
#- k8s/openebs.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/admission.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
-
|
||||
- k8s/whatsnext.md
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
57
slides/lke.yml
Normal file
57
slides/lke.yml
Normal file
@@ -0,0 +1,57 @@
|
||||
title: |
|
||||
Cloud Native
|
||||
Continuous Deployment
|
||||
with GitLab, Helm, and
|
||||
Linode Kubernetes Engine
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-03-lke.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- lke/intro.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- lke/deploy-cluster.md
|
||||
- lke/kubernetes-review.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/accessinternal.md
|
||||
- lke/what-is-missing.md
|
||||
-
|
||||
- k8s/helm-intro.md
|
||||
- lke/external-dns.md
|
||||
- lke/traefik.md
|
||||
- lke/metrics-server.md
|
||||
#- k8s/prometheus.md
|
||||
- lke/prometheus.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/gitlab.md
|
||||
#- k8s/helm-chart-format.md
|
||||
#- k8s/helm-create-basic-chart.md
|
||||
#- k8s/helm-create-better-chart.md
|
||||
#- k8s/helm-dependencies.md
|
||||
#- k8s/helm-values-schema-validation.md
|
||||
#- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
-
|
||||
- shared/thankyou.md
|
||||
|
||||
|
||||
#grep md$ lke.yml | grep -v '#' | cut -d- -f2- | xargs subl3
|
||||
163
slides/lke/deploy-cluster.md
Normal file
163
slides/lke/deploy-cluster.md
Normal file
@@ -0,0 +1,163 @@
|
||||
# Deploying our LKE cluster
|
||||
|
||||
- *If we wanted to deploy Kubernetes manually*, what would we need to do?
|
||||
|
||||
(not that I recommend doing that...)
|
||||
|
||||
- Control plane (etcd, API server, scheduler, controllers)
|
||||
|
||||
- Nodes (VMs with a container engine + the Kubelet agent; CNI setup)
|
||||
|
||||
- High availability (etcd clustering, API load balancer)
|
||||
|
||||
- Security (CA and TLS certificates everywhere)
|
||||
|
||||
- Cloud integration (to provision LoadBalancer services, storage...)
|
||||
|
||||
*And that's just to get a basic cluster!*
|
||||
|
||||
---
|
||||
|
||||
## The best way to deploy Kubernetes
|
||||
|
||||
*The best way to deploy Kubernetes is to get someone else to
|
||||
do it for us.*
|
||||
|
||||
(Me, ever since I've been working with Kubernetes)
|
||||
|
||||
---
|
||||
|
||||
## Managed Kubernetes
|
||||
|
||||
- Cloud provider runs the control plane
|
||||
|
||||
(including etcd, API load balancer, TLS setup, cloud integration)
|
||||
|
||||
- We run nodes
|
||||
|
||||
(the cloud provider generally gives us an easy way to provision them)
|
||||
|
||||
- Get started in *minutes*
|
||||
|
||||
- We're going to use [Linode Kubernetes Engine](https://www.linode.com/products/kubernetes/)
|
||||
|
||||
---
|
||||
|
||||
## Creating a cluster
|
||||
|
||||
- With the web console:
|
||||
|
||||
https://cloud.linode.com/kubernetes/clusters
|
||||
|
||||
- Pick the region of your choice
|
||||
|
||||
- Pick the latest available Kubernetes version
|
||||
|
||||
- Pick 3 nodes with 8 GB of RAM
|
||||
|
||||
- Click! ✨
|
||||
|
||||
- Wait a few minutes... ⌚️
|
||||
|
||||
- Download the kubeconfig file 💾
|
||||
|
||||
---
|
||||
|
||||
## With the CLI
|
||||
|
||||
- View available regions with `linode-cli regions list`
|
||||
|
||||
- View available server types with `linode-cli linodes types`
|
||||
|
||||
- View available Kubernetes versions with `linode-cli lke versions-list`
|
||||
|
||||
- Create cluster:
|
||||
```bash
|
||||
linode-cli lke cluster-create --label=hello-lke --region=us-east \
|
||||
--k8s_version=1.20 --node_pools.type=g6-standard-4 --node_pools.count=3
|
||||
```
|
||||
|
||||
- Note the cluster ID (e.g.: 12345)
|
||||
|
||||
- Download the kubeconfig file:
|
||||
```bash
|
||||
linode-cli lke kubeconfig-view `12345` --text --no-headers | base64 -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Communicating with the cluster
|
||||
|
||||
- All the Kubernetes tools (`kubectl`, but also `helm` etc) use the same config file
|
||||
|
||||
- That file is (by default) `$HOME/.kube/config`
|
||||
|
||||
- It can hold multiple cluster definitions (or *contexts*)
|
||||
|
||||
- Or, we can have multiple config files and switch between them:
|
||||
|
||||
- by adding the `--kubeconfig` flag each time we invoke a tool (🙄)
|
||||
|
||||
- or by setting the `KUBECONFIG` environment variable (☺️)
|
||||
|
||||
---
|
||||
|
||||
## Using the kubeconfig file
|
||||
|
||||
Option 1:
|
||||
|
||||
- move the kubeconfig file to e.g. `~/.kube/config.lke`
|
||||
|
||||
- set the environment variable: `export KUBECONFIG=~/.kube/config.lke`
|
||||
|
||||
Option 2:
|
||||
|
||||
- directly move the kubeconfig file to `~/.kube/config`
|
||||
|
||||
- **do not** do that if you already have a file there!
|
||||
|
||||
Option 3:
|
||||
|
||||
- merge the new kubeconfig file with our existing file
|
||||
|
||||
---
|
||||
|
||||
## Merging kubeconfig
|
||||
|
||||
- Assuming that we want to merge `~/.kube/config` and `~/.kube/config.lke` ...
|
||||
|
||||
- Move our existing kubeconfig file:
|
||||
```bash
|
||||
cp ~/.kube/config ~/.kube/config.old
|
||||
```
|
||||
|
||||
- Merge both files:
|
||||
```bash
|
||||
KUBECONFIG=~/.kube/config.old:~/.kube/config.lke kubectl config \
|
||||
view --raw > ~/.kube/config
|
||||
```
|
||||
|
||||
- Check that everything is there:
|
||||
```bash
|
||||
kubectl config get-contexts
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Are we there yet?
|
||||
|
||||
- Let's check if our control plane is available:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
|
||||
→ This should show the `kubernetes` `ClusterIP` service
|
||||
|
||||
- Look for our nodes:
|
||||
```bash
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
→ This should show 3 nodes (or whatever amount we picked earlier)
|
||||
|
||||
- If the nodes aren't visible yet, give them a minute to join the cluster
|
||||
108
slides/lke/external-dns.md
Normal file
108
slides/lke/external-dns.md
Normal file
@@ -0,0 +1,108 @@
|
||||
# [ExternalDNS](https://github.com/kubernetes-sigs/external-dns)
|
||||
|
||||
- ExternalDNS will automatically create DNS records from Kubernetes resources
|
||||
|
||||
- Services (with the annotation `external-dns.alpha.kubernetes.io/hostname`)
|
||||
|
||||
- Ingresses (automatically)
|
||||
|
||||
- It requires a domain name (obviously)
|
||||
|
||||
- ... And that domain name should be configurable through an API
|
||||
|
||||
- As of April 2021, it supports [a few dozens of providers](https://github.com/kubernetes-sigs/external-dns#status-of-providers)
|
||||
|
||||
- We're going to use Linode DNS
|
||||
|
||||
---
|
||||
|
||||
## Prep work
|
||||
|
||||
- We need a domain name
|
||||
|
||||
(if you need a cheap one, look e.g. at [GANDI](https://shop.gandi.net/?search=funwithlinode); there are many options below $10)
|
||||
|
||||
- That domain name should be configured to point to Linode DNS servers
|
||||
|
||||
(ns1.linode.com to ns5.linode.com)
|
||||
|
||||
- We need to generate a Linode API token with DNS API access
|
||||
|
||||
- Pro-tip: reduce the default TTL of the domain to 5 minutes!
|
||||
|
||||
---
|
||||
|
||||
## Deploying ExternalDNS
|
||||
|
||||
- The ExternalDNS documentation has a [tutorial](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/linode.md) for Linode
|
||||
|
||||
- ... It's basically a lot of YAML!
|
||||
|
||||
- That's where using a Helm chart will be very helpful
|
||||
|
||||
- There are a few ExternalDNS charts available out there
|
||||
|
||||
- We will use the one from Bitnami
|
||||
|
||||
(these folks maintain *a lot* of great Helm charts!)
|
||||
|
||||
---
|
||||
|
||||
## How we'll install things with Helm
|
||||
|
||||
- We will install each chart in its own namespace
|
||||
|
||||
(this is not mandatory, but it helps to see what belongs to what)
|
||||
|
||||
- We will use `helm upgrade --install` instead of `helm install`
|
||||
|
||||
(that way, if we want to change something, we can just re-run the command)
|
||||
|
||||
- We will use the `--create-namespace` and `--namespace ...` options
|
||||
|
||||
- To keep things boring and predictable, if we are installing chart `xyz`:
|
||||
|
||||
- we will install it in namespace `xyz`
|
||||
|
||||
- we will name the release `xyz` as well
|
||||
|
||||
---
|
||||
|
||||
## Installing ExternalDNS
|
||||
|
||||
- First, let's add the Bitnami repo:
|
||||
```bash
|
||||
helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
- Then, install ExternalDNS:
|
||||
```bash
|
||||
LINODE_API_TOKEN=`1234abcd...6789`
|
||||
helm upgrade --install external-dns bitnami/external-dns \
|
||||
--namespace external-dns --create-namespace \
|
||||
--set provider=linode \
|
||||
--set linode.apiToken=$LINODE_API_TOKEN
|
||||
```
|
||||
|
||||
(Make sure to update your API token above!)
|
||||
|
||||
---
|
||||
|
||||
## Testing ExternalDNS
|
||||
|
||||
- Let's annotate our NGINX service to expose it with a DNS record:
|
||||
```bash
|
||||
kubectl annotate service web \
|
||||
external-dns.alpha.kubernetes.io/hostname=nginx.`cloudnative.party`
|
||||
```
|
||||
|
||||
(make sure to use *your* domain name above, otherwise that won't work!)
|
||||
|
||||
- Check ExternalDNS logs:
|
||||
```bash
|
||||
kubectl logs -n external-dns -l app.kubernetes.io/name=external-dns
|
||||
```
|
||||
|
||||
- It might take a few minutes for ExternalDNS to start, patience!
|
||||
|
||||
- Then try to access `nginx.cloudnative.party` (or whatever domain you picked)
|
||||
175
slides/lke/intro.md
Normal file
175
slides/lke/intro.md
Normal file
@@ -0,0 +1,175 @@
|
||||
# Get ready!
|
||||
|
||||
- We're going to set up a whole Continuous Deployment pipeline
|
||||
|
||||
- ... for Kubernetes apps
|
||||
|
||||
- ... on a Kubernetes cluster
|
||||
|
||||
- Ingredients: cert-manager, GitLab, Helm, Linode DNS, LKE, Traefik
|
||||
|
||||
---
|
||||
|
||||
## Philosophy
|
||||
|
||||
- "Do one thing, do it well"
|
||||
|
||||
--
|
||||
|
||||
- ... But a CD pipeline is a complex system with interconnected parts!
|
||||
|
||||
- GitLab is no exception to that rule
|
||||
|
||||
- Let's have a look at its components!
|
||||
|
||||
---
|
||||
|
||||
## GitLab components
|
||||
|
||||
- GitLab dependencies listed in the GitLab official Helm chart
|
||||
|
||||
- External dependencies:
|
||||
|
||||
cert-manager, grafana, minio, nginx-ingress, postgresql, prometheus,
|
||||
redis, registry, shared-secrets
|
||||
|
||||
(these dependencies correspond to external charts not created by GitLab)
|
||||
|
||||
- Internal dependencies:
|
||||
|
||||
geo-logcursor, gitaly, gitlab-exporter, gitlab-grafana, gitlab-pages,
|
||||
gitlab-shell, kas, mailroom, migrations, operator, praefect, sidekiq,
|
||||
task-runner, webservice
|
||||
|
||||
(these dependencies correspond to subcharts embedded in the GitLab chart)
|
||||
|
||||
---
|
||||
|
||||
## Philosophy
|
||||
|
||||
- Use the GitLab chart to deploy everything that is specific to GitLab
|
||||
|
||||
- Deploy cluster-wide components separately
|
||||
|
||||
(cert-manager, ExternalDNS, Ingress Controller...)
|
||||
|
||||
---
|
||||
|
||||
## What we're going to do
|
||||
|
||||
- Spin up an LKE cluster
|
||||
|
||||
- Run a simple test app
|
||||
|
||||
- Install a few extras
|
||||
|
||||
(the cluster-wide components mentioned earlier)
|
||||
|
||||
- Set up GitLab
|
||||
|
||||
- Push an app with a CD pipeline to GitLab
|
||||
|
||||
---
|
||||
|
||||
## What you need to know
|
||||
|
||||
- If you just want to follow along and watch...
|
||||
|
||||
- container basics (what's an image, what's a container...)
|
||||
|
||||
- Kubernetes basics (what are Deployments, Namespaces, Pods, Services)
|
||||
|
||||
- If you want to run this on your own Kubernetes cluster...
|
||||
|
||||
- intermediate Kubernetes concepts (annotations, Ingresses)
|
||||
|
||||
- Helm basic concepts (how to install/upgrade releases; how to set "values")
|
||||
|
||||
- basic Kubernetes troubleshooting commands (view logs, events)
|
||||
|
||||
- There will be a lot of explanations and reminders along the way
|
||||
|
||||
---
|
||||
|
||||
## What you need to have
|
||||
|
||||
If you want to run this on your own...
|
||||
|
||||
- A Linode account
|
||||
|
||||
- A domain name that you will point to Linode DNS
|
||||
|
||||
(I got cloudnative.party for $5)
|
||||
|
||||
- Local tools to control your Kubernetes cluster:
|
||||
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
|
||||
|
||||
- [helm](https://helm.sh/docs/intro/install/)
|
||||
|
||||
- Patience, as many operations will require us to wait a few minutes!
|
||||
|
||||
---
|
||||
|
||||
## Do I really need a Linode account?
|
||||
|
||||
- *Can I use a local cluster, e.g. with Minikube?*
|
||||
|
||||
It will be very difficult to get valid TLS certs with a local cluster.
|
||||
|
||||
Also, GitLab needs quite a bit of resources.
|
||||
|
||||
- *Can I use another Kubernetes provider?*
|
||||
|
||||
You certainly can: Kubernetes is a standard platform!
|
||||
|
||||
But you'll have to adjust a few things.
|
||||
|
||||
(I'll try my best to tell you what as we go along.)
|
||||
|
||||
---
|
||||
|
||||
## Why do I need a domain name?
|
||||
|
||||
- Because accessing gitlab.cloudnative.party is easier than 102.34.55.67
|
||||
|
||||
- Because we'll need TLS certificates
|
||||
|
||||
(and it's very easy to obtain certs with Let's Encrypt when we have a domain)
|
||||
|
||||
- We'll illustrate automatic DNS configuration with ExternalDNS, too!
|
||||
|
||||
(Kubernetes will automatically create DNS entries in our domain)
|
||||
|
||||
---
|
||||
|
||||
## Nice-to-haves
|
||||
|
||||
Here are a few tools that I like...
|
||||
|
||||
- [linode-cli](https://github.com/linode/linode-cli#installation)
|
||||
to manage Linode resources from the command line
|
||||
|
||||
- [stern](https://github.com/stern/stern)
|
||||
to comfortably view logs of Kubernetes pods
|
||||
|
||||
- [k9s](https://k9scli.io/topics/install/)
|
||||
to manage Kubernetes resources with that retro BBS look and feel 😎
|
||||
|
||||
- [kube-ps1](https://github.com/jonmosco/kube-ps1)
|
||||
to keep track of which Kubernetes cluster and namespace we're working on
|
||||
|
||||
- [kubectx](https://github.com/ahmetb/kubectx)
|
||||
to easily switch between clusters, contexts, and namespaces
|
||||
|
||||
---
|
||||
|
||||
## Warning ⚠️💸
|
||||
|
||||
- We're going to spin up cloud resources
|
||||
|
||||
- Remember to shut them down when you're done!
|
||||
|
||||
- In the immortal words of Cloud Economist [Corey Quinn](https://twitter.com/QuinnyPig):
|
||||
|
||||
*[You're charged for what you forget to turn off.](https://www.theregister.com/2020/09/03/cloud_control_costs/)*
|
||||
168
slides/lke/kubernetes-review.md
Normal file
168
slides/lke/kubernetes-review.md
Normal file
@@ -0,0 +1,168 @@
|
||||
# Quick Kubernetes review
|
||||
|
||||
- Let's deploy a simple HTTP server
|
||||
|
||||
- And expose it to the outside world!
|
||||
|
||||
- Feel free to skip this section if you're familiar with Kubernetes
|
||||
|
||||
---
|
||||
|
||||
## Creating a container
|
||||
|
||||
- On Kubernetes, one doesn't simply run a container
|
||||
|
||||
- We need to create a "Pod"
|
||||
|
||||
- A Pod will be a group of containers running together
|
||||
|
||||
(often, it will be a group of *one* container)
|
||||
|
||||
- We can create a standalone Pod, but generally, we'll use a *controller*
|
||||
|
||||
(for instance: Deployment, Replica Set, Daemon Set, Job, Stateful Set...)
|
||||
|
||||
- The *controller* will take care of scaling and recreating the Pod if needed
|
||||
|
||||
(note that within a Pod, containers can also be restarted automatically if needed)
|
||||
|
||||
---
|
||||
|
||||
## A *controller*, you said?
|
||||
|
||||
- We're going to use one of the most common controllers: a *Deployment*
|
||||
|
||||
- Deployments...
|
||||
|
||||
- can be scaled (will create the requested number of Pods)
|
||||
|
||||
- will recreate Pods if e.g. they get evicted or their Node is down
|
||||
|
||||
- handle rolling updates
|
||||
|
||||
- Deployments actually delegate a lot of these tasks to *Replica Sets*
|
||||
|
||||
- We will generally have the following hierarchy:
|
||||
|
||||
Deployment → Replica Set → Pod
|
||||
|
||||
---
|
||||
|
||||
## Creating a Deployment
|
||||
|
||||
- Without further ado:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx
|
||||
```
|
||||
|
||||
- Check what happened:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
- Wait until the NGINX Pod is "Running"!
|
||||
|
||||
- Note: `kubectl create deployment` is great when getting started...
|
||||
|
||||
- ... But later, we will probably write YAML instead!
|
||||
|
||||
---
|
||||
|
||||
## Exposing the Deployment
|
||||
|
||||
- We need to create a Service
|
||||
|
||||
- We can use `kubectl expose` for that
|
||||
|
||||
(but, again, we will probably use YAML later!)
|
||||
|
||||
- For *internal* use, we can use the default Service type, ClusterIP:
|
||||
```bash
|
||||
kubectl expose deployment web --port=80
|
||||
```
|
||||
|
||||
- For *external* use, we can use a Service of type LoadBalancer:
|
||||
```bash
|
||||
kubectl expose deployment web --port=80 --type=LoadBalancer
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Changing the Service type
|
||||
|
||||
- We can `kubectl delete service web` and recreate it
|
||||
|
||||
- Or, `kubectl edit service web` and dive into the YAML
|
||||
|
||||
- Or, `kubectl patch service web --patch '{"spec": {"type": "LoadBalancer"}}'`
|
||||
|
||||
- ... These are just a few "classic" methods; there are many ways to do this!
|
||||
|
||||
---
|
||||
|
||||
## Deployment → Pod
|
||||
|
||||
- Can we check exactly what's going on when the Pod is created?
|
||||
|
||||
- Option 1: `watch kubectl get all`
|
||||
|
||||
- displays all object types
|
||||
- refreshes every 2 seconds
|
||||
- puts a high load on the API server when there are many objects
|
||||
|
||||
- Option 2: `kubectl get pods --watch --output-watch-events`
|
||||
|
||||
- can only display one type of object
|
||||
- will show all modifications happening (à la `tail -f`)
|
||||
- doesn't put a high load on the API server (except for initial display)
|
||||
|
||||
---
|
||||
|
||||
## Recreating the Deployment
|
||||
|
||||
- Let's delete our Deployment:
|
||||
```bash
|
||||
kubectl delete deployment web
|
||||
```
|
||||
|
||||
- Watch Pod updates:
|
||||
```bash
|
||||
kubectl get pods --watch --output-watch-events
|
||||
```
|
||||
|
||||
- Recreate the Deployment and see what Pods do:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Service stability
|
||||
|
||||
- Our Service *still works* even though we deleted and re-created the Deployment
|
||||
|
||||
- It wouldn't have worked while the Deployment was deleted, though
|
||||
|
||||
- A Service is a *stable endpoint*
|
||||
|
||||
???
|
||||
|
||||
:T: Warming up with a quick Kubernetes review
|
||||
|
||||
:Q: In Kubernetes, what is a Pod?
|
||||
:A: ✔️A basic unit of scaling that can contain one or more containers
|
||||
:A: An abstraction for an application and its dependencies
|
||||
:A: It's just a fancy name for "container" but they're the same
|
||||
:A: A group of cluster nodes used for scheduling purposes
|
||||
|
||||
:Q: In Kubernetes, what is a Replica Set?
|
||||
:A: ✔️A controller used to create one or multiple identical Pods
|
||||
:A: A numeric parameter in a Pod specification, used to scale that Pod
|
||||
:A: A group of containers running on the same node
|
||||
:A: A group of containers running on different nodes
|
||||
|
||||
:Q: In Kubernetes, what is a Deployment?
|
||||
:A: ✔️A controller that can manage Replica Sets corresponding to different configurations
|
||||
:A: A manifest telling Kubernetes how to deploy an app and its dependencies
|
||||
:A: A list of instructions executed in a container to configure that container
|
||||
:A: A basic unit of work for the Kubernetes scheduler
|
||||
147
slides/lke/metrics-server.md
Normal file
147
slides/lke/metrics-server.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Installing metrics-server
|
||||
|
||||
- We've installed a few things on our cluster so far
|
||||
|
||||
- How much resources (CPU, RAM) are we using?
|
||||
|
||||
- We need metrics!
|
||||
|
||||
- If metrics-server is installed, we can get Nodes metrics like this:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
- At the moment, this should show us `error: Metrics API not available`
|
||||
|
||||
- How do we fix this?
|
||||
|
||||
---
|
||||
|
||||
## Many ways to get metrics
|
||||
|
||||
- We could use a SAAS like Datadog, New Relic...
|
||||
|
||||
- We could use a self-hosted solution like Prometheus
|
||||
|
||||
- Or we could use metrics-server
|
||||
|
||||
- What's special about metrics-server?
|
||||
|
||||
---
|
||||
|
||||
## Pros/cons
|
||||
|
||||
Cons:
|
||||
|
||||
- no data retention (no history data, just instant numbers)
|
||||
|
||||
- only CPU and RAM of nodes and pods (no disk or network usage or I/O...)
|
||||
|
||||
Pros:
|
||||
|
||||
- very lightweight
|
||||
|
||||
- doesn't require storage
|
||||
|
||||
- used by Kubernetes autoscaling
|
||||
|
||||
---
|
||||
|
||||
## Why metrics-server
|
||||
|
||||
- We may install something fancier later
|
||||
|
||||
(think: Prometheus with Grafana)
|
||||
|
||||
- But metrics-server will work in *minutes*
|
||||
|
||||
- It will barely use resources on our cluster
|
||||
|
||||
- It's required for autoscaling anyway
|
||||
|
||||
---
|
||||
|
||||
## How metrics-server works
|
||||
|
||||
- It runs a single Pod
|
||||
|
||||
- That Pod will fetch metrics from all our Nodes
|
||||
|
||||
- It will expose them through the Kubernetes API aggregation layer
|
||||
|
||||
(we won't say much more about that aggregation layer; that's fairly advanced stuff!)
|
||||
|
||||
---
|
||||
|
||||
## Installing metrics-server
|
||||
|
||||
- In a lot of places, this is done with a little bit of custom YAML
|
||||
|
||||
(derived from the [official installation instructions](https://github.com/kubernetes-sigs/metrics-server#installation))
|
||||
|
||||
- We're going to use Helm one more time:
|
||||
```bash
|
||||
helm upgrade --install metrics-server bitnami/metrics-server \
|
||||
--create-namespace --namespace metrics-server \
|
||||
--set apiService.create=true \
|
||||
--set extraArgs.kubelet-insecure-tls=true \
|
||||
--set extraArgs.kubelet-preferred-address-types=InternalIP
|
||||
```
|
||||
|
||||
- What are these options for?
|
||||
|
||||
---
|
||||
|
||||
## Installation options
|
||||
|
||||
- `apiService.create=true`
|
||||
|
||||
register `metrics-server` with the Kubernetes aggregation layer
|
||||
|
||||
(create an entry that will show up in `kubectl get apiservices`)
|
||||
|
||||
- `extraArgs.kubelet-insecure-tls=true`
|
||||
|
||||
when connecting to nodes to collect their metrics, don't check kubelet TLS certs
|
||||
|
||||
(because most kubelet certs include the node name, but not its IP address)
|
||||
|
||||
- `extraArgs.kubelet-preferred-address-types=InternalIP`
|
||||
|
||||
when connecting to nodes, use their internal IP address instead of node name
|
||||
|
||||
(because the latter requires an internal DNS, which is rarely configured)
|
||||
|
||||
---
|
||||
|
||||
## Testing metrics-server
|
||||
|
||||
- After a minute or two, metrics-server should be up
|
||||
|
||||
- We should now be able to check Nodes resource usage:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
- And Pods resource usage, too:
|
||||
```bash
|
||||
kubectl top pods --all-namespaces
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Keep some padding
|
||||
|
||||
- The RAM usage that we see should correspond more or less to the Resident Set Size
|
||||
|
||||
- Our pods also need some extra space for buffers, caches...
|
||||
|
||||
- Do not aim for 100% memory usage!
|
||||
|
||||
- Some more realistic targets:
|
||||
|
||||
50% (for workloads with disk I/O and leveraging caching)
|
||||
|
||||
90% (on very big nodes with mostly CPU-bound workloads)
|
||||
|
||||
75% (anywhere in between!)
|
||||
@@ -111,9 +111,6 @@
|
||||
|
||||
???
|
||||
|
||||
:EN:- Installing Prometheus and Grafana
|
||||
:FR:- Installer Prometheus et Grafana
|
||||
|
||||
:T: Observing our cluster with Prometheus and Grafana
|
||||
|
||||
:Q: What's the relationship between Prometheus and Grafana?
|
||||
150
slides/lke/traefik.md
Normal file
150
slides/lke/traefik.md
Normal file
@@ -0,0 +1,150 @@
|
||||
# Installing Traefik
|
||||
|
||||
- Traefik is going to be our Ingress Controller
|
||||
|
||||
- Let's install it with a Helm chart, in its own namespace
|
||||
|
||||
- First, let's add the Traefik chart repository:
|
||||
```bash
|
||||
helm repo add traefik https://helm.traefik.io/traefik
|
||||
```
|
||||
|
||||
- Then, install the chart:
|
||||
```bash
|
||||
helm upgrade --install traefik traefik/traefik \
|
||||
--create-namespace --namespace traefik \
|
||||
--set "ports.websecure.tls.enabled=true"
|
||||
```
|
||||
|
||||
(that option that we added enables HTTPS, it will be useful later!)
|
||||
|
||||
---
|
||||
|
||||
## Testing Traefik
|
||||
|
||||
- Let's create an Ingress resource!
|
||||
|
||||
- If we're using Kubernetes 1.20 or later, we can simply do this:
|
||||
```bash
|
||||
kubectl create ingress web \
|
||||
--rule=`ingress-is-fun.cloudnative.party`/*=web:80
|
||||
```
|
||||
|
||||
(make sure to update and use your own domain)
|
||||
|
||||
- Check that the Ingress was correctly created:
|
||||
```bash
|
||||
kubectl get ingress
|
||||
kubectl describe ingress
|
||||
```
|
||||
|
||||
- If we're using Kubernetes 1.19 or earlier, we'll need some YAML
|
||||
|
||||
---
|
||||
|
||||
## Creating an Ingress with YAML
|
||||
|
||||
- This is how we do it with YAML:
|
||||
```bash
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: web
|
||||
spec:
|
||||
rules:
|
||||
- host: `ingress-is-fun.cloudnative.party`
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: web
|
||||
servicePort: 80
|
||||
EOF
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Ingress versions...
|
||||
|
||||
- Note how we used the `v1beta1` Ingress version on the previous YAML
|
||||
|
||||
(to be compatible with older Kubernetes versions)
|
||||
|
||||
- This YAML will give you deprecation warnings on recent version of Kubernetes
|
||||
|
||||
(since the Ingress spec is now at version `v1`)
|
||||
|
||||
- Don't worry too much about the deprecation warnings
|
||||
|
||||
(on Kubernetes, deprecation happens over a long time window, typically 1 year)
|
||||
|
||||
- You will have time to revisit and worry later! 😅
|
||||
|
||||
---
|
||||
|
||||
## Does it work?
|
||||
|
||||
- Try to connect to the Ingress host name
|
||||
|
||||
(in my example, http://ingress-is-fun.cloudnative.party/)
|
||||
|
||||
- *Normally,* it doesn't work (yet) 🤔
|
||||
|
||||
- Let's look at `kubectl get ingress` again
|
||||
|
||||
- ExternalDNS is trying to create records mapping HOSTS to ADDRESS
|
||||
|
||||
- But the ADDRESS field is currently empty!
|
||||
|
||||
- We need to tell Traefik to fill that ADDRESS field
|
||||
|
||||
---
|
||||
|
||||
## Reconfiguring Traefik
|
||||
|
||||
- There is a "magic" flag to tell Traefik to update the address status field
|
||||
|
||||
- Let's update our Traefik install:
|
||||
```bash
|
||||
helm upgrade --install traefik traefik/traefik \
|
||||
--create-namespace --namespace traefik \
|
||||
--set "ports.websecure.tls.enabled=true" \
|
||||
--set "providers.kubernetesIngress.publishedService.enabled=true"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Checking what we did
|
||||
|
||||
- Check the output of `kubectl get ingress`
|
||||
|
||||
(there should be an address now)
|
||||
|
||||
- Check the logs of ExternalDNS
|
||||
|
||||
(there should be a mention of the new DNS record)
|
||||
|
||||
- Try again to connect to the HTTP address
|
||||
|
||||
(now it should work)
|
||||
|
||||
- Note that some of these operations might take a minute or two
|
||||
|
||||
(be patient!)
|
||||
|
||||
???
|
||||
|
||||
:T: Installing the Traefik Ingress Controller
|
||||
|
||||
:Q: What's the job of an Ingress Controller?
|
||||
:A: Prevent unauthorized access to Kubernetes services
|
||||
:A: Firewall inbound traffic on the Kubernetes API
|
||||
:A: ✔️Handle inbound HTTP traffic for Kubernetes services
|
||||
:A: Keep track of the location of Kubernetes operators
|
||||
|
||||
:Q: What happens when we create an "Ingress resource"?
|
||||
:A: A web service is automatically deployed and scaled on our cluster
|
||||
:A: Kubernetes starts tracking the location of our users
|
||||
:A: Traffic coming from the specified addresses will be allowed
|
||||
:A: ✔️A load balancer is configured with HTTP traffic rules
|
||||
87
slides/lke/what-is-missing.md
Normal file
87
slides/lke/what-is-missing.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# DNS, Ingress, Metrics
|
||||
|
||||
- We got a basic app up and running
|
||||
|
||||
- We accessed it over a raw IP address
|
||||
|
||||
- Can we do better?
|
||||
|
||||
(i.e. access it with a domain name!)
|
||||
|
||||
- How much resources is it using?
|
||||
|
||||
---
|
||||
|
||||
## DNS
|
||||
|
||||
- We'd like to associate a fancy name to that LoadBalancer Service
|
||||
|
||||
(e.g. `nginx.cloudnative.party` → `A.B.C.D`)
|
||||
|
||||
- option 1: manually add a DNS record
|
||||
|
||||
- option 2: find a way to create DNS records automatically
|
||||
|
||||
- We will install ExternalDNS to automate DNS records creation
|
||||
|
||||
- ExternalDNS supports Linode DNS and dozens of other providers
|
||||
|
||||
---
|
||||
|
||||
## Ingress
|
||||
|
||||
- What if we have multiple web services to expose?
|
||||
|
||||
- We could create one LoadBalancer Service for each of them
|
||||
|
||||
- This would create a lot of cloud load balancers
|
||||
|
||||
(and they typically incur a cost, even if it's a small one)
|
||||
|
||||
- Instead, we can use an *Ingress Controller*
|
||||
|
||||
- Ingress Controller = HTTP load balancer / reverse proxy
|
||||
|
||||
- Put all our HTTP services behind a single LoadBalancer Service
|
||||
|
||||
- Can also do fancy "content-based" routing (using headers, request path...)
|
||||
|
||||
- We will install Traefik as our Ingress Controller
|
||||
|
||||
---
|
||||
|
||||
## Metrics
|
||||
|
||||
- How much resources are we using right now?
|
||||
|
||||
- When will we need to scale up our cluster?
|
||||
|
||||
- We need metrics!
|
||||
|
||||
- We're going to install the *metrics server*
|
||||
|
||||
- It's a very basic metrics system
|
||||
|
||||
(no retention, no graphs, no alerting...)
|
||||
|
||||
- But it's lightweight, and it is used internally by Kubernetes for autoscaling
|
||||
|
||||
---
|
||||
|
||||
## What's next
|
||||
|
||||
- We're going to install all these components
|
||||
|
||||
- Very often, things can be installed with a simple YAML file
|
||||
|
||||
- Very often, that YAML file needs to be customized a little bit
|
||||
|
||||
(add command-line parameters, provide API tokens...)
|
||||
|
||||
- Instead, we're going to use Helm charts
|
||||
|
||||
- Helm charts give us a way to customize what we deploy
|
||||
|
||||
- Helm can also keep track of what we install
|
||||
|
||||
(for easier uninstall and updates)
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
- Hello! We are:
|
||||
|
||||
- ✨ Bridget ([@bridgetkromhout](https://twitter.com/bridgetkromhout))
|
||||
- .emoji[✨] Bridget ([@bridgetkromhout](https://twitter.com/bridgetkromhout))
|
||||
|
||||
- 🌟 Joe ([@joelaha](https://twitter.com/joelaha))
|
||||
- .emoji[🌟] Joe ([@joelaha](https://twitter.com/joelaha))
|
||||
|
||||
- The workshop will run from 13:30-16:45
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
- Hello! We are:
|
||||
|
||||
- 👷🏻♀️ AJ ([@s0ulshake], [EphemeraSearch])
|
||||
- .emoji[👷🏻♀️] AJ ([@s0ulshake], [EphemeraSearch])
|
||||
|
||||
- 🐳 Jérôme ([@jpetazzo], Enix SAS)
|
||||
- .emoji[🐳] Jérôme ([@jpetazzo], Enix SAS)
|
||||
|
||||
- The training will run for 4 hours, with a 10 minutes break every hour
|
||||
|
||||
|
||||
@@ -1,17 +1,15 @@
|
||||
## Intros
|
||||
|
||||
- Hello!
|
||||
- Hello! I'm Jérôme Petazzoni
|
||||
|
||||
- I'm Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
|
||||
([@jpetazzo](https://twitter.com/jpetazzo) on Twitter)
|
||||
|
||||
- The training will run from 9:30 to 13:00
|
||||
- I worked at Docker from \~2011 to 2018
|
||||
|
||||
- There will be a break at (approximately) 11:00
|
||||
- I'm now doing consulting, training, etc. on Docker & Kubernetes
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
(check out [container.training](https://container.training/)!)
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
- I'll show you how to deploy a complete CI/CD pipeline on LKE!
|
||||
|
||||
- At the end of every half-day, there will be a mini-homework
|
||||
|
||||
(optional, but recommended)
|
||||
(Linode Kubernetes Engine 😎)
|
||||
|
||||
@@ -42,7 +42,7 @@ def insertslide(markdown, title):
|
||||
|
||||
before = markdown[:slide_position]
|
||||
|
||||
toclink = "toc-part-{}".format(title2part[title])
|
||||
toclink = "toc-module-{}".format(title2path[title][0])
|
||||
_titles_ = [""] + all_titles + [""]
|
||||
currentindex = _titles_.index(title)
|
||||
previouslink = anchor(_titles_[currentindex-1])
|
||||
@@ -54,7 +54,7 @@ def insertslide(markdown, title):
|
||||
|
||||
class: pic
|
||||
|
||||
.interstitial[]
|
||||
.interstitial[]
|
||||
|
||||
---
|
||||
|
||||
@@ -64,11 +64,11 @@ class: title
|
||||
{title}
|
||||
|
||||
.nav[
|
||||
[Previous part](#{previouslink})
|
||||
[Previous section](#{previouslink})
|
||||
|
|
||||
[Back to table of contents](#{toclink})
|
||||
|
|
||||
[Next part](#{nextlink})
|
||||
[Next section](#{nextlink})
|
||||
]
|
||||
|
||||
.debug[(automatically generated title slide)]
|
||||
@@ -156,44 +156,43 @@ def generatefromyaml(manifest, filename):
|
||||
return html
|
||||
|
||||
|
||||
# Maps a title (the string just after "^# ") to its position in the TOC
|
||||
# (to which part it belongs).
|
||||
title2part = {}
|
||||
# Maps a section title (the string just after "^# ") to its position
|
||||
# in the table of content (as a (module,part,subpart,...) tuple).
|
||||
title2path = {}
|
||||
all_titles = []
|
||||
|
||||
# Generate the table of contents for a tree of titles.
|
||||
# "tree" is a list of titles, potentially nested.
|
||||
# Each entry is either:
|
||||
# - a title (then it's a top-level section that doesn't show up in the TOC)
|
||||
# - a list (then it's a part that will show up in the TOC on its own slide)
|
||||
# In a list, we can have:
|
||||
# - titles (simple entry)
|
||||
# - further lists (they are then flattened; we don't represent subsubparts)
|
||||
def gentoc(tree):
|
||||
# First, remove the top-level sections that don't show up in the TOC.
|
||||
tree = [ entry for entry in tree if type(entry)==list ]
|
||||
# Then, flatten the sublists.
|
||||
tree = [ list(flatten(entry)) for entry in tree ]
|
||||
# Now, process each part.
|
||||
parts = []
|
||||
for i, part in enumerate(tree):
|
||||
slide = "name: toc-part-{}\n\n".format(i+1)
|
||||
if len(tree) == 1:
|
||||
slide += "## Table of contents\n\n"
|
||||
def gentoc(tree, path=()):
|
||||
if not tree:
|
||||
return ""
|
||||
if isinstance(tree, str):
|
||||
logging.debug("Path {} Title {}".format(path, tree))
|
||||
title = tree
|
||||
title2path[title] = path
|
||||
all_titles.append(title)
|
||||
return "- [{}](#{})".format(title, anchor(title))
|
||||
if isinstance(tree, list):
|
||||
# If there is only one sub-element, give it index zero.
|
||||
# Otherwise, elements will have indices 1-to-N.
|
||||
offset = 0 if len(tree) == 1 else 1
|
||||
logging.debug(
|
||||
"Path {} Tree [...({} sub-elements)]"
|
||||
.format(path, len(tree)))
|
||||
if len(path) == 0:
|
||||
return "\n---\n".join(gentoc(subtree, path+(i+offset,)) for (i,subtree) in enumerate(tree))
|
||||
elif len(path) == 1:
|
||||
# If there is only one module, don't show "Module 1" but just "TOC"
|
||||
if path[0] == 0:
|
||||
label = "Table of contents"
|
||||
else:
|
||||
label = "Module {}".format(path[0])
|
||||
moduleslide = "name: toc-module-{n}\n\n## {label}\n\n".format(n=path[0], label=label)
|
||||
for (i,subtree) in enumerate(tree):
|
||||
moduleslide += gentoc(subtree, path+(i+offset,)) + "\n\n"
|
||||
moduleslide += ".debug[(auto-generated TOC)]"
|
||||
return moduleslide
|
||||
else:
|
||||
slide += "## Part {}\n\n".format(i+1)
|
||||
for title in part:
|
||||
logging.debug("Generating TOC, part {}, title {}.".format(i+1, title))
|
||||
title2part[title] = i+1
|
||||
all_titles.append(title)
|
||||
slide += "- [{}](#{})\n".format(title, anchor(title))
|
||||
# If we don't have too many subparts, add some space to breathe.
|
||||
# (Otherwise, we display the titles smooched together.)
|
||||
if len(part) < 10:
|
||||
slide += "\n"
|
||||
slide += "\n.debug[(auto-generated TOC)]"
|
||||
parts.append(slide)
|
||||
return "\n---\n".join(parts)
|
||||
return "\n\n".join(gentoc(subtree, path+(i+offset,)) for (i,subtree) in enumerate(tree))
|
||||
|
||||
|
||||
# Arguments:
|
||||
|
||||
@@ -46,7 +46,7 @@
|
||||
|
||||
- Typos? Mistakes? Questions? Feel free to hover over the bottom of the slide ...
|
||||
|
||||
.footnote[👇 Try it! The source file will be shown and you can view it on GitHub and fork and edit it.]
|
||||
.footnote[.emoji[👇] Try it! The source file will be shown and you can view it on GitHub and fork and edit it.]
|
||||
|
||||
<!--
|
||||
.exercise[
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
|
||||
- Some of our favorites:
|
||||
|
||||
🤔✔️👍🏻👍🏼👍🏽👍🏾👍🏿⚠️🛑
|
||||
.emoji[🤔✔️👍🏻👍🏼👍🏽👍🏾👍🏿⚠️🛑]
|
||||
|
||||
- During the session, we'll often ask audience participation questions
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
|
||||
- Some of our favorites:
|
||||
|
||||
🤔✔️👍🏻👍🏼👍🏽👍🏾👍🏿⚠️🛑
|
||||
.emoji[🤔✔️👍🏻👍🏼👍🏽👍🏾👍🏿⚠️🛑]
|
||||
|
||||
- During the session, we'll often ask audience participation questions
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
## Pre-requirements
|
||||
# Pre-requirements
|
||||
|
||||
- Be comfortable with the UNIX command line
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Our sample application
|
||||
|
||||
- We will clone the GitHub repository onto our `node1`
|
||||
- I'm going to run our demo app locally, with Docker
|
||||
|
||||
- The repository also contains scripts and tools that we will use through the workshop
|
||||
(you don't have to do that; do it if you like!)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -15,7 +15,7 @@ fi
|
||||
```
|
||||
-->
|
||||
|
||||
- Clone the repository on `node1`:
|
||||
- Clone the repository:
|
||||
```bash
|
||||
git clone https://@@GITREPO@@
|
||||
```
|
||||
@@ -34,7 +34,7 @@ Let's start this before we look around, as downloading will take a little time..
|
||||
|
||||
- Go to the `dockercoins` directory, in the cloned repo:
|
||||
```bash
|
||||
cd ~/container.training/dockercoins
|
||||
cd container.training/dockercoins
|
||||
```
|
||||
|
||||
- Use Compose to build and run all containers:
|
||||
@@ -58,7 +58,7 @@ and displays aggregated logs.
|
||||
|
||||
--
|
||||
|
||||
- It is a DockerCoin miner! 💰🐳📦🚢
|
||||
- It is a DockerCoin miner! .emoji[💰🐳📦🚢]
|
||||
|
||||
--
|
||||
|
||||
|
||||
@@ -6,6 +6,6 @@ Thank you!
|
||||
|
||||
class: title, in-person
|
||||
|
||||
That's all, folks! <br/> Questions?
|
||||
That's all, folks! <br/> Thank you ✨
|
||||
|
||||

|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- btp-auto
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- - swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/gui.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,70 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- btp-manual
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
#- swarm/hostingregistry.md
|
||||
#- swarm/testingregistry.md
|
||||
#- swarm/btp-manual.md
|
||||
#- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- swarm/updatingservices.md
|
||||
#- swarm/rollingupdates.md
|
||||
#- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
#- swarm/netshoot.md
|
||||
#- swarm/ipsec.md
|
||||
#- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
#- swarm/secrets.md
|
||||
#- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
#- swarm/stateful.md
|
||||
#- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,79 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
- btp-auto
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- shared/logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 1
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- |
|
||||
name: part-2
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 2
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/swarmnbt.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
- swarm/healthchecks.md
|
||||
- swarm/nodeinfo.md
|
||||
- swarm/swarmtools.md
|
||||
- - swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,74 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
- btp-auto
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- shared/logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 1
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- |
|
||||
name: part-2
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 2
|
||||
- - swarm/operatingswarm.md
|
||||
#- swarm/netshoot.md
|
||||
#- swarm/swarmnbt.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
#- swarm/healthchecks.md
|
||||
- swarm/nodeinfo.md
|
||||
- swarm/swarmtools.md
|
||||
- - swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
#- swarm/logging.md
|
||||
#- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -205,7 +205,7 @@ ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
|
||||
|
||||
--
|
||||
|
||||
- Don't panic, we can easily see it again 😏
|
||||
- Don't panic, we can easily see it again .emoji[😏]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
|
||||
--
|
||||
|
||||
.footnote[🐳 Did you know that кит means "whale" in Russian?]
|
||||
.footnote[.emoji[🐳] Did you know that кит means "whale" in Russian?]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -10,6 +10,10 @@
|
||||
font-size: 25px !important;
|
||||
}
|
||||
|
||||
.emoji {
|
||||
font-family: 'EmojiOne Color';
|
||||
}
|
||||
|
||||
h1, h2, h3, h4, h5, h6 {
|
||||
font-family: 'Droid Serif';
|
||||
font-weight: bold;
|
||||
@@ -25,7 +29,7 @@ code {
|
||||
code.remark-code {
|
||||
font-size: 100%;
|
||||
}
|
||||
.exercise ul li code.remark-code.hljs.bash {
|
||||
.x-exercise ul li code.remark-code.hljs.bash {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
@@ -94,31 +98,7 @@ span.footnote {
|
||||
.underline { text-decoration: underline; }
|
||||
.strike { text-decoration: line-through; }
|
||||
|
||||
/*
|
||||
The pic class is tricky.
|
||||
|
||||
It is used to display full screen pictures (for isntance, diagrams).
|
||||
We want the picture to take as much space as possible on the slide.
|
||||
|
||||
But we have two problems here:
|
||||
- some pictures are taller than wide, others are wider than tall;
|
||||
- some pictures are displayed by themselves, others with a title.
|
||||
|
||||
This makes it particularly difficult to resize the pictures. If we
|
||||
set an absolute width or height, it won't work for both taller and
|
||||
wider pictures. If we set an absolute height, it won't work with
|
||||
both pics-with-title and pics-by-themselves.
|
||||
|
||||
Perhaps it would be a good idea to replace the pic class with two
|
||||
different classes, like pic-with-title and pic-by-itself; but in
|
||||
the meantime, we'll check if the <img> tag is within the first <p>
|
||||
in the slide to try and guess if the pic is by itself, or with
|
||||
a title. It won't be 100% bulletproof but it should work for
|
||||
our slides.
|
||||
|
||||
The pixel dimensions in the classes below correspond to the base
|
||||
scaler dimensions of remark (see scaler.js in remark source code).
|
||||
*/
|
||||
/* On pic slides, zoom images as big as possible */
|
||||
div.pic {
|
||||
padding: 0;
|
||||
vertical-align: middle;
|
||||
@@ -129,12 +109,17 @@ div.pic p {
|
||||
div.pic img {
|
||||
display: block;
|
||||
margin: auto;
|
||||
/*
|
||||
"pic" class slides should have a single, full screen picture.
|
||||
We used to have these attributes below but they prevented
|
||||
pictures from taking up the whole slide. Replacing them with
|
||||
100%/100% seems to put the pictures full screen, but I've left
|
||||
these old attributes here just in case.
|
||||
max-width: 1210px;
|
||||
max-height: 550px;
|
||||
}
|
||||
div.pic p:first-child img {
|
||||
max-width: 1210px;
|
||||
max-height: 681px;
|
||||
*/
|
||||
max-width: 100%;
|
||||
max-height: 100%;
|
||||
}
|
||||
div.pic h1, div.pic h2, div.title h1, div.title h2 {
|
||||
text-align: center;
|
||||
@@ -145,7 +130,7 @@ div.title img {
|
||||
display: block;
|
||||
margin: auto;
|
||||
max-width: 1210px;
|
||||
max-height: 420px; /* Arbitrary value to have some space for the title */
|
||||
max-height: 420px; /* Arbitrary value to have so space for the title */
|
||||
}
|
||||
div.title {
|
||||
vertical-align: middle;
|
||||
@@ -192,7 +177,7 @@ div img {
|
||||
background-repeat: no-repeat;
|
||||
background-position: left;
|
||||
}
|
||||
.exercise {
|
||||
.x-exercise {
|
||||
background-color: #eee;
|
||||
background-image: url("images/keyboard.png");
|
||||
background-size: 1.4em;
|
||||
@@ -200,7 +185,7 @@ div img {
|
||||
background-position: 0.2em 0.2em;
|
||||
border: 2px dotted black;
|
||||
}
|
||||
.exercise:before {
|
||||
.x-exercise:before {
|
||||
content: "Exercise";
|
||||
margin-left: 1.8em;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user