Compare commits

..

10 Commits

Author  SHA1  Message  Date
Jerome Petazzoni  e6016d6a90  Last logistics update  2018-06-05 00:49:54 -05:00
Jerome Petazzoni  9ee6ff9a4a  Merge branch 'master' into juin2018  2018-06-04 08:47:40 -05:00
Jerome Petazzoni  7ef6308067  Add k8s wordsmith exercise  2018-06-04 06:56:35 -05:00
Jerome Petazzoni  cd160387b4  Merge decks in a single 1000+ slides one  2018-06-04 06:52:31 -05:00
Jerome Petazzoni  4ebc84341f  Merge branch 'master' into juin2018  2018-06-04 06:11:14 -05:00
Jerome Petazzoni  6865aaa21b  Merge branch 'master' into juin2018  2018-06-04 05:43:41 -05:00
Jerome Petazzoni  c16a45e2be  Add instructions for multistage exercise  2018-06-04 05:40:25 -05:00
Jerome Petazzoni  64c735f456  Update kube TOC  2018-06-03 16:26:34 -05:00
Jerome Petazzoni  674d24c440  Assemble intro TOC  2018-06-03 16:15:57 -05:00
Jerome Petazzoni  867ca714f5  cards.html en français  2018-06-03 15:40:06 -05:00
148 changed files with 522 additions and 8378 deletions

.gitignore
View File

@@ -8,6 +8,4 @@ prepare-vms/settings.yaml
prepare-vms/tags
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
node_modules

View File

@@ -28,5 +28,5 @@ def rng(how_many_bytes):
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80, threaded=False)
app.run(host="0.0.0.0", port=80)

View File

@@ -1,62 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.2.2"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=consul-0.consul.$(NAMESPACE).svc.cluster.local"
- "-retry-join=consul-1.consul.$(NAMESPACE).svc.cluster.local"
- "-retry-join=consul-2.consul.$(NAMESPACE).svc.cluster.local"
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave

View File

@@ -1,28 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: build-image
spec:
restartPolicy: OnFailure
containers:
- name: docker-build
image: docker
env:
- name: REGISTRY_PORT
value: #"30000"
command: ["sh", "-c"]
args:
- |
apk add --no-cache git &&
mkdir /workspace &&
git clone https://github.com/jpetazzo/container.training /workspace &&
docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
docker push localhost:$REGISTRY_PORT/worker
volumeMounts:
- name: docker-socket
mountPath: /var/run/docker.sock
volumes:
- name: docker-socket
hostPath:
path: /var/run/docker.sock

View File

@@ -1,222 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: fluentd
rules:
- apiGroups:
- ""
resources:
- pods
- namespaces
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: fluentd
roleRef:
kind: ClusterRole
name: fluentd
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: fluentd
namespace: default
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd
labels:
k8s-app: fluentd-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
template:
metadata:
labels:
k8s-app: fluentd-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
serviceAccount: fluentd
serviceAccountName: fluentd
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: fluentd
image: fluent/fluentd-kubernetes-daemonset:elasticsearch
env:
- name: FLUENT_ELASTICSEARCH_HOST
value: "elasticsearch"
- name: FLUENT_ELASTICSEARCH_PORT
value: "9200"
- name: FLUENT_ELASTICSEARCH_SCHEME
value: "http"
# X-Pack Authentication
# =====================
- name: FLUENT_ELASTICSEARCH_USER
value: "elastic"
- name: FLUENT_ELASTICSEARCH_PASSWORD
value: "changeme"
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: null
generation: 1
labels:
run: elasticsearch
name: elasticsearch
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
run: elasticsearch
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
run: elasticsearch
spec:
containers:
- image: elasticsearch:5.6.8
imagePullPolicy: IfNotPresent
name: elasticsearch
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
run: elasticsearch
name: elasticsearch
selfLink: /api/v1/namespaces/default/services/elasticsearch
spec:
ports:
- port: 9200
protocol: TCP
targetPort: 9200
selector:
run: elasticsearch
sessionAffinity: None
type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
creationTimestamp: null
generation: 1
labels:
run: kibana
name: kibana
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
run: kibana
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
run: kibana
spec:
containers:
- env:
- name: ELASTICSEARCH_URL
value: http://elasticsearch:9200/
image: kibana:5.6.8
imagePullPolicy: Always
name: kibana
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
run: kibana
name: kibana
selfLink: /api/v1/namespaces/default/services/kibana
spec:
externalTrafficPolicy: Cluster
ports:
- port: 5601
protocol: TCP
targetPort: 5601
selector:
run: kibana
sessionAffinity: None
type: NodePort

View File

@@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system

View File

@@ -1,18 +0,0 @@
global
daemon
maxconn 256
defaults
mode tcp
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
frontend the-frontend
bind *:80
default_backend the-backend
backend the-backend
server google.com-80 google.com:80 maxconn 32 check
server bing.com-80 bing.com:80 maxconn 32 check

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: haproxy
spec:
volumes:
- name: config
configMap:
name: haproxy
containers:
- name: haproxy
image: haproxy
volumeMounts:
- name: config
mountPath: /usr/local/etc/haproxy/

View File

@@ -1,14 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cheddar
spec:
rules:
- host: cheddar.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: cheddar
servicePort: 80

View File

@@ -1,29 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kaniko-build
spec:
initContainers:
- name: git-clone
image: alpine
command: ["sh", "-c"]
args:
- |
apk add --no-cache git &&
git clone git://github.com/jpetazzo/container.training /workspace
volumeMounts:
- name: workspace
mountPath: /workspace
containers:
- name: build-image
image: gcr.io/kaniko-project/executor:latest
args:
- "--context=/workspace/dockercoins/rng"
- "--skip-tls-verify"
- "--destination=registry:5000/rng-kaniko:latest"
volumeMounts:
- name: workspace
mountPath: /workspace
volumes:
- name: workspace

View File

@@ -1,167 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard

View File

@@ -1,14 +0,0 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-testcurl-for-testweb
spec:
podSelector:
matchLabels:
run: testweb
ingress:
- from:
- podSelector:
matchLabels:
run: testcurl

View File

@@ -1,10 +0,0 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-all-for-testweb
spec:
podSelector:
matchLabels:
run: testweb
ingress: []

View File

@@ -1,22 +0,0 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector:
matchLabels:
ingress:
- from:
- podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-webui
spec:
podSelector:
matchLabels:
run: webui
ingress:
- from: []

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-volume
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/
- name: git
image: alpine
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/
restartPolicy: OnFailure

View File

@@ -1,580 +0,0 @@
# SOURCE: https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop0&c=px-workshop&stork=true&lh=true
apiVersion: v1
kind: ConfigMap
metadata:
name: stork-config
namespace: kube-system
data:
policy.cfg: |-
{
"kind": "Policy",
"apiVersion": "v1",
"extenders": [
{
"urlPrefix": "http://stork-service.kube-system.svc:8099",
"apiVersion": "v1beta1",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 5,
"enableHttps": false,
"nodeCacheCapable": false
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: stork-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "delete"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshotdatas"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["*"]
resources: ["deployments", "deployments/extensions"]
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
- apiGroups: ["*"]
resources: ["statefulsets", "statefulsets/extensions"]
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-role-binding
subjects:
- kind: ServiceAccount
name: stork-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: stork-role
apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
name: stork-service
namespace: kube-system
spec:
selector:
name: stork
ports:
- protocol: TCP
port: 8099
targetPort: 8099
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
tier: control-plane
name: stork
namespace: kube-system
spec:
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
replicas: 3
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
name: stork
tier: control-plane
spec:
containers:
- command:
- /stork
- --driver=pxd
- --verbose
- --leader-elect=true
- --health-monitor-interval=120
imagePullPolicy: Always
image: openstorage/stork:1.1.3
resources:
requests:
cpu: '0.1'
name: stork
hostPID: false
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "name"
operator: In
values:
- stork
topologyKey: "kubernetes.io/hostname"
serviceAccountName: stork-account
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: stork-snapshot-sc
provisioner: stork-snapshot
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: stork-scheduler-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-scheduler-role
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch", "update"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["create"]
- apiGroups: [""]
resourceNames: ["kube-scheduler"]
resources: ["endpoints"]
verbs: ["delete", "get", "patch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["delete", "get", "list", "watch"]
- apiGroups: [""]
resources: ["bindings", "pods/binding"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/status"]
verbs: ["patch", "update"]
- apiGroups: [""]
resources: ["replicationcontrollers", "services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["app", "extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims", "persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-scheduler-role-binding
subjects:
- kind: ServiceAccount
name: stork-scheduler-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: stork-scheduler-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
component: scheduler
tier: control-plane
name: stork-scheduler
name: stork-scheduler
namespace: kube-system
spec:
replicas: 3
template:
metadata:
labels:
component: scheduler
tier: control-plane
name: stork-scheduler
spec:
containers:
- command:
- /usr/local/bin/kube-scheduler
- --address=0.0.0.0
- --leader-elect=true
- --scheduler-name=stork
- --policy-configmap=stork-config
- --policy-configmap-namespace=kube-system
- --lock-object-name=stork-scheduler
image: gcr.io/google_containers/kube-scheduler-amd64:v1.11.2
livenessProbe:
httpGet:
path: /healthz
port: 10251
initialDelaySeconds: 15
name: stork-scheduler
readinessProbe:
httpGet:
path: /healthz
port: 10251
resources:
requests:
cpu: '0.1'
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "name"
operator: In
values:
- stork-scheduler
topologyKey: "kubernetes.io/hostname"
hostPID: false
serviceAccountName: stork-scheduler-account
---
kind: Service
apiVersion: v1
metadata:
name: portworx-service
namespace: kube-system
labels:
name: portworx
spec:
selector:
name: portworx
ports:
- name: px-api
protocol: TCP
port: 9001
targetPort: 9001
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: px-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-get-put-list-role
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch", "get", "update", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["delete", "get", "list"]
- apiGroups: [""]
resources: ["persistentvolumeclaims", "persistentvolumes"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "update", "create"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["privileged"]
verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-role-binding
subjects:
- kind: ServiceAccount
name: px-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: node-get-put-list-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Namespace
metadata:
name: portworx
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-role
namespace: portworx
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-role-binding
namespace: portworx
subjects:
- kind: ServiceAccount
name: px-account
namespace: kube-system
roleRef:
kind: Role
name: px-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: portworx
namespace: kube-system
annotations:
portworx.com/install-source: "https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop0&c=px-workshop&stork=true&lh=true"
spec:
minReadySeconds: 0
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
name: portworx
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: px/enabled
operator: NotIn
values:
- "false"
- key: node-role.kubernetes.io/master
operator: DoesNotExist
hostNetwork: true
hostPID: false
containers:
- name: portworx
image: portworx/oci-monitor:1.4.2.2
imagePullPolicy: Always
args:
["-c", "px-workshop", "-s", "/dev/loop0", "-b",
"-x", "kubernetes"]
env:
- name: "PX_TEMPLATE_VERSION"
value: "v4"
livenessProbe:
periodSeconds: 30
initialDelaySeconds: 840 # allow image pull in slow networks
httpGet:
host: 127.0.0.1
path: /status
port: 9001
readinessProbe:
periodSeconds: 10
httpGet:
host: 127.0.0.1
path: /health
port: 9015
terminationMessagePath: "/tmp/px-termination-log"
securityContext:
privileged: true
volumeMounts:
- name: dockersock
mountPath: /var/run/docker.sock
- name: etcpwx
mountPath: /etc/pwx
- name: optpwx
mountPath: /opt/pwx
- name: proc1nsmount
mountPath: /host_proc/1/ns
- name: sysdmount
mountPath: /etc/systemd/system
- name: diagsdump
mountPath: /var/cores
- name: journalmount1
mountPath: /var/run/log
readOnly: true
- name: journalmount2
mountPath: /var/log
readOnly: true
- name: dbusmount
mountPath: /var/run/dbus
restartPolicy: Always
serviceAccountName: px-account
volumes:
- name: dockersock
hostPath:
path: /var/run/docker.sock
- name: etcpwx
hostPath:
path: /etc/pwx
- name: optpwx
hostPath:
path: /opt/pwx
- name: proc1nsmount
hostPath:
path: /proc/1/ns
- name: sysdmount
hostPath:
path: /etc/systemd/system
- name: diagsdump
hostPath:
path: /var/cores
- name: journalmount1
hostPath:
path: /var/run/log
- name: journalmount2
hostPath:
path: /var/log
- name: dbusmount
hostPath:
path: /var/run/dbus
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: px-lh-account
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-lh-role
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "update"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-lh-role-binding
namespace: kube-system
subjects:
- kind: ServiceAccount
name: px-lh-account
namespace: kube-system
roleRef:
kind: Role
name: px-lh-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: px-lighthouse
namespace: kube-system
labels:
tier: px-web-console
spec:
type: NodePort
ports:
- name: http
port: 80
nodePort: 32678
- name: https
port: 443
nodePort: 32679
selector:
tier: px-web-console
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: px-lighthouse
namespace: kube-system
labels:
tier: px-web-console
spec:
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
tier: px-web-console
replicas: 1
template:
metadata:
labels:
tier: px-web-console
spec:
initContainers:
- name: config-init
image: portworx/lh-config-sync:0.2
imagePullPolicy: Always
args:
- "init"
volumeMounts:
- name: config
mountPath: /config/lh
containers:
- name: px-lighthouse
image: portworx/px-lighthouse:1.5.0
imagePullPolicy: Always
ports:
- containerPort: 80
- containerPort: 443
volumeMounts:
- name: config
mountPath: /config/lh
- name: config-sync
image: portworx/lh-config-sync:0.2
imagePullPolicy: Always
args:
- "sync"
volumeMounts:
- name: config
mountPath: /config/lh
serviceAccountName: px-lh-account
volumes:
- name: config
emptyDir: {}

View File

@@ -1,30 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
spec:
selector:
matchLabels:
app: postgres
serviceName: postgres
template:
metadata:
labels:
app: postgres
spec:
schedulerName: stork
containers:
- name: postgres
image: postgres:10.5
volumeMounts:
- mountPath: /var/lib/postgresql
name: postgres
volumeClaimTemplates:
- metadata:
name: postgres
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: registry
spec:
containers:
- name: registry
image: registry
env:
- name: REGISTRY_HTTP_ADDR
valueFrom:
configMapKeyRef:
name: registry
key: http.addr

View File

@@ -1,67 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "2"
creationTimestamp: null
generation: 1
labels:
run: socat
name: socat
namespace: kube-system
selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
replicas: 1
selector:
matchLabels:
run: socat
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
run: socat
spec:
containers:
- args:
- sh
- -c
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0
image: alpine
imagePullPolicy: Always
name: socat
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
run: socat
name: socat
namespace: kube-system
selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
externalTrafficPolicy: Cluster
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
run: socat
sessionAffinity: None
type: NodePort
status:
loadBalancer: {}

View File

@@ -1,11 +0,0 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: portworx-replicated
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/portworx-volume
parameters:
repl: "2"
priority_io: "high"

View File

@@ -1,100 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

View File

@@ -93,7 +93,7 @@ wrap Run this program in a container
- The `./workshopctl` script can be executed directly.
- It will run locally if all its dependencies are fulfilled; otherwise it will run in the Docker container you created with `docker-compose build` (preparevms_prepare-vms).
- During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. This can be configured with the `docker_user_password` property in the settings file.
- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. For now, this is hard-coded.
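Putting those steps together, a typical end-to-end run might look like the sketch below. This is only an illustration of the workflow described above: the `start` and `deploy` subcommands are named in this README, the `cards` step and the absence of any arguments are assumptions, and real invocations will depend on your settings file.

```bash
# Illustrative sequence only; adjust to your own settings and instance tags.
docker-compose build     # build the prepare-vms helper image (used if local deps are missing)
./workshopctl start      # launch instances and install your default SSH key (ubuntu user)
./workshopctl deploy     # create the "docker" user (password "training") on each VM
./workshopctl cards      # generate the printable login cards (ips.html / ips.pdf)
```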
### Example Steps to Launch a Batch of AWS Instances for a Workshop

View File

@@ -1,17 +1,13 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set url = "juin2018.container.training" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set cluster_or_machine = "votre VM" -%}
{%- set machine_is_or_machines_are = "Votre VM" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "orchestration workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set cluster_or_machine = "votre cluster" -%}
{%- set machine_is_or_machines_are = "Votre cluster" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_swarm -%}
@@ -75,9 +71,9 @@ img {
<div>
<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
Voici les informations pour vous connecter à
{{ cluster_or_machine }} pour cette formation.
Vous pouvez vous connecter avec n'importe quel client SSH.
</p>
<p>
<img src="{{ image_src }}" />
@@ -85,19 +81,19 @@ img {
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
<tr><td class="logpass">training</td></tr>
</table>
</p>
<p>
Your {{ machine_is_or_machines_are }}:
{{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<p>Les slides sont à l'adresse suivante :
<center>{{ url }}</center>
</p>
</div>

View File

@@ -48,7 +48,7 @@ _cmd_cards() {
rm -f ips.html ips.pdf
# This will generate two files in the base dir: ips.pdf and ips.html
lib/ips-txt-to-html.py $SETTINGS
python lib/ips-txt-to-html.py $SETTINGS
for f in ips.html ips.pdf; do
# Remove old versions of cards if they exist
@@ -168,22 +168,6 @@ _cmd_kube() {
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
fi"
# Install stern
pssh "
if [ ! -x /usr/local/bin/stern ]; then
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
stern --completion bash | sudo tee /etc/bash_completion.d/stern
fi"
# Install helm
pssh "
if [ ! -x /usr/local/bin/helm ]; then
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
sep "Done"
}

View File

@@ -13,7 +13,6 @@ COMPOSE_VERSION = config["compose_version"]
MACHINE_VERSION = config["machine_version"]
CLUSTER_SIZE = config["clustersize"]
ENGINE_VERSION = config["engine_version"]
DOCKER_USER_PASSWORD = config["docker_user_password"]
#################################
@@ -55,9 +54,9 @@ system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))
ipv4 = open("/tmp/ipv4").read()
# Add a "docker" user with password coming from the settings
# Add a "docker" user with password "training"
system("id docker || sudo useradd -d /home/docker -m -s /bin/bash docker")
system("echo docker:{} | sudo chpasswd".format(DOCKER_USER_PASSWORD))
system("echo docker:training | sudo chpasswd")
# Fancy prompt courtesy of @soulshake.
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL

View File

@@ -22,6 +22,3 @@ engine_version: test
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.18.0
machine_version: 0.13.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -7,7 +7,7 @@ clustersize: 1
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter
paper_size: A4
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
@@ -20,8 +20,5 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.22.0
machine_version: 0.15.0
# Password used to connect with the "docker user"
docker_user_password: training
compose_version: 1.21.1
machine_version: 0.14.0

View File

@@ -85,7 +85,7 @@ img {
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
<tr><td class="logpass">training</td></tr>
</table>
</p>

View File

@@ -22,6 +22,3 @@ engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -0,0 +1,24 @@
# This file is passed by trainer-cli to scripts/ips-txt-to-html.py
# Number of VMs per cluster
clustersize: 5
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4
# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)
# This can be "test" or "stable"
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0

View File

@@ -22,6 +22,3 @@ engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -1 +1 @@
/ /weka.yml.html 200!
/ /deck.yml.html

View File

@@ -29,10 +29,6 @@ class State(object):
self.interactive = True
self.verify_status = False
self.simulate_type = True
self.switch_desktop = False
self.sync_slides = False
self.open_links = False
self.run_hidden = True
self.slide = 1
self.snippet = 0
@@ -41,10 +37,6 @@ class State(object):
self.interactive = bool(data["interactive"])
self.verify_status = bool(data["verify_status"])
self.simulate_type = bool(data["simulate_type"])
self.switch_desktop = bool(data["switch_desktop"])
self.sync_slides = bool(data["sync_slides"])
self.open_links = bool(data["open_links"])
self.run_hidden = bool(data["run_hidden"])
self.slide = int(data["slide"])
self.snippet = int(data["snippet"])
@@ -54,10 +46,6 @@ class State(object):
interactive=self.interactive,
verify_status=self.verify_status,
simulate_type=self.simulate_type,
switch_desktop=self.switch_desktop,
sync_slides=self.sync_slides,
open_links=self.open_links,
run_hidden=self.run_hidden,
slide=self.slide,
snippet=self.snippet,
), f, default_flow_style=False)
@@ -134,20 +122,14 @@ class Slide(object):
def focus_slides():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "3"])
subprocess.check_output(["i3-msg", "workspace", "1"])
def focus_terminal():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "2"])
subprocess.check_output(["i3-msg", "workspace", "1"])
def focus_browser():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "4"])
subprocess.check_output(["i3-msg", "workspace", "1"])
@@ -325,21 +307,17 @@ while True:
slide = slides[state.slide]
snippet = slide.snippets[state.snippet-1] if state.snippet else None
click.clear()
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
"[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}]"
.format(state.slide, len(slides)-1,
state.snippet, len(slide.snippets) if slide.snippets else 0,
state.simulate_type, state.verify_status,
state.switch_desktop, state.sync_slides,
state.open_links, state.run_hidden))
state.simulate_type, state.verify_status))
print(hrule())
if snippet:
print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
focus_terminal()
else:
print(slide.content)
if state.sync_slides:
subprocess.check_output(["./gotoslide.js", str(slide.number)])
subprocess.check_output(["./gotoslide.js", str(slide.number)])
focus_slides()
print(hrule())
if state.interactive:
@@ -348,10 +326,6 @@ while True:
print("n/→ Next")
print("s Simulate keystrokes")
print("v Validate exit status")
print("d Switch desktop")
print("k Sync slides")
print("o Open links")
print("h Run hidden commands")
print("g Go to a specific slide")
print("q Quit")
print("c Continue non-interactively until next error")
@@ -367,14 +341,6 @@ while True:
state.simulate_type = not state.simulate_type
elif command == "v":
state.verify_status = not state.verify_status
elif command == "d":
state.switch_desktop = not state.switch_desktop
elif command == "k":
state.sync_slides = not state.sync_slides
elif command == "o":
state.open_links = not state.open_links
elif command == "h":
state.run_hidden = not state.run_hidden
elif command == "g":
state.slide = click.prompt("Enter slide number", type=int)
state.snippet = 0
@@ -400,7 +366,7 @@ while True:
logging.info("Running with method {}: {}".format(method, data))
if method == "keys":
send_keys(data)
elif method == "bash" or (method == "hide" and state.run_hidden):
elif method == "bash":
# Make sure that we're ready
wait_for_prompt()
# Strip leading spaces
@@ -439,12 +405,11 @@ while True:
screen = capture_pane()
url = data.replace("/node1", "/{}".format(IPADDR))
# This should probably be adapted to run on different OS
if state.open_links:
subprocess.check_output(["xdg-open", url])
focus_browser()
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
subprocess.check_output(["xdg-open", url])
focus_browser()
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
else:
logging.warning("Unknown method {}: {!r}".format(method, data))
move_forward()

View File

@@ -1 +0,0 @@
click

View File

@@ -1,8 +1,6 @@
#!/bin/sh
set -e
case "$1" in
once)
./index.py
for YAML in *.yml; do
./markmaker.py $YAML > $YAML.html || {
rm $YAML.html
@@ -17,7 +15,6 @@ once)
;;
forever)
set +e
# check if entr is installed
if ! command -v entr >/dev/null; then
echo >&2 "First install 'entr' with apt, brew, etc."

View File

@@ -35,7 +35,7 @@ class: extra-details
- This slide has a little magnifying glass in the top left corner
- This magnifying glass indicates slides that provide extra details
- This magnifiying glass indicates slides that provide extra details
- Feel free to skip them if:

View File

@@ -1,14 +1,9 @@
# Pre-requirements
# Orchestration
- Be comfortable with the UNIX command line
- Now that we have learned some container knowledge,
we can get started with orchestration!
- navigating directories
- editing files
- a little bit of bash-fu (environment variables, loops)
- Some Docker knowledge
- Note: all that is needed to follow along with the orchestration part is some *basic* Docker knowledge, i.e.:
- `docker run`, `docker ps`, `docker build`
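For reference, the level of familiarity implied by the three commands mentioned just above is roughly the following (a hedged sketch; the image and tag names are placeholders, not part of the workshop material):

```bash
# Basic Docker knowledge assumed by the orchestration part (illustrative only)
docker run -d -p 8080:80 nginx    # start a container from an image, publishing a port
docker ps                         # list running containers
docker build -t myimage .         # build an image from a Dockerfile in the current directory
```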
@@ -36,7 +31,7 @@ Misattributed to Benjamin Franklin
## Hands-on sections
- The whole workshop is hands-on
- Of course, we have tons of exercises and hands-on labs
- We are going to build, ship, and run containers!
@@ -78,7 +73,9 @@ class: in-person
- They'll remain up for the duration of the workshop
- You should have a little card with login+password+IP addresses
- You should have **another** little card with login+password+IP addresses
(But that one has 5 nodes instead of only 1)
- You can automatically SSH from one VM to another
@@ -189,9 +186,7 @@ done
```bash
if which kubectl; then
kubectl get deploy,ds -o name | xargs -rn1 kubectl delete
kubectl get all -o name | grep -v service/kubernetes | xargs -rn1 kubectl delete --ignore-not-found=true
kubectl -n kube-system get deploy,svc -o name | grep -v dns | xargs -rn1 kubectl -n kube-system delete
kubectl get all -o name | grep -v service/kubernetes | xargs -rn1 kubectl delete
fi
```
-->
@@ -214,7 +209,7 @@ If anything goes wrong — ask for help!
- Use something like
[Play-With-Docker](http://play-with-docker.com/) or
[Play-With-Kubernetes](https://training.play-with-kubernetes.com/)
[Play-With-Kubernetes](https://medium.com/@marcosnils/introducing-pwk-play-with-k8s-159fcfeb787b)
Zero setup effort; but environments are short-lived and
might have limited resources

View File

@@ -8,9 +8,8 @@
<!--
```bash
cd ~
if [ -d container.training ]; then
mv container.training container.training.$RANDOM
mv container.training container.training.$$
fi
```
-->

View File

@@ -51,7 +51,7 @@ for line in open(sys.argv[1]):
state.show()
for chapter in sorted(state.chapters, key=lambda f: int(f.split("-")[1])):
for chapter in sorted(state.chapters):
chapter_size = sum(state.sections[s] for s in state.chapters[chapter])
print("{}\t{}\t{}".format("total size for", chapter, chapter_size))

slides/deck.yml
View File

@@ -0,0 +1,114 @@
title: |
Introduction
to Containers
and Orchestration
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/training-20180605-montpellier)"
gitrepo: github.com/jpetazzo/container.training
slides: http://juin2018.container.training/
exclude:
- self-paced
chapters:
- common/title.md
- logistics.md
- intro/intro.md
- common/about-slides.md
- common/toc.md
- - intro/Docker_Overview.md
- intro/Docker_History.md
- intro/Training_Environment.md
- intro/Installing_Docker.md
- intro/First_Containers.md
- intro/Background_Containers.md
- intro/Start_And_Attach.md
- - intro/Initial_Images.md
- intro/Building_Images_Interactively.md
- intro/Building_Images_With_Dockerfiles.md
- intro/Cmd_And_Entrypoint.md
- intro/Copying_Files_During_Build.md
- - |
# Exercise — writing Dockerfiles
Let's write Dockerfiles for an existing application!
The code is at: https://bitbucket.org/jgarrouste/k8s-wordsmith-exo/src/master/
- intro/Multi_Stage_Builds.md
- intro/Publishing_To_Docker_Hub.md
- intro/Dockerfile_Tips.md
- |
# Exercise — writing better Dockerfiles
Let's update our Dockerfiles to leverage multi-stage builds!
The code is at: https://bitbucket.org/jgarrouste/k8s-wordsmith-exo/src/master/
Use a different tag for these images, so that we can compare their sizes.
What's the size difference between single-stage and multi-stage builds?
- - intro/Naming_And_Inspecting.md
- intro/Labels.md
- intro/Getting_Inside.md
- intro/Resource_Limits.md
- - intro/Namespaces_Cgroups.md
- intro/Copy_On_Write.md
#- intro/Containers_From_Scratch.md
- - intro/Container_Networking_Basics.md
- intro/Network_Drivers.md
- intro/Container_Network_Model.md
#- intro/Connecting_Containers_With_Links.md
- intro/Ambassadors.md
- - intro/Local_Development_Workflow.md
- intro/Working_With_Volumes.md
- intro/Compose_For_Dev_Stacks.md
- |
# Exercise — writing a Compose file
Let's write a Compose file for the wordsmith app!
The code is at: https://bitbucket.org/jgarrouste/k8s-wordsmith-exo/src/master/
- - intro/CI_Pipeline.md
- intro/Docker_Machine.md
- intro/Advanced_Dockerfiles.md
- intro/Application_Configuration.md
- intro/Logging.md
- - intro/Container_Engines.md
- intro/Ecosystem.md
- intro/Orchestration_Overview.md
- intro/links.md
- - common/prereqs.md
- kube/versions-k8s.md
- common/sampleapp.md
- common/composescale.md
- common/composedown.md
- kube/concepts-k8s.md
- common/declarative.md
- kube/declarative.md
- kube/kubenet.md
- kube/kubectlget.md
- kube/setup-k8s.md
- - kube/kubectlrun.md
- kube/kubectlexpose.md
- kube/ourapponkube.md
- - kube/dashboard.md
- |
# Exercise — running wordsmith on Kubernetes
Now that we know how to deploy containers on Kubernetes, let's deploy the wordsmith app on our cluster!
The code is at: https://bitbucket.org/jgarrouste/k8s-wordsmith-exo/src/master/
- kube/kubectlscale.md
- kube/daemonset.md
- kube/rollout.md
- - kube/logs-cli.md
- kube/logs-centralized.md
- kube/helm.md
- kube/namespaces.md
- kube/whatsnext.md
- kube/links.md
- common/thankyou.md

View File

@@ -1,59 +0,0 @@
body {
background-image: url("images/container-background.jpg");
max-width: 1024px;
margin: 0 auto;
}
table {
font-size: 20px;
font-family: sans-serif;
background: white;
width: 100%;
height: 100%;
padding: 20px;
}
.header {
font-size: 300%;
font-weight: bold;
}
.title {
font-size: 150%;
font-weight: bold;
}
.details {
font-size: 80%;
font-style: italic;
}
td {
padding: 1px;
height: 1em;
}
td.spacer {
height: unset;
}
td.footer {
padding-top: 80px;
height: 100px;
}
td.title {
border-bottom: thick solid black;
padding-bottom: 2px;
padding-top: 20px;
}
a {
text-decoration: none;
}
a:hover {
background: yellow;
}
a.attend:after {
content: "📅 attend";
}
a.slides:after {
content: "📚 slides";
}
a.chat:after {
content: "💬 chat";
}
a.video:after {
content: "📺 video";
}

View File

@@ -1,146 +0,0 @@
#!/usr/bin/env python2
# coding: utf-8
TEMPLATE="""<html>
<head>
<title>{{ title }}</title>
<link rel="stylesheet" href="index.css">
</head>
<body>
<div class="main">
<table>
<tr><td class="header" colspan="3">{{ title }}</td></tr>
{% if coming_soon %}
<tr><td class="title" colspan="3">Coming soon near you</td></tr>
{% for item in coming_soon %}
<tr>
<td>{{ item.title }}</td>
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />{% endif %}</td>
<td><a class="attend" href="{{ item.attend }}" /></td>
</tr>
<tr>
<td class="details">Scheduled {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
</tr>
{% endfor %}
{% endif %}
{% if past_workshops %}
<tr><td class="title" colspan="3">Past workshops</td></tr>
{% for item in past_workshops[:5] %}
<tr>
<td>{{ item.title }}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
<td>{% if item.video %}<a class="video" href="{{ item.video }}" />{% endif %}</td>
</tr>
<tr>
<td class="details">Delivered {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
</tr>
{% endfor %}
{% if past_workshops[5:] %}
<tr>
<td>... and at least <a href="past.html">{{ past_workshops[5:] | length }} more</a>.</td>
</tr>
{% endif %}
{% endif %}
{% if recorded_workshops %}
<tr><td class="title" colspan="3">Recorded workshops</td></tr>
{% for item in recorded_workshops %}
<tr>
<td>{{ item.title }}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
<td><a class="video" href="{{ item.video }}" /></td>
</tr>
<tr>
<td class="details">Delivered {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
</tr>
{% endfor %}
{% endif %}
{% if self_paced %}
<tr><td class="title" colspan="3">Self-paced tutorials</td></tr>
{% for item in self_paced %}
<tr>
<td>{{ item.title }}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
</tr>
{% endfor %}
{% endif %}
{% if all_past_workshops %}
<tr><td class="title" colspan="3">Past workshops</td></tr>
{% for item in all_past_workshops %}
<tr>
<td>{{ item.title }}</td>
<td><a class="slides" href="{{ item.slides }}" /></td>
{% if item.video %}
<td><a class="video" href="{{ item.video }}" /></td>
{% endif %}
</tr>
<tr>
<td class="details">Delivered {{ item.prettydate }} at {{ item.event }} in {{item.city }}.</td>
</tr>
{% endfor %}
{% endif %}
<tr><td class="spacer"></td></tr>
<tr>
<td class="footer">
Maintained by Jérôme Petazzoni (<a href="https://twitter.com/jpetazzo">@jpetazzo</a>) and <a href="https://github.com/jpetazzo/container.training/graphs/contributors">contributors</a>.
</td>
</tr>
</table>
</div>
</body>
</html>""".decode("utf-8")
import datetime
import jinja2
import yaml
items = yaml.load(open("index.yaml"))
for item in items:
if "date" in item:
date = item["date"]
suffix = {
1: "st", 2: "nd", 3: "rd",
21: "st", 22: "nd", 23: "rd",
31: "st"}.get(date.day, "th")
# %e is a non-standard extension (it displays the day, but without a
# leading zero). If strftime fails with ValueError, try to fall back
# on %d (which displays the day but with a leading zero when needed).
try:
item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
except ValueError:
item["prettydate"] = date.strftime("%B %d{}, %Y").format(suffix)
today = datetime.date.today()
coming_soon = [i for i in items if i.get("date") and i["date"] >= today]
coming_soon.sort(key=lambda i: i["date"])
past_workshops = [i for i in items if i.get("date") and i["date"] < today]
past_workshops.sort(key=lambda i: i["date"], reverse=True)
self_paced = [i for i in items if not i.get("date")]
recorded_workshops = [i for i in items if i.get("video")]
template = jinja2.Template(TEMPLATE)
with open("index.html", "w") as f:
f.write(template.render(
title="Container Training",
coming_soon=coming_soon,
past_workshops=past_workshops,
self_paced=self_paced,
recorded_workshops=recorded_workshops
).encode("utf-8"))
with open("past.html", "w") as f:
f.write(template.render(
title="Container Training",
all_past_workshops=past_workshops
).encode("utf-8"))

View File

@@ -1,420 +0,0 @@
- date: 2018-11-23
city: Copenhagen
country: dk
event: GOTO
title: Build Container Orchestration with Docker Swarm
speaker: bretfisher
attend: https://gotocph.com/2018/workshops/121
- date: 2018-11-08
city: San Francisco, CA
country: us
event: QCON
title: Introduction to Docker and Containers
speaker: jpetazzo
attend: https://qconsf.com/sf2018/workshop/introduction-docker-and-containers
- date: 2018-11-09
city: San Francisco, CA
country: us
event: QCON
title: Getting Started With Kubernetes and Container Orchestration
speaker: jpetazzo
attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration
- date: 2018-10-31
city: London, UK
country: uk
event: Velocity EU
title: Kubernetes 101
speaker: bridgetkromhout
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71149
- date: 2018-10-30
city: London, UK
country: uk
event: Velocity EU
title: "Docker Zero to Hero: Docker, Compose and Production Swarm"
speaker: bretfisher
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71231
- date: 2018-07-12
city: Minneapolis, MN
country: us
event: devopsdays Minneapolis
title: Kubernetes 101
speaker: "ashleymcnamara, bketelsen"
slides: https://devopsdaysmsp2018.container.training
attend: https://www.devopsdays.org/events/2018-minneapolis/registration/
- date: 2018-10-01
city: New York, NY
country: us
event: Velocity
title: Kubernetes 101
speaker: bridgetkromhout
attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/70102
- date: 2018-09-30
city: New York, NY
country: us
event: Velocity
title: Kubernetes Bootcamp - Deploying and Scaling Microservices
speaker: jpetazzo
attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/69875
- date: 2018-09-30
city: New York, NY
country: us
event: Velocity
title: "Docker Zero to Hero: Docker, Compose and Production Swarm"
speaker: bretfisher
attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/70147
- date: 2018-09-17
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
- date: 2018-07-17
city: Portland, OR
country: us
event: OSCON
title: Kubernetes 101
speaker: bridgetkromhout
slides: https://oscon2018.container.training/
attend: https://conferences.oreilly.com/oscon/oscon-or/public/schedule/detail/66287
- date: 2018-06-27
city: Amsterdam
country: nl
event: devopsdays
title: Kubernetes 101
speaker: bridgetkromhout
slides: https://devopsdaysams2018.container.training
attend: https://www.devopsdays.org/events/2018-amsterdam/registration/
- date: 2018-06-12
city: San Jose, CA
country: us
event: Velocity
title: Kubernetes 101
speaker: bridgetkromhout
slides: https://velocitysj2018.container.training
attend: https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/66286
- date: 2018-06-12
city: San Jose, CA
country: us
event: Velocity
title: "Kubernetes two-day kickstart: Deploying and Scaling Microservices with Kubernetes"
speaker: "bketelsen, erikstmartin"
slides: http://kubernetes.academy/kube-fullday.yml.html#1
attend: https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/66932
- date: 2018-06-11
city: San Jose, CA
country: us
event: Velocity
title: "Kubernetes two-day kickstart: Introduction to Docker and Containers"
speaker: "bketelsen, erikstmartin"
slides: http://kubernetes.academy/intro-fullday.yml.html#1
attend: https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/66932
- date: 2018-05-17
city: Virginia Beach, VA
country: us
event: Revolution Conf
title: Docker 101
speaker: bretfisher
slides: https://revconf18.bretfisher.com
- date: 2018-05-10
city: Saint Paul, MN
country: us
event: NDC Minnesota
title: Kubernetes 101
slides: https://ndcminnesota2018.container.training
- date: 2018-05-08
city: Budapest
country: hu
event: CRAFT
title: Swarm Orchestration
slides: https://craftconf18.bretfisher.com
- date: 2018-04-27
city: Chicago, IL
country: us
event: GOTO
title: Swarm Orchestration
slides: https://gotochgo18.bretfisher.com
- date: 2018-04-24
city: Chicago, IL
country: us
event: GOTO
title: Kubernetes 101
slides: http://gotochgo2018.container.training/
- date: 2018-04-11
city: Paris
country: fr
title: Introduction aux conteneurs
lang: fr
slides: https://avril2018.container.training/intro.yml.html
- date: 2018-04-13
city: Paris
country: fr
lang: fr
title: Introduction à l'orchestration
slides: https://avril2018.container.training/kube.yml.html
- date: 2018-04-06
city: Sacramento, CA
country: us
event: MuraCon
title: Docker 101
slides: https://muracon18.bretfisher.com
- date: 2018-03-27
city: Santa Clara, CA
country: us
event: SREcon Americas
title: Kubernetes 101
slides: http://srecon2018.container.training/
- date: 2018-03-27
city: Bergen
country: no
event: Boosterconf
title: Kubernetes 101
slides: http://boosterconf2018.container.training/
- date: 2018-02-22
city: San Francisco, CA
country: us
event: IndexConf
title: Kubernetes 101
slides: http://indexconf2018.container.training/
#attend: https://developer.ibm.com/indexconf/sessions/#!?id=5474
- date: 2017-11-17
city: San Francisco, CA
country: us
event: QCON SF
title: Orchestrating Microservices with Docker Swarm
slides: http://qconsf2017swarm.container.training/
- date: 2017-11-16
city: San Francisco, CA
country: us
event: QCON SF
title: Introduction to Docker and Containers
slides: http://qconsf2017intro.container.training/
video: https://www.youtube.com/playlist?list=PLBAFXs0YjviLgqTum8MkspG_8VzGl6C07
- date: 2017-10-30
city: San Francisco, CA
country: us
event: LISA
title: (M7) Getting Started with Docker and Containers
slides: http://lisa17m7.container.training/
- date: 2017-10-31
city: San Francisco, CA
country: us
event: LISA
title: (T9) Build, Ship, and Run Microservices on a Docker Swarm Cluster
slides: http://lisa17t9.container.training/
- date: 2017-10-26
city: Prague
country: cz
event: Open Source Summit Europe
title: Deploying and scaling microservices with Docker and Kubernetes
slides: http://osseu17.container.training/
video: https://www.youtube.com/playlist?list=PLBAFXs0YjviLrsyydCzxWrIP_1-wkcSHS
- date: 2017-10-16
city: Copenhagen
country: dk
event: DockerCon
title: Swarm from Zero to Hero
slides: http://dc17eu.container.training/
- date: 2017-10-16
city: Copenhagen
country: dk
event: DockerCon
title: Orchestration for Advanced Users
slides: https://www.bretfisher.com/dockercon17eu
- date: 2017-07-25
city: Minneapolis, MN
country: us
event: devopsdays
title: Deploying & Scaling microservices with Docker Swarm
video: https://www.youtube.com/watch?v=DABbqyJeG_E
- date: 2017-06-12
city: Berlin
country: de
event: DevOpsCon
title: Deploying and scaling containerized Microservices with Docker and Swarm
- date: 2017-05-18
city: Portland, OR
country: us
event: PyCon
title: Deploy and scale containers with Docker native, open source orchestration
video: https://www.youtube.com/watch?v=EuzoEaE6Cqs
- date: 2017-05-08
city: Austin, TX
country: us
event: OSCON
title: Deploying and scaling applications in containers with Docker
- date: 2017-05-04
city: Chicago, IL
country: us
event: GOTO
title: Container deployment, scaling, and orchestration with Docker Swarm
- date: 2017-04-17
city: Austin, TX
country: us
event: DockerCon
title: Orchestration Workshop
- date: 2017-03-22
city: San Jose, CA
country: us
event: Devoxx
title: Container deployment, scaling, and orchestration with Docker Swarm
- date: 2017-03-03
city: Pasadena, CA
country: us
event: SCALE
title: Container deployment, scaling, and orchestration with Docker Swarm
- date: 2016-12-06
city: Boston, MA
country: us
event: LISA
title: Deploying and Scaling Applications with Docker Swarm
slides: http://lisa16t1.container.training/
video: https://www.youtube.com/playlist?list=PLBAFXs0YjviIDDhr8vIwCN1wkyNGXjbbc
- date: 2016-10-07
city: Berlin
country: de
event: LinuxCon
title: Orchestrating Containers in Production at Scale with Docker Swarm
- date: 2016-09-20
city: New York, NY
country: us
event: Velocity
title: Deployment and orchestration at scale with Docker
- date: 2016-08-25
city: Toronto
country: ca
event: LinuxCon
title: Orchestrating Containers in Production at Scale with Docker Swarm
- date: 2016-06-22
city: Seattle, WA
country: us
event: DockerCon
title: Orchestration Workshop
- date: 2016-05-29
city: Portland, OR
country: us
event: PyCon
title: Introduction to Docker and containers
slides: https://us.pycon.org/2016/site_media/media/tutorial_handouts/DockerSlides.pdf
video: https://www.youtube.com/watch?v=ZVaRK10HBjo
- date: 2016-05-17
city: Austin, TX
country: us
event: OSCON
title: Deployment and orchestration at scale with Docker Swarm
- date: 2016-04-27
city: Budapest
country: hu
event: CRAFT
title: Advanced Docker concepts and container orchestration
- date: 2016-04-22
city: Berlin
country: de
event: Neofonie
title: Orchestration Workshop
- date: 2016-04-05
city: Stockholm
country: se
event: Praqma
title: Orchestration Workshop
- date: 2016-03-22
city: Munich
country: de
event: Stylight
title: Orchestration Workshop
- date: 2016-03-11
city: London
country: uk
event: QCON
title: Containers in production with Docker Swarm
- date: 2016-02-19
city: Amsterdam
country: nl
event: Container Solutions
title: Orchestration Workshop
- date: 2016-02-15
city: Paris
country: fr
event: Zenika
title: Orchestration Workshop
- date: 2016-01-22
city: Pasadena, CA
country: us
event: SCALE
title: Advanced Docker concepts and container orchestration
#- date: 2015-11-10
# city: Washington DC
# country: us
# event: LISA
# title: Deploying and Scaling Applications with Docker Swarm
#2015-09-24-strangeloop
- title: Introduction to Docker and Containers
slides: intro-selfpaced.yml.html
- title: Container Orchestration with Docker and Swarm
slides: swarm-selfpaced.yml.html
- title: Deploying and Scaling Microservices with Docker and Kubernetes
slides: kube-selfpaced.yml.html

View File

@@ -2,58 +2,82 @@ title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/training-20180605-montpellier)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
slides: http://juin2018.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- common/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
- intro/intro.md
- common/about-slides.md
- common/toc.md
- - intro/Docker_Overview.md
- intro/Docker_History.md
- intro/Training_Environment.md
- intro/Installing_Docker.md
- intro/First_Containers.md
- intro/Background_Containers.md
- intro/Start_And_Attach.md
- - intro/Initial_Images.md
- intro/Building_Images_Interactively.md
- intro/Building_Images_With_Dockerfiles.md
- intro/Cmd_And_Entrypoint.md
- intro/Copying_Files_During_Build.md
- - |
# Exercise — writing Dockerfiles
Let's write Dockerfiles for an existing application!
The code is at: https://bitbucket.org/jgarrouste/k8s-wordsmith-exo/src/master/
- intro/Multi_Stage_Builds.md
- intro/Publishing_To_Docker_Hub.md
- intro/Dockerfile_Tips.md
- |
# Exercise — writing better Dockerfiles
Let's update our Dockerfiles to leverage multi-stage builds!
The code is at: https://bitbucket.org/jgarrouste/k8s-wordsmith-exo/src/master/
Use a different tag for these images, so that we can compare their sizes.
What's the size difference between single-stage and multi-stage builds?
- - intro/Naming_And_Inspecting.md
- intro/Labels.md
- intro/Getting_Inside.md
- intro/Resource_Limits.md
- - intro/Namespaces_Cgroups.md
- intro/Copy_On_Write.md
#- intro/Containers_From_Scratch.md
- - intro/Container_Networking_Basics.md
- intro/Network_Drivers.md
- intro/Container_Network_Model.md
#- intro/Connecting_Containers_With_Links.md
- intro/Ambassadors.md
- - intro/Local_Development_Workflow.md
- intro/Working_With_Volumes.md
- intro/Compose_For_Dev_Stacks.md
- |
# Exercise — writing a Compose file
Let's write a Compose file for the wordsmith app!
The code is at: https://bitbucket.org/jgarrouste/k8s-wordsmith-exo/src/master/
- - intro/CI_Pipeline.md
- intro/Docker_Machine.md
- intro/Advanced_Dockerfiles.md
- intro/Application_Configuration.md
- intro/Logging.md
- - intro/Container_Engines.md
- intro/Ecosystem.md
- intro/Orchestration_Overview.md
- common/thankyou.md
- intro/links.md

View File

@@ -13,47 +13,47 @@ exclude:
- in-person
chapters:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
- common/title.md
# - common/logistics.md
- intro/intro.md
- common/about-slides.md
- common/toc.md
- - intro/Docker_Overview.md
- intro/Docker_History.md
- intro/Training_Environment.md
- intro/Installing_Docker.md
- intro/First_Containers.md
- intro/Background_Containers.md
- intro/Start_And_Attach.md
- - intro/Initial_Images.md
- intro/Building_Images_Interactively.md
- intro/Building_Images_With_Dockerfiles.md
- intro/Cmd_And_Entrypoint.md
- intro/Copying_Files_During_Build.md
- - intro/Multi_Stage_Builds.md
- intro/Publishing_To_Docker_Hub.md
- intro/Dockerfile_Tips.md
- - intro/Naming_And_Inspecting.md
- intro/Labels.md
- intro/Getting_Inside.md
- - intro/Container_Networking_Basics.md
- intro/Network_Drivers.md
- intro/Container_Network_Model.md
#- intro/Connecting_Containers_With_Links.md
- intro/Ambassadors.md
- - intro/Local_Development_Workflow.md
- intro/Working_With_Volumes.md
- intro/Compose_For_Dev_Stacks.md
- intro/Docker_Machine.md
- - intro/Advanced_Dockerfiles.md
- intro/Application_Configuration.md
- intro/Logging.md
- intro/Resource_Limits.md
- - intro/Namespaces_Cgroups.md
- intro/Copy_On_Write.md
#- intro/Containers_From_Scratch.md
- - intro/Container_Engines.md
- intro/Ecosystem.md
- intro/Orchestration_Overview.md
- common/thankyou.md
- intro/links.md

View File

@@ -355,7 +355,7 @@ class: extra-details
## Overriding the `ENTRYPOINT` instruction
The entry point can be overridden as well.
The entry point can be overriden as well.
```bash
$ docker run -it training/ls

View File

@@ -117,7 +117,7 @@ CONTAINER ID IMAGE ... CREATED STATUS ...
Many Docker commands will work on container IDs: `docker stop`, `docker rm`...
If we want to list only the IDs of our containers (without the other columns
If we want to list only the IDs of our containers (without the other colums
or the header line),
we can use the `-q` ("Quiet", "Quick") flag:

View File

@@ -0,0 +1,3 @@
# Building a CI pipeline
.center[![Demo](images/demo.jpg)]

View File

@@ -98,7 +98,7 @@ $ curl localhost:32768
* We can see that metadata with `docker inspect`:
```bash
$ docker inspect --format '{{.Config.ExposedPorts}}' nginx
$ docker inspect nginx --format {{.Config.ExposedPorts}}
map[80/tcp:{}]
```

View File

@@ -64,7 +64,7 @@ Create this Dockerfile.
## Testing our C program
* Create `hello.c` and `Dockerfile` in the same directory.
* Create `hello.c` and `Dockerfile` in the same direcotry.
* Run `docker build -t hello .` in this directory.

View File

@@ -30,7 +30,7 @@
## Environment variables
- Most of the tools (CLI, libraries...) connecting to the Docker API can use environment variables.
- Most of the tools (CLI, libraries...) connecting to the Docker API can use ennvironment variables.
- These variables are:
@@ -40,7 +40,7 @@
- `DOCKER_CERT_PATH` (path to the keypair and certificate to use for auth)
- `docker-machine env ...` will generate the variables needed to connect to a host.
- `docker-machine env ...` will generate the variables needed to connect to an host.
- `$(eval docker-machine env ...)` sets these variables in the current shell.
@@ -50,7 +50,7 @@
With `docker-machine`, we can:
- upgrade a host to the latest version of the Docker Engine,
- upgrade an host to the latest version of the Docker Engine,
- start/stop/restart hosts,

View File

@@ -312,7 +312,7 @@ CMD gunicorn --bind 0.0.0.0:5000 --workers 10 counter:app
EXPOSE 5000
```
(Source: [trainingwheels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))
(Source: [traininghweels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))
---

View File

@@ -176,7 +176,7 @@ $ docker run -d -v $(pwd):/src -P namer
* `namer` is the name of the image we will run.
* We don't specify a command to run because it is already set in the Dockerfile.
* We don't specify a command to run because is is already set in the Dockerfile.
Note: on Windows, replace `$(pwd)` with `%cd%` (or `${pwd}` if you use PowerShell).

View File

@@ -102,7 +102,7 @@ Cons:
- not very readable
- some unnecessary files might still remain if the cleanup is not thorough
- some unnecessary files might still remain if the cleanup is not torough
- that layer is expensive (slow to build)

View File

@@ -144,7 +144,7 @@ class: extra-details, deep-dive
- Also allows to set the NIS domain.
(If you don't know what a NIS domain is, you don't have to worry about it!)
(If you dont' know what a NIS domain is, you don't have to worry about it!)
- If you're wondering: UTS = UNIX time sharing.

View File

@@ -36,6 +36,10 @@ individual Docker VM.*
- It comes pre-loaded with Docker and some other useful tools.
- **Keep the card with your VM IP address!**
**(We will be in a different room tomorrow.)**
---
## What *is* Docker?

View File

@@ -1,131 +0,0 @@
# Accessing internal services
- When we are logged in on a cluster node, we can access internal services
(by virtue of the Kubernetes network model: all nodes can reach all pods and services)
- When we are accessing a remote cluster, things are different
(generally, our local machine won't have access to the cluster's internal subnet)
- How can we temporarily access a service without exposing it to everyone?
--
- `kubectl proxy`: gives us access to the API, which includes a proxy for HTTP resources
- `kubectl port-forward`: allows forwarding of TCP ports to arbitrary pods, services, ...
---
## Suspension of disbelief
The exercises in this section assume that we have set up `kubectl` on our
local machine in order to access a remote cluster.
We will therefore show how to access services and pods of the remote cluster,
from our local machine.
You can also run these exercises directly on the cluster (if you haven't
installed and set up `kubectl` locally).
Running commands locally will be less useful
(since you could access services and pods directly),
but keep in mind that these commands will work anywhere as long as you have
installed and set up `kubectl` to communicate with your cluster.
---
## `kubectl proxy` in theory
- Running `kubectl proxy` gives us access to the entire Kubernetes API
- The API includes routes to proxy HTTP traffic
- These routes look like the following:
`/api/v1/namespaces/<namespace>/services/<service>/proxy`
- We just add the URI to the end of the request, for instance:
`/api/v1/namespaces/<namespace>/services/<service>/proxy/index.html`
- We can access `services` and `pods` this way
---
## `kubectl proxy` in practice
- Let's access the `webui` service through `kubectl proxy`
.exercise[
- Run an API proxy in the background:
```bash
kubectl proxy &
```
- Access the `webui` service:
```bash
curl localhost:8001/api/v1/namespaces/default/services/webui/proxy/index.html
```
- Terminate the proxy:
```bash
kill %1
```
]
---
## `kubectl port-forward` in theory
- What if we want to access a TCP service?
- We can use `kubectl port-forward` instead
- It will create a TCP relay to forward connections to a specific port
(of a pod, service, deployment...)
- The syntax is:
`kubectl port-forward service/name_of_service local_port:remote_port`
- If only one port number is specified, it is used for both local and remote ports
---
## `kubectl port-forward` in practice
- Let's access our remote Redis server
.exercise[
- Forward connections from local port 10000 to remote port 6379:
```bash
kubectl port-forward svc/redis 10000:6379 &
```
- Connect to the Redis server:
```bash
telnet localhost 10000
```
- Issue a few commands, e.g. `INFO server` then `QUIT`
<!--
```wait Connected to localhost```
```keys INFO server```
```keys ^J```
```keys QUIT```
```keys ^J```
-->
- Terminate the port forwarder:
```bash
kill %1
```
]

View File

@@ -1,533 +0,0 @@
# Authentication and authorization
*And first, a little refresher!*
- Authentication = verifying the identity of a person
On a UNIX system, we can authenticate with login+password, SSH keys ...
- Authorization = listing what they are allowed to do
On a UNIX system, this can include file permissions, sudoer entries ...
- Sometimes abbreviated as "authn" and "authz"
- In good modular systems, these things are decoupled
(so we can e.g. change a password or SSH key without having to reset access rights)
---
## Authentication in Kubernetes
- When the API server receives a request, it tries to authenticate it
(it examines headers, certificates ... anything available)
- Many authentication methods can be used simultaneously:
- TLS client certificates (that's what we've been doing with `kubectl` so far)
- bearer tokens (a secret token in the HTTP headers of the request)
- [HTTP basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication) (carrying user and password in an HTTP header)
- authentication proxy (sitting in front of the API and setting trusted headers)
- It's the job of the authentication method to produce:
- the user name
- the user ID
- a list of groups
- The API server doesn't interpret these; it'll be the job of *authorizers*
---
## Anonymous requests
- If any authentication method *rejects* a request, it's denied
(`401 Unauthorized` HTTP code)
- If a request is neither rejected nor accepted by anyone, it's anonymous
- the user name is `system:anonymous`
- the list of groups is `[system:unauthenticated]`
- By default, the anonymous user can't do anything
(that's what you get if you just `curl` the Kubernetes API)
---
## Authentication with TLS certificates
- This is enabled in most Kubernetes deployments
- The user name is derived from the `CN` in the client certificates
- The groups are derived from the `O` fields in the client certificate
- From the point of view of the Kubernetes API, users do not exist
(i.e. they are not stored in etcd or anywhere else)
- Users can be created (and given membership to groups) independently of the API
- The Kubernetes API can be set up to use your custom CA to validate client certs
---
class: extra-details
## Viewing our admin certificate
- Let's inspect the certificate we've been using all this time!
.exercise[
- This command will show the `CN` and `O` fields for our certificate:
```bash
kubectl config view \
--raw \
-o json \
| jq -r .users[0].user[\"client-certificate-data\"] \
| base64 -d \
| openssl x509 -text \
| grep Subject:
```
]
Let's break down that command together! 😅
---
class: extra-details
## Breaking down the command
- `kubectl config view` shows the Kubernetes user configuration
- `--raw` includes certificate information (which shows as REDACTED otherwise)
- `-o json` outputs the information in JSON format
- `| jq ...` extracts the field with the user certificate (in base64)
- `| base64 -d` decodes the base64 format (now we have a PEM file)
- `| openssl x509 -text` parses the certificate and outputs it as plain text
- `| grep Subject:` shows us the line that interests us
→ We are user `kubernetes-admin`, in group `system:masters`.
---
## Authentication with tokens
- Tokens are passed as HTTP headers:
`Authorization: Bearer and-then-here-comes-the-token`
- Tokens can be validated through a number of different methods:
- static tokens hard-coded in a file on the API server
- [bootstrap tokens](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) (special case to create a cluster or join nodes)
- [OpenID Connect tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens) (to delegate authentication to compatible OAuth2 providers)
- service accounts (these deserve more details, coming right up!)
---
## Service accounts
- A service account is a user that exists in the Kubernetes API
(it is visible with e.g. `kubectl get serviceaccounts`)
- Service accounts can therefore be created / updated dynamically
(they don't require hand-editing a file and restarting the API server)
- A service account is associated with a set of secrets
(the kind that you can view with `kubectl get secrets`)
- Service accounts are generally used to grant permissions to applications, services ...
(as opposed to humans)
---
class: extra-details
## Token authentication in practice
- We are going to list existing service accounts
- Then we will extract the token for a given service account
- And we will use that token to authenticate with the API
---
class: extra-details
## Listing service accounts
.exercise[
- The resource name is `serviceaccount` or `sa` in short:
```bash
kubectl get sa
```
]
There should be just one service account in the default namespace: `default`.
---
class: extra-details
## Finding the secret
.exercise[
- List the secrets for the `default` service account:
```bash
kubectl get sa default -o yaml
SECRET=$(kubectl get sa default -o json | jq -r .secrets[0].name)
```
]
It should be named `default-token-XXXXX`.
---
class: extra-details
## Extracting the token
- The token is stored in the secret, wrapped with base64 encoding
.exercise[
- View the secret:
```bash
kubectl get secret $SECRET -o yaml
```
- Extract the token and decode it:
```bash
TOKEN=$(kubectl get secret $SECRET -o json \
| jq -r .data.token | base64 -d)
```
]
---
class: extra-details
## Using the token
- Let's send a request to the API, without and with the token
.exercise[
- Find the ClusterIP for the `kubernetes` service:
```bash
kubectl get svc kubernetes
API=$(kubectl get svc kubernetes -o json | jq -r .spec.clusterIP)
```
- Connect without the token:
```bash
curl -k https://$API
```
- Connect with the token:
```bash
curl -k -H "Authorization: Bearer $TOKEN" https://$API
```
]
---
class: extra-details
## Results
- In both cases, we will get a "Forbidden" error
- Without authentication, the user is `system:anonymous`
- With authentication, it is shown as `system:serviceaccount:default:default`
- The API "sees" us as a different user
- But neither user has any right, so we can't do nothin'
- Let's change that!
---
## Authorization in Kubernetes
- There are multiple ways to grant permissions in Kubernetes, called [authorizers](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#authorization-modules):
- [Node Authorization](https://kubernetes.io/docs/reference/access-authn-authz/node/) (used internally by kubelet; we can ignore it)
- [Attribute-based access control](https://kubernetes.io/docs/reference/access-authn-authz/abac/) (powerful but complex and static; ignore it too)
- [Webhook](https://kubernetes.io/docs/reference/access-authn-authz/webhook/) (each API request is submitted to an external service for approval)
- [Role-based access control](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) (associates permissions to users dynamically)
- The one we want is the last one, generally abbreviated as RBAC
---
## Role-based access control
- RBAC allows us to specify fine-grained permissions
- Permissions are expressed as *rules*
- A rule is a combination of:
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete ...
- resources (as in "API resource", like pods, nodes, services ...)
- resource names (to specify e.g. one specific pod instead of all pods)
- in some cases, [subresources](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources) (e.g. logs are subresources of pods)
---
## From rules to roles to rolebindings
- A *role* is an API object containing a list of *rules*
Example: role "external-load-balancer-configurator" can:
- [list, get] resources [endpoints, services, pods]
- [update] resources [services]
- A *rolebinding* associates a role with a user
Example: rolebinding "external-load-balancer-configurator":
- associates user "external-load-balancer-configurator"
- with role "external-load-balancer-configurator"
- Yes, there can be users, roles, and rolebindings with the same name
- It's a good idea for 1-1-1 bindings; not so much for 1-N ones
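A minimal sketch of that example role (assuming the `rbac.authorization.k8s.io/v1` API version; the resources and verbs are the ones listed above):
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: external-load-balancer-configurator
rules:
  - apiGroups: [""]          # "" is the core API group (endpoints, services, pods)
    resources: ["endpoints", "services", "pods"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["update"]
```
The matching rolebinding could then be created with
`kubectl create rolebinding external-load-balancer-configurator --role=external-load-balancer-configurator --user=external-load-balancer-configurator`.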
---
## Cluster-scope permissions
- API resources Role and RoleBinding are for objects within a namespace
- We can also define API resources ClusterRole and ClusterRoleBinding
- These are a superset, allowing us to:
- specify actions on cluster-wide objects (like nodes)
- operate across all namespaces
- We can create Role and RoleBinding resources within a namespace
- ClusterRole and ClusterRoleBinding resources are global
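For instance, here is a sketch of a cluster-wide binding, granting the built-in `view` cluster role to a hypothetical group named `ops`:
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ops-can-view               # hypothetical name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view                       # built-in cluster role
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: ops                      # hypothetical group
```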
---
## Pods and service accounts
- A pod can be associated to a service account
- by default, it is associated to the `default` service account
- as we've seen earlier, this service account has no permission anyway
- The associated token is exposed into the pod's filesystem
(in `/var/run/secrets/kubernetes.io/serviceaccount/token`)
- Standard Kubernetes tooling (like `kubectl`) will look for it there
- So Kubernetes tools running in a pod will automatically use the service account
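Associating a pod with a service account is a single field in the pod spec; here is a sketch (assuming a service account named `viewer` exists; the pod name is hypothetical):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: viewer-pod                 # hypothetical name
spec:
  serviceAccountName: viewer       # defaults to "default" if omitted
  containers:
    - name: main
      image: alpine
      command: ["sleep", "3600"]
      # the corresponding token will show up in:
      # /var/run/secrets/kubernetes.io/serviceaccount/token
```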
---
## In practice
- We are going to create a service account
- We will use an existing cluster role (`view`)
- We will bind together this role and this service account
- Then we will run a pod using that service account
- In this pod, we will install `kubectl` and check our permissions
---
## Creating a service account
- We will call the new service account `viewer`
(note that nothing prevents us from calling it `view`, like the role)
.exercise[
- Create the new service account:
```bash
kubectl create serviceaccount viewer
```
- List service accounts now:
```bash
kubectl get serviceaccounts
```
]
---
## Binding a role to the service account
- Binding a role = creating a *rolebinding* object
- We will call that object `viewercanview`
(but again, we could call it `view`)
.exercise[
- Create the new role binding:
```bash
kubectl create rolebinding viewercanview \
--clusterrole=view \
--serviceaccount=default:viewer
```
]
It's important to note a couple of details in these flags ...
---
## Roles vs Cluster Roles
- We used `--clusterrole=view`
- What would have happened if we had used `--role=view`?
- we would have bound the role `view` from the local namespace
<br/>(instead of the cluster role `view`)
- the command would have worked fine (no error)
- but later, our API requests would have been denied
- This is a deliberate design decision
(we can reference roles that don't exist, and create/update them later)
---
## Users vs Service Accounts
- We used `--serviceaccount=default:viewer`
- What would have happened if we had used `--user=default:viewer`?
- we would have bound the role to a user instead of a service account
- again, the command would have worked fine (no error)
- ... but our API requests would have been denied later
- What about the `default:` prefix?
- that's the namespace of the service account
- yes, it could be inferred from context, but ... `kubectl` requires it
---
## Testing
- We will run an `alpine` pod and install `kubectl` there
.exercise[
- Run a one-time pod:
```bash
kubectl run eyepod --rm -ti --restart=Never \
--serviceaccount=viewer \
--image alpine
```
- Install `curl`, then use it to install `kubectl`:
```bash
apk add --no-cache curl
URLBASE=https://storage.googleapis.com/kubernetes-release/release
KUBEVER=$(curl -s $URLBASE/stable.txt)
curl -LO $URLBASE/$KUBEVER/bin/linux/amd64/kubectl
chmod +x kubectl
```
]
---
## Running `kubectl` in the pod
- We'll try to use our `view` permissions, then to create an object
.exercise[
- Check that we can, indeed, view things:
```bash
./kubectl get all
```
- But that we can't create things:
```
./kubectl run tryme --image=nginx
```
- Exit the container with `exit` or `^D`
<!-- ```keys ^D``` -->
]
---
## Testing directly with `kubectl`
- We can also check for permission with `kubectl auth can-i`:
```bash
kubectl auth can-i list nodes
kubectl auth can-i create pods
kubectl auth can-i get pod/name-of-pod
kubectl auth can-i get /url-fragment-of-api-request/
kubectl auth can-i '*' services
```
- And we can check permissions on behalf of other users:
```bash
kubectl auth can-i list nodes \
--as some-user
kubectl auth can-i list nodes \
--as system:serviceaccount:<namespace>:<name-of-service-account>
```

View File

@@ -1,161 +0,0 @@
# Building images with the Docker Engine
- Until now, we have built our images manually, directly on a node
- We are going to show how to build images from within the cluster
(by executing code in a container controlled by Kubernetes)
- We are going to use the Docker Engine for that purpose
- To access the Docker Engine, we will mount the Docker socket in our container
- After building the image, we will push it to our self-hosted registry
---
## Resource specification for our builder pod
.small[
```yaml
apiVersion: v1
kind: Pod
metadata:
name: build-image
spec:
restartPolicy: OnFailure
containers:
- name: docker-build
image: docker
env:
- name: REGISTRY_PORT
value: "`3XXXX`"
command: ["sh", "-c"]
args:
- |
apk add --no-cache git &&
mkdir /workspace &&
git clone https://github.com/jpetazzo/container.training /workspace &&
docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
docker push localhost:$REGISTRY_PORT/worker
volumeMounts:
- name: docker-socket
mountPath: /var/run/docker.sock
volumes:
- name: docker-socket
hostPath:
path: /var/run/docker.sock
```
]
---
## Breaking down the pod specification (1/2)
- `restartPolicy: OnFailure` prevents the build from running in an infinite loop
- We use the `docker` image (so that the `docker` CLI is available)
- We rely on the fact that the `docker` image is based on `alpine`
(which is why we use `apk` to install `git`)
- The port for the registry is passed through an environment variable
(this avoids repeating it in the specification, which would be error-prone)
.warning[The environment variable has to be a string, so the `"`s are mandatory!]
---
## Breaking down the pod specification (2/2)
- The volume `docker-socket` is declared with a `hostPath`, indicating a bind-mount
- It is then mounted in the container onto the default Docker socket path
- We show an interesting way to specify the commands to run in the container:
- the command executed will be `sh -c <args>`
- `args` is a list of strings
- `|` is used to pass a multi-line string in the YAML file
---
## Running our pod
- Let's try this out!
.exercise[
- Check the port used by our self-hosted registry:
```bash
kubectl get svc registry
```
- Edit `~/container.training/k8s/docker-build.yaml` to put the port number
- Schedule the pod by applying the resource file:
```bash
kubectl apply -f ~/container.training/k8s/docker-build.yaml
```
- Watch the logs:
```bash
stern build-image
```
<!--
```longwait latest: digest: sha256:```
```keys ^C```
-->
]
---
## What's missing?
What do we need to change to make this production-ready?
- Build from a long-running container (e.g. a `Deployment`) triggered by web hooks
(the payload of the web hook could indicate the repository to build)
- Build a specific branch or tag; tag image accordingly
- Handle repositories where the Dockerfile is not at the root
(or containing multiple Dockerfiles)
- Expose build logs so that troubleshooting is straightforward
--
🤔 That seems like a lot of work!
--
That's why services like Docker Hub (with [automated builds](https://docs.docker.com/docker-hub/builds/)) are helpful.
<br/>
They handle the whole "code repository → Docker image" workflow.
---
## Things to be aware of
- This is talking directly to a node's Docker Engine to build images
- It bypasses resource allocation mechanisms used by Kubernetes
(but you can use *taints* and *tolerations* to dedicate builder nodes)
- Be careful not to introduce conflicts when naming images
(e.g. do not allow the user to specify the image names!)
- Your builds are going to be *fast*
(because they will leverage Docker's caching system)

View File

@@ -1,218 +0,0 @@
# Building images with Kaniko
- [Kaniko](https://github.com/GoogleContainerTools/kaniko) is an open source tool to build container images within Kubernetes
- It can build an image using any standard Dockerfile
- The resulting image can be pushed to a registry or exported as a tarball
- It doesn't require any particular privilege
(and can therefore run in a regular container in a regular pod)
- This combination of features is pretty unique
(most other tools use different formats, or require elevated privileges)
---
## Kaniko in practice
- Kaniko provides an "executor image", `gcr.io/kaniko-project/executor`
- When running that image, we need to specify at least:
- the path to the build context (=the directory with our Dockerfile)
- the target image name (including the registry address)
- Simplified example:
```
docker run \
-v ...:/workspace gcr.io/kaniko-project/executor \
--context=/workspace \
--destination=registry:5000/image_name:image_tag
```
---
## Running Kaniko in a Docker container
- Let's build the image for the DockerCoins `worker` service with Kaniko
.exercise[
- Find the port number for our self-hosted registry:
```bash
kubectl get svc registry
PORT=$(kubectl get svc registry -o json | jq .spec.ports[0].nodePort)
```
- Run Kaniko:
```bash
docker run --net host \
-v ~/container.training/dockercoins/worker:/workspace \
gcr.io/kaniko-project/executor \
--context=/workspace \
--destination=127.0.0.1:$PORT/worker-kaniko:latest
```
]
We use `--net host` so that we can connect to the registry over `127.0.0.1`.
---
## Running Kaniko in a Kubernetes pod
- We need to mount or copy the build context to the pod
- We are going to build straight from the git repository
(to avoid depending on files sitting on a node, outside of containers)
- We need to `git clone` the repository before running Kaniko
- We are going to use two containers sharing a volume:
- a first container to `git clone` the repository to the volume
- a second container to run Kaniko, using the content of the volume
- However, we need the first container to be done before running the second one
🤔 How could we do that?
---
## [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) to the rescue
- A pod can have a list of `initContainers`
- `initContainers` are executed in the specified order
- Each Init Container needs to complete (exit) successfully
- If any Init Container fails (non-zero exit status) the pod fails
(what happens next depends on the pod's `restartPolicy`)
- After all Init Containers have run successfully, normal `containers` are started
- We are going to execute the `git clone` operation in an Init Container
---
## Our Kaniko builder pod
.small[
```yaml
apiVersion: v1
kind: Pod
metadata:
name: kaniko-build
spec:
initContainers:
- name: git-clone
image: alpine
command: ["sh", "-c"]
args:
- |
apk add --no-cache git &&
git clone git://github.com/jpetazzo/container.training /workspace
volumeMounts:
- name: workspace
mountPath: /workspace
containers:
- name: build-image
image: gcr.io/kaniko-project/executor:latest
args:
- "--context=/workspace/dockercoins/rng"
- "--insecure"
- "--destination=registry:5000/rng-kaniko:latest"
volumeMounts:
- name: workspace
mountPath: /workspace
volumes:
- name: workspace
```
]
---
## Explanations
- We define a volume named `workspace` (using the default `emptyDir` provider)
- That volume is mounted to `/workspace` in both our containers
- The `git-clone` Init Container installs `git` and runs `git clone`
- The `build-image` container executes Kaniko
- We use our self-hosted registry DNS name (`registry`)
- We add `--insecure` to use plain HTTP to talk to the registry
---
## Running our Kaniko builder pod
- The YAML for the pod is in `k8s/kaniko-build.yaml`
.exercise[
- Create the pod:
```bash
kubectl apply -f ~/container.training/k8s/kaniko-build.yaml
```
- Watch the logs:
```bash
stern kaniko
```
<!--
```longwait registry:5000/rng-kaniko:latest:```
```keys ^C```
-->
]
---
## Discussion
*What should we use? The Docker build technique shown earlier? Kaniko? Something else?*
- The Docker build technique is simple, and has the potential to be very fast
- However, it doesn't play nice with Kubernetes resource limits
- Kaniko plays nice with resource limits
- However, it's slower (there is no caching at all)
- The ultimate building tool will probably be [Jessica Frazelle](https://twitter.com/jessfraz)'s [img](https://github.com/genuinetools/img) builder
(it depends on upstream changes that are not in Kubernetes 1.11.2 yet)
But ... is it all about [speed](https://github.com/AkihiroSuda/buildbench/issues/1)? (No!)
---
## The big picture
- For starters: the [Docker Hub automated builds](https://docs.docker.com/docker-hub/builds/) are very easy to set up
- link a GitHub repository with the Docker Hub
- each time you push to GitHub, an image gets built on the Docker Hub
- If this doesn't work for you: why?
- too slow (I'm far from `us-east-1`!) → consider using your cloud provider's registry
- I'm not using a cloud provider → ok, perhaps you need to self-host then
- I need fancy features (e.g. CI) → consider something like GitLab

View File

@@ -1,542 +0,0 @@
# Managing configuration
- Some applications need to be configured (obviously!)
- There are many ways for our code to pick up configuration:
- command-line arguments
- environment variables
- configuration files
- configuration servers (getting configuration from a database, an API...)
- ... and more (because programmers can be very creative!)
- How can we do these things with containers and Kubernetes?
---
## Passing configuration to containers
- There are many ways to pass configuration to code running in a container:
- baking it in a custom image
- command-line arguments
- environment variables
- injecting configuration files
- exposing it over the Kubernetes API
- configuration servers
- Let's review these different strategies!
---
## Baking custom images
- Put the configuration in the image
(it can be in a configuration file, but also `ENV` or `CMD` actions)
- It's easy! It's simple!
- Unfortunately, it also has downsides:
- multiplication of images
- different images for dev, staging, prod ...
- minor reconfigurations require a whole build/push/pull cycle
- Avoid doing it unless you don't have the time to figure out other options
---
## Command-line arguments
- Pass options to `args` array in the container specification
- Example ([source](https://github.com/coreos/pods/blob/master/kubernetes.yaml#L29)):
```yaml
args:
- "--data-dir=/var/lib/etcd"
- "--advertise-client-urls=http://127.0.0.1:2379"
- "--listen-client-urls=http://127.0.0.1:2379"
- "--listen-peer-urls=http://127.0.0.1:2380"
- "--name=etcd"
```
- The options can be passed directly to the program that we run ...
... or to a wrapper script that will use them to e.g. generate a config file
---
## Command-line arguments, pros & cons
- Works great when options are passed directly to the running program
(otherwise, a wrapper script can work around the issue)
- Works great when there aren't too many parameters
(to avoid a 20-lines `args` array)
- Requires documentation and/or understanding of the underlying program
("which parameters and flags do I need, again?")
- Well-suited for mandatory parameters (without default values)
- Not ideal when we need to pass a real configuration file anyway
---
## Environment variables
- Pass options through the `env` map in the container specification
- Example:
```yaml
env:
- name: ADMIN_PORT
value: "8080"
- name: ADMIN_AUTH
value: Basic
- name: ADMIN_CRED
value: "admin:0pensesame!"
```
.warning[`value` must be a string! Make sure that numbers and fancy strings are quoted.]
🤔 Why this weird `{name: xxx, value: yyy}` scheme? It will be revealed soon!
---
## The downward API
- In the previous example, environment variables have fixed values
- We can also use a mechanism called the *downward API*
- The downward API allows us to expose pod or container information
- either through special files (we won't show that for now)
- or through environment variables
- The value of these environment variables is computed when the container is started
- Remember: environment variables won't (can't) change after container start
- Let's see a few concrete examples!
---
## Exposing the pod's namespace
```yaml
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
```
- Useful to generate FQDN of services
(in some contexts, a short name is not enough)
- For instance, the two commands should be equivalent:
```
curl api-backend
curl api-backend.$MY_POD_NAMESPACE.svc.cluster.local
```
---
## Exposing the pod's IP address
```yaml
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
```
- Useful if we need to know our IP address
(we could also read it from `eth0`, but this is more solid)
---
## Exposing the container's resource limits
```yaml
- name: MY_MEM_LIMIT
valueFrom:
resourceFieldRef:
containerName: test-container
resource: limits.memory
```
- Useful for runtimes where memory is garbage collected
- Example: the JVM
(the memory available to the JVM should be set with the `-Xmx ` flag)
- Best practice: set a memory limit, and pass it to the runtime
(see [this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/) for a detailed example)
---
## More about the downward API
- [This documentation page](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/) tells more about these environment variables
- And [this one](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) explains the other way to use the downward API
(through files that get created in the container filesystem)
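For reference, here is a sketch of that file-based variant: a `downwardAPI` volume exposing the pod's labels as a file (all names here are hypothetical):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: downward-demo
  labels:
    app: demo
spec:
  containers:
    - name: main
      image: alpine
      command: ["sleep", "3600"]
      volumeMounts:
        - name: podinfo
          mountPath: /etc/podinfo
  volumes:
    - name: podinfo
      downwardAPI:
        items:
          - path: labels           # readable at /etc/podinfo/labels
            fieldRef:
              fieldPath: metadata.labels
```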
---
## Environment variables, pros and cons
- Works great when the running program expects these variables
- Works great for optional parameters with reasonable defaults
(since the container image can provide these defaults)
- Sort of auto-documented
(we can see which environment variables are defined in the image, and their values)
- Can be (ab)used with longer values ...
- ... You *can* put an entire Tomcat configuration file in an environment variable ...
- ... But *should* you?
(Do it if you really need to, we're not judging! But we'll see better ways.)
---
## Injecting configuration files
- Sometimes, there is no way around it: we need to inject a full config file
- Kubernetes provides a mechanism for that purpose: `configmaps`
- A configmap is a Kubernetes resource that exists in a namespace
- Conceptually, it's a key/value map
(values are arbitrary strings)
- We can think about them in (at least) two different ways:
- as holding entire configuration file(s)
- as holding individual configuration parameters
*Note: to hold sensitive information, we can use "Secrets", which
are another type of resource behaving very much like configmaps.
We'll cover them just after!*
---
## Configmaps storing entire files
- In this case, each key/value pair corresponds to a configuration file
- Key = name of the file
- Value = content of the file
- There can be one key/value pair, or as many as necessary
(for complex apps with multiple configuration files)
- Examples:
```
# Create a configmap with a single key, "app.conf"
kubectl create configmap my-app-config --from-file=app.conf
# Create a configmap with a single key, "app.conf" but another file
kubectl create configmap my-app-config --from-file=app.conf=app-prod.conf
# Create a configmap with multiple keys (one per file in the config.d directory)
kubectl create configmap my-app-config --from-file=config.d/
```
---
## Configmaps storing individual parameters
- In this case, each key/value pair corresponds to a parameter
- Key = name of the parameter
- Value = value of the parameter
- Examples:
```
# Create a configmap with two keys
kubectl create cm my-app-config \
--from-literal=foreground=red \
--from-literal=background=blue
# Create a configmap from a file containing key=val pairs
kubectl create cm my-app-config \
--from-env-file=app.conf
```
---
## Exposing configmaps to containers
- Configmaps can be exposed as plain files in the filesystem of a container
- this is achieved by declaring a volume and mounting it in the container
- this is particularly effective for configmaps containing whole files
- Configmaps can be exposed as environment variables in the container
- this is achieved with the downward API
- this is particularly effective for configmaps containing individual parameters
- Let's see how to do both!
---
## Passing a configuration file with a configmap
- We will start a load balancer powered by HAProxy
- We will use the [official `haproxy` image](https://hub.docker.com/_/haproxy/)
- It expects to find its configuration in `/usr/local/etc/haproxy/haproxy.cfg`
- We will provide a simple HAProxy configuration, `k8s/haproxy.cfg`
- It listens on port 80, and load balances connections between Google and Bing
---
## Creating the configmap
.exercise[
- Go to the `k8s` directory in the repository:
```bash
cd ~/container.training/k8s
```
- Create a configmap named `haproxy` and holding the configuration file:
```bash
kubectl create configmap haproxy --from-file=haproxy.cfg
```
- Check what our configmap looks like:
```bash
kubectl get configmap haproxy -o yaml
```
]
---
## Using the configmap
We are going to use the following pod definition:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: haproxy
spec:
volumes:
- name: config
configMap:
name: haproxy
containers:
- name: haproxy
image: haproxy
volumeMounts:
- name: config
mountPath: /usr/local/etc/haproxy/
```
---
## Using the configmap
- The resource definition from the previous slide is in `k8s/haproxy.yaml`
.exercise[
- Create the HAProxy pod:
```bash
kubectl apply -f ~/container.training/k8s/haproxy.yaml
```
<!-- ```hide kubectl wait pod haproxy --for condition=ready``` -->
- Check the IP address allocated to the pod:
```bash
kubectl get pod haproxy -o wide
IP=$(kubectl get pod haproxy -o json | jq -r .status.podIP)
```
]
---
## Testing our load balancer
- The load balancer will send:
- half of the connections to Google
- the other half to Bing
.exercise[
- Access the load balancer a few times:
```bash
curl -I $IP
curl -I $IP
curl -I $IP
```
]
We should see connections served by Google (look for the `Location` header) and others served by Bing (indicated by the `X-MSEdge-Ref` header).
---
## Exposing configmaps with the downward API
- We are going to run a Docker registry on a custom port
- By default, the registry listens on port 5000
- This can be changed by setting environment variable `REGISTRY_HTTP_ADDR`
- We are going to store the port number in a configmap
- Then we will expose that configmap to a container environment variable
---
## Creating the configmap
.exercise[
- Our configmap will have a single key, `http.addr`:
```bash
kubectl create configmap registry --from-literal=http.addr=0.0.0.0:80
```
- Check our configmap:
```bash
kubectl get configmap registry -o yaml
```
]
---
## Using the configmap
We are going to use the following pod definition:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: registry
spec:
containers:
- name: registry
image: registry
env:
- name: REGISTRY_HTTP_ADDR
valueFrom:
configMapKeyRef:
name: registry
key: http.addr
```
---
## Using the configmap
- The resource definition from the previous slide is in `k8s/registry.yaml`
.exercise[
- Create the registry pod:
```bash
kubectl apply -f ~/container.training/k8s/registry.yaml
```
<!-- ```hide kubectl wait pod registry --for condition=ready``` -->
- Check the IP address allocated to the pod:
```bash
kubectl get pod registry -o wide
IP=$(kubectl get pod registry -o json | jq -r .status.podIP)
```
- Confirm that the registry is available on port 80:
```bash
curl $IP/v2/_catalog
```
]
---
## Passwords, tokens, sensitive information
- For sensitive information, there is another special resource: *Secrets*
- Secrets and Configmaps work almost the same way
(we'll expose the differences on the next slide)
- The *intent* is different, though:
*"You should use secrets for things which are actually secret like API keys,
credentials, etc., and use config map for not-secret configuration data."*
*"In the future there will likely be some differentiators for secrets like rotation or support for backing the secret API w/ HSMs, etc."*
(Source: [the author of both features](https://stackoverflow.com/a/36925553/580281))
---
## Differences between configmaps and secrets
- Secrets are base64-encoded when shown with `kubectl get secrets -o yaml`
- keep in mind that this is just *encoding*, not *encryption*
- it is very easy to [automatically extract and decode secrets](https://medium.com/@mveritym/decoding-kubernetes-secrets-60deed7a96a3)
- [Secrets can be encrypted at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/)
- With RBAC, we can authorize a user to access configmaps, but not secrets
(since they are two different kinds of resources)
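To make the "encoding, not encryption" point concrete, here is a sketch of what a secret looks like (hypothetical name; the value is merely base64-encoded):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-app-secrets             # hypothetical name
type: Opaque
data:
  # echo -n '0pensesame!' | base64
  admin-password: MHBlbnNlc2FtZSE=
```
Such a secret could be created with `kubectl create secret generic my-app-secrets --from-literal=admin-password='0pensesame!'`.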

View File

@@ -1,239 +0,0 @@
# Git-based workflows
- Deploying with `kubectl` has downsides:
- we don't know *who* deployed *what* and *when*
- there is no audit trail (except the API server logs)
- there is no easy way to undo most operations
- there is no review/approval process (like for code reviews)
- We have all these things for *code*, though
- Can we manage cluster state like we manage our source code?
---
## Reminder: Kubernetes is *declarative*
- All we do is create/change resources
- These resources have a perfect YAML representation
- All we do is manipulate these YAML representations
(`kubectl run` generates a YAML file that gets applied)
- We can store these YAML representations in a code repository
- We can version that code repository and maintain it with best practices
- define which branch(es) can go to qa/staging/production
- control who can push to which branches
- have formal review processes, pull requests ...
---
## Enabling git-based workflows
- There are a few tools out there to help us do that
- We'll see demos of two of them: [Flux] and [Gitkube]
- There are *many* other tools, some of them with even more features
- There are also *many* integrations with popular CI/CD systems
(e.g.: GitLab, Jenkins, ...)
[Flux]: https://www.weave.works/oss/flux/
[Gitkube]: https://gitkube.sh/
---
## Flux overview
- We put our Kubernetes resources as YAML files in a git repository
- Flux polls that repository regularly (every 5 minutes by default)
- The resources described by the YAML files are created/updated automatically
- Changes are made by updating the code in the repository
---
## Preparing a repository for Flux
- We need a repository with Kubernetes YAML files
- I have one: https://github.com/jpetazzo/kubercoins
- Fork it to your GitHub account
- Create a new branch in your fork; e.g. `prod`
(e.g. by adding a line in the README through the GitHub web UI)
- This is the branch that we are going to use for deployment
---
## Setting up Flux
- Clone the Flux repository:
```
git clone https://github.com/weaveworks/flux
```
- Edit `deploy/flux-deployment.yaml`
- Change the `--git-url` and `--git-branch` parameters:
```yaml
- --git-url=git@github.com:your-git-username/kubercoins
- --git-branch=prod
```
- Apply all the YAML:
```
kubectl apply -f deploy/
```
---
## Allowing Flux to access the repository
- When it starts, Flux generates an SSH key
- Display that key:
```
kubectl logs deployment/flux | grep identity
```
- Then add that key to the repository, giving it **write** access
(some Flux features require write access)
- After a minute or so, DockerCoins will be deployed to the current namespace
---
## Making changes
- Make changes (on the `prod` branch), e.g. change `replicas` in `worker`
- After a few minutes, the changes will be picked up by Flux and applied
---
## Other features
- Flux can keep a list of all the tags of all the images we're running
- The `fluxctl` tool can show us if we're running the latest images
- We can also "automate" a resource (i.e. automatically deploy new images)
- And much more!
---
## Gitkube overview
- We put our Kubernetes resources as YAML files in a git repository
- Gitkube is a git server (or "git remote")
- After making changes to the repository, we push to Gitkube
- Gitkube applies the resources to the cluster
---
## Setting up Gitkube
- Install the CLI:
```
sudo curl -L -o /usr/local/bin/gitkube \
https://github.com/hasura/gitkube/releases/download/v0.2.1/gitkube_linux_amd64
sudo chmod +x /usr/local/bin/gitkube
```
- Install Gitkube on the cluster:
```
gitkube install --expose ClusterIP
```
---
## Creating a Remote
- Gitkube provides a new type of API resource: *Remote*
(this is using a mechanism called Custom Resource Definitions or CRD)
- Create and apply a YAML file containing the following manifest:
```yaml
apiVersion: gitkube.sh/v1alpha1
kind: Remote
metadata:
name: example
spec:
authorizedKeys:
- `ssh-rsa AAA...`
manifests:
path: "."
```
(replace the `ssh-rsa AAA...` section with the content of `~/.ssh/id_rsa.pub`)
---
## Pushing to our remote
- Get the `gitkubed` IP address:
```
kubectl -n kube-system get svc gitkubed
IP=$(kubectl -n kube-system get svc gitkubed -o json |
jq -r .spec.clusterIP)
```
- Get ourselves a sample repository with resource YAML files:
```
git clone git://github.com/jpetazzo/kubercoins
cd kubercoins
```
- Add the remote and push to it:
```
git remote add k8s ssh://default-example@$IP/~/git/default-example
git push k8s master
```
---
## Making changes
- Edit a local file
- Commit
- Push!
- Make sure that you push to the `k8s` remote
---
## Other features
- Gitkube can also build container images for us
(see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)
- Gitkube can also deploy Helm Charts
(instead of raw YAML files)

View File

@@ -1,178 +0,0 @@
# Healthchecks
- Kubernetes provides two kinds of healthchecks: liveness and readiness
- Healthchecks are *probes* that apply to *containers* (not to pods)
- Each container can have two (optional) probes:
- liveness = is this container dead or alive?
- readiness = is this container ready to serve traffic?
- Different probes are available (HTTP, TCP, program execution)
- Let's see the difference and how to use them!
---
## Liveness probe
- Indicates if the container is dead or alive
- A dead container cannot come back to life
- If the liveness probe fails, the container is killed
(to make really sure that it's really dead; no zombies or undeads!)
- What happens next depends on the pod's `restartPolicy`:
- `Never`: the container is not restarted
- `OnFailure` or `Always`: the container is restarted
---
## When to use a liveness probe
- To indicate failures that can't be recovered
- deadlocks (causing all requests to time out)
- internal corruption (causing all requests to error)
- If the liveness probe fails *N* consecutive times, the container is killed
- *N* is the `failureThreshold` (3 by default)
---
## Readiness probe
- Indicates if the container is ready to serve traffic
- If a container becomes "unready" (let's say busy!) it might be ready again soon
- If the readiness probe fails:
- the container is *not* killed
- if the pod is a member of a service, it is temporarily removed
- it is re-added as soon as the readiness probe passes again
---
## When to use a readiness probe
- To indicate temporary failures
- the application can only service *N* parallel connections
- the runtime is busy doing garbage collection or initial data load
- The container is marked as "not ready" after `failureThreshold` failed attempts
(3 by default)
- It is marked again as "ready" after `successThreshold` successful attempts
(1 by default)
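---
## Example: readiness probe
Here is what a readiness probe could look like for the `rng` web service (a sketch; the path and timing values are illustrative, not taken from the actual DockerCoins manifests):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: rng-with-readiness
spec:
  containers:
  - name: rng
    image: dockercoins/rng:v0.1
    readinessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 10
      periodSeconds: 1
```
If the backend becomes too slow or starts returning errors, it is taken out of the service rotation (but not killed) until the probe succeeds again.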
---
## Different types of probes
- HTTP request
- specify URL of the request (and optional headers)
- any status code between 200 and 399 indicates success
- TCP connection
- the probe succeeds if the TCP port is open
- arbitrary exec
- a command is executed in the container
- exit status of zero indicates success
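For instance, a TCP probe checking that a Redis container accepts connections could look like this (a sketch; this snippet goes in the container spec, and the port and timings are illustrative):
```yaml
livenessProbe:
  tcpSocket:
    port: 6379           # succeed if a TCP connection to this port can be established
  initialDelaySeconds: 30
  periodSeconds: 10
```
This only checks that the port accepts connections; the exec probe shown later (`redis-cli ping`) is a stronger check.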
---
## Benefits of using probes
- Rolling updates proceed when containers are *actually ready*
(as opposed to merely started)
- Containers in a broken state get killed and restarted
(instead of serving errors or timeouts)
- Overloaded backends get removed from load balancer rotation
(thus improving response times across the board)
---
## Example: HTTP probe
Here is a pod template for the `rng` web service of the DockerCoins app:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: rng-with-liveness
spec:
containers:
- name: rng
image: dockercoins/rng:v0.1
livenessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 10
periodSeconds: 1
```
If the backend serves an error, or takes longer than 1s, 3 times in a row, it gets killed.
---
## Example: exec probe
Here is a pod template for a Redis server:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: redis-with-liveness
spec:
containers:
- name: redis
image: redis
livenessProbe:
exec:
command: ["redis-cli", "ping"]
```
If the Redis process becomes unresponsive, it will be killed.
---
## Details about liveness and readiness probes
- Probes are executed at intervals of `periodSeconds` (default: 10)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
- A probe is considered successful after `successThreshold` successes (default: 1)
- A probe is considered failing after `failureThreshold` failures (default: 3)
- If a probe is not defined, it's as if there were an "always successful" probe
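Putting these parameters together, here is a probe with everything spelled out (a sketch showing the default values explicitly):
```yaml
readinessProbe:
  httpGet:
    path: /
    port: 80
  periodSeconds: 10      # probe every 10 seconds
  timeoutSeconds: 1      # each probe times out after 1 second
  successThreshold: 1    # 1 success to be (re)marked as ready
  failureThreshold: 3    # 3 consecutive failures to be marked as not ready
```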

---
# Exposing HTTP services with Ingress resources
- *Services* give us a way to access a pod or a set of pods
- Services can be exposed to the outside world:
  - with type `NodePort` (on a port in the 30000-32767 range, by default)
- with type `LoadBalancer` (allocating an external load balancer)
- What about HTTP services?
- how can we expose `webui`, `rng`, `hasher`?
- the Kubernetes dashboard?
- a new version of `webui`?
---
## Exposing HTTP services
- If we use `NodePort` services, clients have to specify port numbers
(i.e. http://xxxxx:31234 instead of just http://xxxxx)
- `LoadBalancer` services are nice, but:
- they are not available in all environments
- they often carry an additional cost (e.g. they provision an ELB)
- they require one extra step for DNS integration
<br/>
(waiting for the `LoadBalancer` to be provisioned; then adding it to DNS)
- We could build our own reverse proxy
---
## Building a custom reverse proxy
- There are many options available:
Apache, HAProxy, Hipache, NGINX, Traefik, ...
(look at [jpetazzo/aiguillage](https://github.com/jpetazzo/aiguillage) for a minimal reverse proxy configuration using NGINX)
- Most of these options require us to update/edit configuration files after each change
- Some of them can pick up virtual hosts and backends from a configuration store
- Wouldn't it be nice if this configuration could be managed with the Kubernetes API?
--
- Enter.red[¹] *Ingress* resources!
.footnote[.red[¹] Pun maybe intended.]
---
## Ingress resources
- Kubernetes API resource (`kubectl get ingress`/`ingresses`/`ing`)
- Designed to expose HTTP services
- Basic features:
- load balancing
- SSL termination
- name-based virtual hosting
- Can also route to different services depending on:
  - URI path (e.g. `/api` → `api-service`, `/static` → `assets-service`)
- Client headers, including cookies (for A/B testing, canary deployment...)
- and more!
---
## Principle of operation
- Step 1: deploy an *ingress controller*
- ingress controller = load balancer + control loop
- the control loop watches over ingress resources, and configures the LB accordingly
- Step 2: setup DNS
- associate DNS entries with the load balancer address
- Step 3: create *ingress resources*
- the ingress controller picks up these resources and configures the LB
- Step 4: profit!
---
## Ingress in action
- We will deploy the Traefik ingress controller
- this is an arbitrary choice
- maybe motivated by the fact that Traefik releases are named after cheeses
- For DNS, we will use [nip.io](http://nip.io/)
- `*.1.2.3.4.nip.io` resolves to `1.2.3.4`
- We will create ingress resources for various HTTP services
---
## Deploying pods listening on port 80
- We want our ingress load balancer to be available on port 80
- We could do that with a `LoadBalancer` service
... but it requires support from the underlying infrastructure
- We could use pods specifying `hostPort: 80`
  ... but with most CNI plugins, this [doesn't work, or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
- We could use a `NodePort` service
... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
- Last resort: the `hostNetwork` mode
---
## Without `hostNetwork`
- Normally, each pod gets its own *network namespace*
(sometimes called sandbox or network sandbox)
- An IP address is associated to the pod
- This IP address is routed/connected to the cluster network
- All containers of that pod are sharing that network namespace
(and therefore using the same IP address)
---
## With `hostNetwork: true`
- No network namespace gets created
- The pod is using the network namespace of the host
- It "sees" (and can use) the interfaces (and IP addresses) of the host
- The pod can receive outside traffic directly, on any port
- Downside: with most network plugins, network policies won't work for that pod
- most network policies work at the IP address level
- filtering that pod = filtering traffic from the node
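In a pod spec, enabling this is a single field (a minimal sketch; the image is just an example):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: web-on-host-network
spec:
  hostNetwork: true
  containers:
  - name: web
    image: nginx
    # nginx listens on port 80, so this pod serves directly on the node's port 80
```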
---
## Running Traefik
- The [Traefik documentation](https://docs.traefik.io/user-guide/kubernetes/#deploy-trfik-using-a-deployment-or-daemonset) tells us to pick between Deployment and Daemon Set
- We are going to use a Daemon Set so that each node can accept connections
- We will do two minor changes to the [YAML provided by Traefik](https://github.com/containous/traefik/blob/master/examples/k8s/traefik-ds.yaml):
- enable `hostNetwork`
- add a *toleration* so that Traefik also runs on `node1`
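Here is a sketch of what those two changes could look like in the Daemon Set's pod template (the actual `k8s/traefik.yaml` we will use may differ in the details; taints and tolerations are explained in the next slides):
```yaml
spec:
  template:
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: traefik
        image: traefik
        # (ports, args, etc. as in the upstream example)
```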
---
## Taints and tolerations
- A *taint* is an attribute added to a node
- It prevents pods from running on the node
- ... Unless they have a matching *toleration*
- When deploying with `kubeadm`:
  - a taint is placed on the node dedicated to the control plane
- the pods running the control plane have a matching toleration
---
class: extra-details
## Checking taints on our nodes
.exercise[
- Check our nodes specs:
```bash
kubectl get node node1 -o json | jq .spec
kubectl get node node2 -o json | jq .spec
```
]
We should see a result only for `node1` (the one with the control plane):
```json
"taints": [
{
"effect": "NoSchedule",
"key": "node-role.kubernetes.io/master"
}
]
```
---
class: extra-details
## Understanding a taint
- The `key` can be interpreted as:
- a reservation for a special set of pods
<br/>
(here, this means "this node is reserved for the control plane")
- an error condition on the node
<br/>
(for instance: "disk full", do not start new pods here!)
- The `effect` can be:
- `NoSchedule` (don't run new pods here)
- `PreferNoSchedule` (try not to run new pods here)
- `NoExecute` (don't run new pods and evict running pods)
---
class: extra-details
## Checking tolerations on the control plane
.exercise[
- Check tolerations for CoreDNS:
```bash
kubectl -n kube-system get deployments coredns -o json |
jq .spec.template.spec.tolerations
```
]
The result should include:
```json
{
"effect": "NoSchedule",
"key": "node-role.kubernetes.io/master"
}
```
It means: "bypass the exact taint that we saw earlier on `node1`."
---
class: extra-details
## Special tolerations
.exercise[
- Check tolerations on `kube-proxy`:
```bash
kubectl -n kube-system get ds kube-proxy -o json |
jq .spec.template.spec.tolerations
```
]
The result should include:
```json
{
"operator": "Exists"
}
```
This one is a special case that means "ignore all taints and run anyway."
---
## Running Traefik on our cluster
- We provide a YAML file (`k8s/traefik.yaml`) which is essentially the sum of:
- [Traefik's Daemon Set resources](https://github.com/containous/traefik/blob/master/examples/k8s/traefik-ds.yaml) (patched with `hostNetwork` and tolerations)
- [Traefik's RBAC rules](https://github.com/containous/traefik/blob/master/examples/k8s/traefik-rbac.yaml) allowing it to watch necessary API objects
.exercise[
- Apply the YAML:
```bash
kubectl apply -f ~/container.training/k8s/traefik.yaml
```
]
---
## Checking that Traefik runs correctly
- If Traefik started correctly, we now have a web server listening on each node
.exercise[
- Check that Traefik is serving 80/tcp:
```bash
curl localhost
```
]
We should get a `404 page not found` error.
This is normal: we haven't provided any ingress rule yet.
---
## Setting up DNS
- To make our lives easier, we will use [nip.io](http://nip.io)
- Check out `http://cheddar.A.B.C.D.nip.io`
(replacing A.B.C.D with the IP address of `node1`)
- We should get the same `404 page not found` error
(meaning that our DNS is "set up properly", so to speak!)
---
## Traefik web UI
- Traefik provides a web dashboard
- With the current install method, it's listening on port 8080
.exercise[
- Go to `http://node1:8080` (replacing `node1` with its IP address)
]
---
## Setting up host-based routing ingress rules
- We are going to use `errm/cheese` images
(there are [3 tags available](https://hub.docker.com/r/errm/cheese/tags/): wensleydale, cheddar, stilton)
- These images contain a simple static HTTP server sending a picture of cheese
- We will run 3 deployments (one for each cheese)
- We will create 3 services (one for each deployment)
- Then we will create 3 ingress rules (one for each service)
- We will route `<name-of-cheese>.A.B.C.D.nip.io` to the corresponding deployment
---
## Running cheesy web servers
.exercise[
- Run all three deployments:
```bash
kubectl run cheddar --image=errm/cheese:cheddar
kubectl run stilton --image=errm/cheese:stilton
kubectl run wensleydale --image=errm/cheese:wensleydale
```
- Create a service for each of them:
```bash
kubectl expose deployment cheddar --port=80
kubectl expose deployment stilton --port=80
kubectl expose deployment wensleydale --port=80
```
]
---
## What does an ingress resource look like?
Here is a minimal host-based ingress resource:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cheddar
spec:
rules:
- host: cheddar.`A.B.C.D`.nip.io
http:
paths:
- path: /
backend:
serviceName: cheddar
servicePort: 80
```
(It is in `k8s/ingress.yaml`.)
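For comparison, a path-based rule could look like this (a sketch; the service names are illustrative, and exact path-matching semantics depend on the ingress controller):
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: path-based-example
spec:
  rules:
  - host: myapp.A.B.C.D.nip.io
    http:
      paths:
      - path: /api
        backend:
          serviceName: api-service
          servicePort: 80
      - path: /
        backend:
          serviceName: webui
          servicePort: 80
```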
---
## Creating our first ingress resources
.exercise[
- Edit the file `~/container.training/k8s/ingress.yaml`
- Replace A.B.C.D with the IP address of `node1`
- Apply the file
- Open http://cheddar.A.B.C.D.nip.io
]
(An image of a piece of cheese should show up.)
---
## Creating the other ingress resources
.exercise[
- Edit the file `~/container.training/k8s/ingress.yaml`
- Replace `cheddar` with `stilton` (in `name`, `host`, `serviceName`)
- Apply the file
- Check that `stilton.A.B.C.D.nip.io` works correctly
- Repeat for `wensleydale`
]
---
## Using multiple ingress controllers
- You can have multiple ingress controllers active simultaneously
(e.g. Traefik and NGINX)
- You can even have multiple instances of the same controller
(e.g. one for internal, another for external traffic)
- The `kubernetes.io/ingress.class` annotation can be used to tell which one to use
- It's OK if multiple ingress controllers configure the same resource
(it just means that the service will be accessible through multiple paths)
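For instance, to target a specific controller, add the annotation to the ingress metadata (a sketch; the value depends on how each controller is configured, `traefik` is shown here for illustration):
```yaml
metadata:
  name: cheddar
  annotations:
    kubernetes.io/ingress.class: traefik
```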
---
## Ingress: the good
- The traffic flows directly from the ingress load balancer to the backends
- it doesn't need to go through the `ClusterIP`
- in fact, we don't even need a `ClusterIP` (we can use a headless service)
- The load balancer can be outside of Kubernetes
(as long as it has access to the cluster subnet)
- This makes it possible to use external (hardware, physical machines...) load balancers
- Annotations can encode special features
(rate-limiting, A/B testing, session stickiness, etc.)
---
## Ingress: the bad
- The aforementioned "special features" are not standardized yet
- Some controllers will support them; some won't
- Even relatively common features (stripping a path prefix) can differ:
- [traefik.ingress.kubernetes.io/rule-type: PathPrefixStrip](https://docs.traefik.io/user-guide/kubernetes/#path-based-routing)
- [ingress.kubernetes.io/rewrite-target: /](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/rewrite)
- This should eventually stabilize
(remember that ingresses are currently `apiVersion: extensions/v1beta1`)
