Compare commits

..

13 Commits

Author SHA1 Message Date
Jerome Petazzoni
08fa37dace fix-redirects.sh: adding forced redirect 2020-04-07 16:57:19 -05:00
Jerome Petazzoni
807028cbf3 Remove WiFi warning 2019-06-13 10:51:44 -05:00
Jerome Petazzoni
dfde597cb9 Merge branch 'master' into sfsf-2019-06 2019-06-13 10:51:22 -05:00
Jerome Petazzoni
96419c6baf test→node 2019-06-12 21:35:12 -05:00
Jerome Petazzoni
12da011f21 Customize logistics etc 2019-06-12 21:13:00 -05:00
Jerome Petazzoni
fa1637fb7e Add Helm charts and reorg content 2019-06-12 21:07:55 -05:00
Jerome Petazzoni
fbe2251e21 Merge remote-tracking branch 'origin/make-chart' into sfsf-2019-06 2019-06-12 21:07:12 -05:00
Jerome Petazzoni
b4faf10581 merge 2019-06-12 16:43:24 -05:00
Jerome Petazzoni
0ef9c87f97 Merge branch 'master' into sfsf-2019-06 2019-06-12 16:04:36 -05:00
Jerome Petazzoni
addd14582a merge 2019-06-09 18:41:04 -05:00
Jerome Petazzoni
5299fbaab5 merge 2019-06-02 19:32:20 -05:00
Jerome Petazzoni
398ff5ee4f merge 2019-06-02 16:48:30 -05:00
Jerome Petazzoni
b883e6d557 Prepare SFSF training 2019-06-02 16:47:53 -05:00
93 changed files with 747 additions and 3289 deletions

1
.gitignore vendored
View File

@@ -3,7 +3,6 @@
*~
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html

View File

@@ -39,7 +39,7 @@ your own tutorials.
All these materials have been gathered in a single repository
because they have a few things in common:
- some [shared slides](slides/shared/) that are re-used
- some [common slides](slides/common/) that are re-used
(and updated) identically between different decks;
- a [build system](slides/) generating HTML slides from
Markdown source files;

View File

@@ -72,7 +72,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.5"
image: "consul:1.4.4"
args:
- "agent"
- "-bootstrap-expect=3"

View File

@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -32,16 +32,13 @@ subjects:
name: fluentd
namespace: default
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd
labels:
app: fluentd
spec:
selector:
matchLabels:
app: fluentd
template:
metadata:
labels:
@@ -54,7 +51,7 @@ spec:
effect: NoSchedule
containers:
- name: fluentd
image: fluent/fluentd-kubernetes-daemonset:v1.4-debian-elasticsearch-1
image: fluent/fluentd-kubernetes-daemonset:v1.3-debian-elasticsearch-1
env:
- name: FLUENT_ELASTICSEARCH_HOST
value: "elasticsearch"
@@ -89,7 +86,7 @@ spec:
hostPath:
path: /var/lib/docker/containers
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
@@ -131,7 +128,7 @@ spec:
app: elasticsearch
type: ClusterIP
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:

View File

@@ -9,7 +9,7 @@ spec:
name: haproxy
containers:
- name: haproxy
image: haproxy:1
image: haproxy
volumeMounts:
- name: config
mountPath: /usr/local/etc/haproxy/

View File

@@ -1,13 +1,14 @@
apiVersion: networking.k8s.io/v1beta1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: whatever
name: cheddar
spec:
rules:
- host: whatever.A.B.C.D.nip.io
- host: cheddar.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234
serviceName: cheddar
servicePort: 80

View File

@@ -12,6 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
@@ -90,7 +95,7 @@ subjects:
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
@@ -109,13 +114,12 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --enable-skip-login
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
@@ -162,7 +166,7 @@ spec:
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:

View File

@@ -1,5 +1,5 @@
apiVersion: v1
kind: Pod
Kind: Pod
metadata:
name: hello
namespace: default

View File

@@ -12,6 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
@@ -90,7 +95,7 @@ subjects:
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
@@ -109,7 +114,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP

View File

@@ -45,7 +45,7 @@ subjects:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: local-path-provisioner

View File

@@ -58,7 +58,7 @@ metadata:
name: metrics-server
namespace: kube-system
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server
@@ -82,7 +82,7 @@ spec:
emptyDir: {}
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.3
image: k8s.gcr.io/metrics-server-amd64:v0.3.1
imagePullPolicy: Always
volumeMounts:
- name: tmp-dir

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-without-volume
spec:
containers:
- name: nginx
image: nginx

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-volume
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/

View File

@@ -1,20 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-init
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-git
name: nginx-with-volume
spec:
volumes:
- name: www

View File

@@ -74,7 +74,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.5"
image: "consul:1.4.4"
volumeMounts:
- name: data
mountPath: /consul/data

View File

@@ -1,340 +1,4 @@
# SOURCE: https://install.portworx.com/?kbver=1.15.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true&st=k8s&mc=false
# SOURCE: https://install.portworx.com/?kbver=1.15.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true&st=k8s&mc=false
---
kind: Service
apiVersion: v1
metadata:
name: portworx-service
namespace: kube-system
labels:
name: portworx
spec:
selector:
name: portworx
type: NodePort
ports:
- name: px-api
protocol: TCP
port: 9001
targetPort: 9001
- name: px-kvdb
protocol: TCP
port: 9019
targetPort: 9019
- name: px-sdk
protocol: TCP
port: 9020
targetPort: 9020
- name: px-rest-gateway
protocol: TCP
port: 9021
targetPort: 9021
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: volumeplacementstrategies.portworx.io
spec:
group: portworx.io
versions:
- name: v1beta2
served: true
storage: true
- name: v1beta1
served: false
storage: false
scope: Cluster
names:
plural: volumeplacementstrategies
singular: volumeplacementstrategy
kind: VolumePlacementStrategy
shortNames:
- vps
- vp
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: px-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-get-put-list-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch", "get", "update", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["delete", "get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims", "persistentvolumes"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "update", "create"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["privileged"]
verbs: ["use"]
- apiGroups: ["portworx.io"]
resources: ["volumeplacementstrategies"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-role-binding
subjects:
- kind: ServiceAccount
name: px-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: node-get-put-list-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Namespace
metadata:
name: portworx
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-role
namespace: portworx
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-role-binding
namespace: portworx
subjects:
- kind: ServiceAccount
name: px-account
namespace: kube-system
roleRef:
kind: Role
name: px-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: portworx
namespace: kube-system
annotations:
portworx.com/install-source: "https://install.portworx.com/?kbver=1.15.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true&st=k8s&mc=false"
spec:
minReadySeconds: 0
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
name: portworx
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: px/enabled
operator: NotIn
values:
- "false"
- key: node-role.kubernetes.io/master
operator: DoesNotExist
hostNetwork: true
hostPID: false
initContainers:
- name: checkloop
image: alpine
command: [ "sh", "-c" ]
args:
- |
if ! grep -q loop4 /proc/partitions; then
echo 'Could not find "loop4" in /proc/partitions. Please create it first.'
exit 1
fi
containers:
- name: portworx
image: portworx/oci-monitor:2.1.3
imagePullPolicy: Always
args:
["-c", "px-workshop", "-s", "/dev/loop4", "-secret_type", "k8s", "-b",
"-x", "kubernetes"]
env:
- name: "AUTO_NODE_RECOVERY_TIMEOUT_IN_SECS"
value: "1500"
- name: "PX_TEMPLATE_VERSION"
value: "v4"
livenessProbe:
periodSeconds: 30
initialDelaySeconds: 840 # allow image pull in slow networks
httpGet:
host: 127.0.0.1
path: /status
port: 9001
readinessProbe:
periodSeconds: 10
httpGet:
host: 127.0.0.1
path: /health
port: 9015
terminationMessagePath: "/tmp/px-termination-log"
securityContext:
privileged: true
volumeMounts:
- name: diagsdump
mountPath: /var/cores
- name: dockersock
mountPath: /var/run/docker.sock
- name: containerdsock
mountPath: /run/containerd
- name: criosock
mountPath: /var/run/crio
- name: crioconf
mountPath: /etc/crictl.yaml
- name: etcpwx
mountPath: /etc/pwx
- name: optpwx
mountPath: /opt/pwx
- name: procmount
mountPath: /host_proc
- name: sysdmount
mountPath: /etc/systemd/system
- name: journalmount1
mountPath: /var/run/log
readOnly: true
- name: journalmount2
mountPath: /var/log
readOnly: true
- name: dbusmount
mountPath: /var/run/dbus
restartPolicy: Always
serviceAccountName: px-account
volumes:
- name: diagsdump
hostPath:
path: /var/cores
- name: dockersock
hostPath:
path: /var/run/docker.sock
- name: containerdsock
hostPath:
path: /run/containerd
- name: criosock
hostPath:
path: /var/run/crio
- name: crioconf
hostPath:
path: /etc/crictl.yaml
type: FileOrCreate
- name: etcpwx
hostPath:
path: /etc/pwx
- name: optpwx
hostPath:
path: /opt/pwx
- name: procmount
hostPath:
path: /proc
- name: sysdmount
hostPath:
path: /etc/systemd/system
- name: journalmount1
hostPath:
path: /var/run/log
- name: journalmount2
hostPath:
path: /var/log
- name: dbusmount
hostPath:
path: /var/run/dbus
---
kind: Service
apiVersion: v1
metadata:
name: portworx-api
namespace: kube-system
labels:
name: portworx-api
spec:
selector:
name: portworx-api
type: NodePort
ports:
- name: px-api
protocol: TCP
port: 9001
targetPort: 9001
- name: px-sdk
protocol: TCP
port: 9020
targetPort: 9020
- name: px-rest-gateway
protocol: TCP
port: 9021
targetPort: 9021
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: portworx-api
namespace: kube-system
spec:
minReadySeconds: 0
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 100%
template:
metadata:
labels:
name: portworx-api
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: px/enabled
operator: NotIn
values:
- "false"
- key: node-role.kubernetes.io/master
operator: DoesNotExist
hostNetwork: true
hostPID: false
containers:
- name: portworx-api
image: k8s.gcr.io/pause:3.1
imagePullPolicy: Always
readinessProbe:
periodSeconds: 10
httpGet:
host: 127.0.0.1
path: /status
port: 9001
restartPolicy: Always
serviceAccountName: px-account
---
# SOURCE: https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true
apiVersion: v1
kind: ConfigMap
metadata:
@@ -347,7 +11,7 @@ data:
"apiVersion": "v1",
"extenders": [
{
"urlPrefix": "http://stork-service.kube-system:8099",
"urlPrefix": "http://stork-service.kube-system.svc:8099",
"apiVersion": "v1beta1",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
@@ -370,8 +34,8 @@ metadata:
name: stork-role
rules:
- apiGroups: [""]
resources: ["pods", "pods/exec"]
verbs: ["get", "list", "delete", "create", "watch"]
resources: ["pods"]
verbs: ["get", "list", "delete"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
@@ -384,14 +48,14 @@ rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["stork.libopenstorage.org"]
resources: ["*"]
verbs: ["get", "list", "watch", "update", "patch", "create", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "get"]
verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshots", "volumesnapshotdatas"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshotdatas"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
@@ -408,9 +72,6 @@ rules:
- apiGroups: ["*"]
resources: ["statefulsets", "statefulsets/extensions"]
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
- apiGroups: ["*"]
resources: ["*"]
verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -470,10 +131,7 @@ spec:
- --leader-elect=true
- --health-monitor-interval=120
imagePullPolicy: Always
image: openstorage/stork:2.2.4
env:
- name: "PX_SERVICE_NAME"
value: "portworx-api"
image: openstorage/stork:1.1.3
resources:
requests:
cpu: '0.1'
@@ -510,13 +168,16 @@ metadata:
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "create", "update"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch", "update"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["create"]
- apiGroups: [""]
resourceNames: ["kube-scheduler"]
resources: ["endpoints"]
@@ -536,7 +197,7 @@ rules:
- apiGroups: [""]
resources: ["replicationcontrollers", "services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps", "extensions"]
- apiGroups: ["app", "extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
@@ -592,7 +253,7 @@ spec:
- --policy-configmap=stork-config
- --policy-configmap-namespace=kube-system
- --lock-object-name=stork-scheduler
image: gcr.io/google_containers/kube-scheduler-amd64:v1.15.2
image: gcr.io/google_containers/kube-scheduler-amd64:v1.11.2
livenessProbe:
httpGet:
path: /healthz
@@ -619,61 +280,229 @@ spec:
hostPID: false
serviceAccountName: stork-scheduler-account
---
kind: Service
apiVersion: v1
metadata:
name: portworx-service
namespace: kube-system
labels:
name: portworx
spec:
selector:
name: portworx
ports:
- name: px-api
protocol: TCP
port: 9001
targetPort: 9001
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: px-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-get-put-list-role
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch", "get", "update", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["delete", "get", "list"]
- apiGroups: [""]
resources: ["persistentvolumeclaims", "persistentvolumes"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "update", "create"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["privileged"]
verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-role-binding
subjects:
- kind: ServiceAccount
name: px-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: node-get-put-list-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Namespace
metadata:
name: portworx
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-role
namespace: portworx
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-role-binding
namespace: portworx
subjects:
- kind: ServiceAccount
name: px-account
namespace: kube-system
roleRef:
kind: Role
name: px-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: portworx
namespace: kube-system
annotations:
portworx.com/install-source: "https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true"
spec:
minReadySeconds: 0
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
name: portworx
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: px/enabled
operator: NotIn
values:
- "false"
- key: node-role.kubernetes.io/master
operator: DoesNotExist
hostNetwork: true
hostPID: false
containers:
- name: portworx
image: portworx/oci-monitor:1.4.2.2
imagePullPolicy: Always
args:
["-c", "px-workshop", "-s", "/dev/loop4", "-b",
"-x", "kubernetes"]
env:
- name: "PX_TEMPLATE_VERSION"
value: "v4"
livenessProbe:
periodSeconds: 30
initialDelaySeconds: 840 # allow image pull in slow networks
httpGet:
host: 127.0.0.1
path: /status
port: 9001
readinessProbe:
periodSeconds: 10
httpGet:
host: 127.0.0.1
path: /health
port: 9015
terminationMessagePath: "/tmp/px-termination-log"
securityContext:
privileged: true
volumeMounts:
- name: dockersock
mountPath: /var/run/docker.sock
- name: etcpwx
mountPath: /etc/pwx
- name: optpwx
mountPath: /opt/pwx
- name: proc1nsmount
mountPath: /host_proc/1/ns
- name: sysdmount
mountPath: /etc/systemd/system
- name: diagsdump
mountPath: /var/cores
- name: journalmount1
mountPath: /var/run/log
readOnly: true
- name: journalmount2
mountPath: /var/log
readOnly: true
- name: dbusmount
mountPath: /var/run/dbus
restartPolicy: Always
serviceAccountName: px-account
volumes:
- name: dockersock
hostPath:
path: /var/run/docker.sock
- name: etcpwx
hostPath:
path: /etc/pwx
- name: optpwx
hostPath:
path: /opt/pwx
- name: proc1nsmount
hostPath:
path: /proc/1/ns
- name: sysdmount
hostPath:
path: /etc/systemd/system
- name: diagsdump
hostPath:
path: /var/cores
- name: journalmount1
hostPath:
path: /var/run/log
- name: journalmount2
hostPath:
path: /var/log
- name: dbusmount
hostPath:
path: /var/run/dbus
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: px-lh-account
namespace: kube-system
---
kind: ClusterRole
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-lh-role
namespace: kube-system
name: px-lh-role
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get"]
- apiGroups:
- extensions
- apps
resources:
- deployments
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["services"]
verbs: ["create", "get", "list", "watch"]
- apiGroups: ["stork.libopenstorage.org"]
resources: ["clusterpairs","migrations","groupvolumesnapshots"]
verbs: ["get", "list", "create", "update", "delete"]
- apiGroups: ["monitoring.coreos.com"]
resources:
- alertmanagers
- prometheuses
- prometheuses/finalizers
- servicemonitors
verbs: ["*"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "update"]
---
kind: ClusterRoleBinding
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-lh-role-binding
namespace: kube-system
subjects:
- kind: ServiceAccount
name: px-lh-account
namespace: kube-system
- kind: ServiceAccount
name: px-lh-account
namespace: kube-system
roleRef:
kind: ClusterRole
kind: Role
name: px-lh-role
apiGroup: rbac.authorization.k8s.io
---
@@ -689,12 +518,14 @@ spec:
ports:
- name: http
port: 80
nodePort: 32678
- name: https
port: 443
nodePort: 32679
selector:
tier: px-web-console
---
apiVersion: apps/v1beta1
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: px-lighthouse
@@ -718,7 +549,7 @@ spec:
spec:
initContainers:
- name: config-init
image: portworx/lh-config-sync:0.4
image: portworx/lh-config-sync:0.2
imagePullPolicy: Always
args:
- "init"
@@ -727,9 +558,8 @@ spec:
mountPath: /config/lh
containers:
- name: px-lighthouse
image: portworx/px-lighthouse:2.0.4
image: portworx/px-lighthouse:1.5.0
imagePullPolicy: Always
args: [ "-kubernetes", "true" ]
ports:
- containerPort: 80
- containerPort: 443
@@ -737,16 +567,13 @@ spec:
- name: config
mountPath: /config/lh
- name: config-sync
image: portworx/lh-config-sync:0.4
image: portworx/lh-config-sync:0.2
imagePullPolicy: Always
args:
- "sync"
volumeMounts:
- name: config
mountPath: /config/lh
- name: stork-connector
image: portworx/lh-stork-connector:0.2
imagePullPolicy: Always
serviceAccountName: px-lh-account
volumes:
- name: config

View File

@@ -15,7 +15,7 @@ spec:
schedulerName: stork
containers:
- name: postgres
image: postgres:11
image: postgres:10.5
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres

View File

@@ -6,16 +6,13 @@ metadata:
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
apiVersion: extensions/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
@@ -29,7 +26,7 @@ spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
- image: traefik
name: traefik-ingress-lb
ports:
- name: http

View File

@@ -7,8 +7,8 @@ workshop.
## 1. Prerequisites
Virtualbox, Vagrant and Ansible
Virtualbox, Vagrant and Ansible
- Virtualbox: https://www.virtualbox.org/wiki/Downloads
@@ -25,7 +25,7 @@ Virtualbox, Vagrant and Ansible
$ git clone --recursive https://github.com/ansible/ansible.git
$ cd ansible
$ git checkout stable-{{ getStableVersionFromAnsibleProject }}
$ git checkout stable-2.0.0.1
$ git submodule update
- source the setup script to make Ansible available on this terminal session:
@@ -38,7 +38,6 @@ Virtualbox, Vagrant and Ansible
## 2. Preparing the environment
Change into directory that has your Vagrantfile
Run the following commands:
@@ -67,14 +66,6 @@ will reflect inside the instance.
- Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed
- If you get an error like "no Vagrant file found" or you have a file but "cannot open base box" when running `vagrant up`,
chances are good you not in the correct directory.
Make sure you are in sub directory named "prepare-local". It has all the config files required by ansible, vagrant and virtualbox
- If you are using Python 3.7, running the ansible-playbook provisioning, see an error like "SyntaxError: invalid syntax" and it mentions
the word "async", you need to upgrade your Ansible version to 2.6 or higher to resolve the keyword conflict.
https://github.com/ansible/ansible/issues/42105
- If you get strange Ansible errors about dependencies, try to check your pip
version with `pip --version`. The current version is 8.1.1. If your pip is
older than this, upgrade it with `sudo pip install --upgrade pip`, restart

View File

@@ -10,21 +10,15 @@ These tools can help you to create VMs on:
- [Docker](https://docs.docker.com/engine/installation/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this
Depending on the infrastructure that you want to use, you also need to install
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).
And if you want to generate printable cards:
- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)
You can install them with pip (perhaps with `pip install --user`, or even use `virtualenv` if that's your thing).
These require Python 3. If you are on a Mac, see below for specific instructions on setting up
Python 3 to be the default Python on a Mac. In particular, if you installed `mosh`, Homebrew
may have changed your default Python to Python 2.
- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
- [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)
## General Workflow
@@ -93,37 +87,26 @@ You're all set!
```
workshopctl - the orchestration workshop swiss army knife
Commands:
build Build the Docker image to run this program in a container
cards Generate ready-to-print cards for a group of VMs
deploy Install Docker on a bunch of running VMs
disableaddrchecks Disable source/destination IP address checks
disabledocker Stop Docker Engine and don't restart it automatically
helmprom Install Helm and Prometheus
help Show available commands
ids (FIXME) List the instance IDs belonging to a given tag or token
kubebins Install Kubernetes and CNI binaries but don't start anything
kubereset Wipe out Kubernetes configuration on all nodes
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
kubetest Check that all nodes are reporting as Ready
listall List VMs running on all configured infrastructures
list List available groups for a given infrastructure
netfix Disable GRO and run a pinger job on the VMs
opensg Open the default security group to ALL ingress traffic
ping Ping VMs in a given tag, to check that they have network access
pssh Run an arbitrary command on all nodes
pull_images Pre-pull a bunch of Docker images
quotas Check our infrastructure quotas (max instances)
remap_nodeports Remap NodePort range to 10000-10999
retag (FIXME) Apply a new tag to a group of VMs
ssh Open an SSH session to the first node of a tag
start Start a group of VMs
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
tags List groups of VMs known locally
test Run tests (pre-flight checks) on a group of VMs
weavetest Check that weave seems properly setup
webssh Install a WEB SSH server on the machines (port 1080)
wrap Run this program in a container
www Run a web server to access card HTML and PDF
ami Show the AMI that will be used for deployment
amis List Ubuntu AMIs in the current region
build Build the Docker image to run this program in a container
cards Generate ready-to-print cards for a group of VMs
deploy Install Docker on a bunch of running VMs
ec2quotas Check our EC2 quotas (max instances)
help Show available commands
ids List the instance IDs belonging to a given tag or token
ips List the IP addresses of the VMs for a given tag or token
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
kubetest Check that all notes are reporting as Ready
list List available groups in the current region
opensg Open the default security group to ALL ingress traffic
pull_images Pre-pull a bunch of Docker images
retag Apply a new tag to a group of VMs
start Start a group of VMs
status List instance status for a given group
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
test Run tests (pre-flight checks) on a group of VMs
wrap Run this program in a container
```
### Summary of What `./workshopctl` Does For You
@@ -262,32 +245,3 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m
- Don't write to bash history in system() in postprep
- compose, etc version inconsistent (int vs str)
## Making sure Python3 is the default (Mac only)
Check the `/usr/local/bin/python` symlink. It should be pointing to
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
instructions.
1) Verify that Python 3 is installed.
```
ls -la /usr/local/Cellar/Python
```
You should see one or more versions of Python 3. If you don't,
install it with `brew install python`.
2) Verify that `python` points to Python3.
```
ls -la /usr/local/bin/python
```
If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.
```
rm /usr/local/bin/python
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
# where xxxx is the most recent Python 3 version you saw above
```

View File

@@ -33,14 +33,9 @@ _cmd_cards() {
../../lib/ips-txt-to-html.py settings.yaml
)
ln -sf ../tags/$TAG/ips.html www/$TAG.html
ln -sf ../tags/$TAG/ips.pdf www/$TAG.pdf
info "Cards created. You can view them with:"
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
info "open tags/$TAG/ips.html (on macOS)"
info "Or you can start a web server with:"
info "$0 www"
}
_cmd deploy "Install Docker on a bunch of running VMs"
@@ -127,11 +122,11 @@ _cmd_kubebins() {
set -e
cd /usr/local/bin
if ! [ -x etcd ]; then
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.15/etcd-v3.3.15-linux-amd64.tar.gz \
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
if ! [ -x hyperkube ]; then
curl -L https://dl.k8s.io/v1.16.2/kubernetes-server-linux-amd64.tar.gz \
curl -L https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz \
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
fi
if ! [ -x kubelet ]; then
@@ -143,7 +138,7 @@ _cmd_kubebins() {
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
if ! [ -x bridge ]; then
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
| sudo tar -zx
fi
"
@@ -157,10 +152,10 @@ _cmd_kube() {
# Optional version, e.g. 1.13.5
KUBEVERSION=$2
if [ "$KUBEVERSION" ]; then
EXTRA_APTGET="=$KUBEVERSION-00"
EXTRA_KUBELET="=$KUBEVERSION-00"
EXTRA_KUBEADM="--kubernetes-version=v$KUBEVERSION"
else
EXTRA_APTGET=""
EXTRA_KUBELET=""
EXTRA_KUBEADM=""
fi
@@ -172,7 +167,7 @@ _cmd_kube() {
sudo tee /etc/apt/sources.list.d/kubernetes.list"
pssh --timeout 200 "
sudo apt-get update -q &&
sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
sudo apt-get install -qy kubelet$EXTRA_KUBELET kubeadm kubectl &&
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
# Initialize kube master
@@ -234,7 +229,7 @@ EOF"
pssh "
if [ ! -x /usr/local/bin/stern ]; then
##VERSION##
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 &&
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64 &&
sudo chmod +x /usr/local/bin/stern &&
stern --completion bash | sudo tee /etc/bash_completion.d/stern
fi"
@@ -323,14 +318,6 @@ _cmd_listall() {
done
}
_cmd ping "Ping VMs in a given tag, to check that they have network access"
_cmd_ping() {
TAG=$1
need_tag
fping < tags/$TAG/ips.txt
}
_cmd netfix "Disable GRO and run a pinger job on the VMs"
_cmd_netfix () {
TAG=$1
@@ -386,20 +373,6 @@ _cmd_pull_images() {
pull_tag
}
_cmd remap_nodeports "Remap NodePort range to 10000-10999"
_cmd_remap_nodeports() {
TAG=$1
need_tag
FIND_LINE=" - --service-cluster-ip-range=10.96.0.0\/12"
ADD_LINE=" - --service-node-port-range=10000-10999"
MANIFEST_FILE=/etc/kubernetes/manifests/kube-apiserver.yaml
pssh "
if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
fi"
}
_cmd quotas "Check our infrastructure quotas (max instances)"
_cmd_quotas() {
need_infra $1
@@ -555,50 +528,6 @@ _cmd_weavetest() {
sh -c \"./weave --local status | grep Connections | grep -q ' 1 failed' || ! echo POD \""
}
_cmd webssh "Install a WEB SSH server on the machines (port 1080)"
_cmd_webssh() {
TAG=$1
need_tag
pssh "
sudo apt-get update &&
sudo apt-get install python-tornado python-paramiko -y"
pssh "
[ -d webssh ] || git clone https://github.com/jpetazzo/webssh"
pssh "
for KEYFILE in /etc/ssh/*.pub; do
read a b c < \$KEYFILE; echo localhost \$a \$b
done > webssh/known_hosts"
pssh "cat >webssh.service <<EOF
[Unit]
Description=webssh
[Install]
WantedBy=multi-user.target
[Service]
WorkingDirectory=/home/ubuntu/webssh
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
Restart=always
EOF"
pssh "
sudo systemctl enable \$PWD/webssh.service &&
sudo systemctl start webssh.service"
}
_cmd www "Run a web server to access card HTML and PDF"
_cmd_www() {
cd www
IPADDR=$(curl -sL canihazip.com/s)
info "The following files are available:"
for F in *; do
echo "http://$IPADDR:8000/$F"
done
info "Press Ctrl-C to stop server."
python3 -m http.server
}
greet() {
IAMUSER=$(aws iam get-user --query 'User.UserName')
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."

View File

@@ -31,7 +31,6 @@ infra_start() {
die "I could not find which AMI to use in this region. Try another region?"
fi
AWS_KEY_NAME=$(make_key_name)
AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}
sep "Starting instances"
info " Count: $COUNT"
@@ -39,11 +38,10 @@ infra_start() {
info " Token/tag: $TAG"
info " AMI: $AMI"
info " Key name: $AWS_KEY_NAME"
info " Instance type: $AWS_INSTANCE_TYPE"
result=$(aws ec2 run-instances \
--key-name $AWS_KEY_NAME \
--count $COUNT \
--instance-type $AWS_INSTANCE_TYPE \
--instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
--client-token $TAG \
--block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
--image-id $AMI)
@@ -99,7 +97,7 @@ infra_disableaddrchecks() {
}
wait_until_tag_is_running() {
max_retry=100
max_retry=50
i=0
done_count=0
while [[ $done_count -lt $COUNT ]]; do

View File

@@ -4,12 +4,17 @@ import sys
import yaml
import jinja2
def prettify(l):
l = [ip.strip() for ip in l]
ret = [ "node{}: <code>{}</code>".format(i+1, s) for (i, s) in zip(range(len(l)), l) ]
return ret
# Read settings from user-provided settings file
context = yaml.safe_load(open(sys.argv[1]))
SETTINGS = yaml.load(open(sys.argv[1]))
clustersize = SETTINGS["clustersize"]
ips = list(open("ips.txt"))
clustersize = context["clustersize"]
print("---------------------------------------------")
print(" Number of IPs: {}".format(len(ips)))
@@ -25,9 +30,7 @@ while ips:
ips = ips[clustersize:]
clusters.append(cluster)
context["clusters"] = clusters
template_file_name = context["cards_template"]
template_file_name = SETTINGS["cards_template"]
template_file_path = os.path.join(
os.path.dirname(__file__),
"..",
@@ -36,19 +39,18 @@ template_file_path = os.path.join(
)
template = jinja2.Template(open(template_file_path).read())
with open("ips.html", "w") as f:
f.write(template.render(**context))
f.write(template.render(clusters=clusters, **SETTINGS))
print("Generated ips.html")
try:
import pdfkit
with open("ips.html") as f:
pdfkit.from_file(f, "ips.pdf", options={
"page-size": context["paper_size"],
"margin-top": context["paper_margin"],
"margin-bottom": context["paper_margin"],
"margin-left": context["paper_margin"],
"margin-right": context["paper_margin"],
"page-size": SETTINGS["paper_size"],
"margin-top": SETTINGS["paper_margin"],
"margin-bottom": SETTINGS["paper_margin"],
"margin-left": SETTINGS["paper_margin"],
"margin-right": SETTINGS["paper_margin"],
})
print("Generated ips.pdf")
except ImportError:

View File

@@ -73,29 +73,8 @@ set expandtab
set number
set shiftwidth=2
set softtabstop=2
set nowrap
SQRL""")
# Custom .tmux.conf
system(
"""sudo -u docker tee /home/docker/.tmux.conf <<SQRL
bind h select-pane -L
bind j select-pane -D
bind k select-pane -U
bind l select-pane -R
# Allow using mouse to switch panes
set -g mouse on
# Make scrolling with wheels work
bind -n WheelUpPane if-shell -F -t = "#{mouse_any_flag}" "send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'select-pane -t=; copy-mode -e; send-keys -M'"
bind -n WheelDownPane select-pane -t= \; send-keys -M
SQRL"""
)
# add docker user to sudoers and allow password authentication
system("""sudo tee /etc/sudoers.d/docker <<SQRL
docker ALL=(ALL) NOPASSWD:ALL
@@ -106,7 +85,6 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
system("sudo service ssh restart")
system("sudo apt-get -q update")
system("sudo apt-get -qy install git jq")
system("sudo apt-get -qy install emacs-nox joe")
#######################
### DOCKER INSTALLS ###

View File

@@ -21,10 +21,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
image:

View File

@@ -21,11 +21,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
clusternumber: 100
image:

View File

@@ -21,11 +21,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
clusternumber: 200
image:

View File

@@ -21,10 +21,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
image:

View File

@@ -23,7 +23,7 @@ paper_margin: 0.2in
engine_version: test
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.18.0
machine_version: 0.13.0
# Password used to connect with the "docker user"

View File

@@ -23,7 +23,7 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.22.0
machine_version: 0.15.0
# Password used to connect with the "docker user"

View File

@@ -21,8 +21,9 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -23,7 +23,7 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"

View File

@@ -23,7 +23,7 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.22.0
machine_version: 0.15.0
# Password used to connect with the "docker user"

View File

@@ -30,9 +30,9 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG
SETTINGS=admin-kuberouter
@@ -43,15 +43,11 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG
#INFRA=infra/aws-us-west-1
export AWS_INSTANCE_TYPE=t3a.medium
SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS
./workshopctl start \
@@ -61,6 +57,5 @@ TAG=$PREFIX-$SETTINGS
--count $((3*$STUDENTS))
./workshopctl deploy $TAG
./workshopctl kube $TAG 1.14.6
./workshopctl kube $TAG 1.13.5
./workshopctl cards $TAG

View File

@@ -1,23 +1,12 @@
{#
The variables below can be customized here directly, or in your
settings.yaml file. Any variable in settings.yaml will be exposed
in here as well.
#}
{# Feel free to customize or override anything in there! #}
{%- set url = url
| default("http://FIXME.container.training/") -%}
{%- set pagesize = pagesize
| default(9) -%}
{%- set lang = lang
| default("en") -%}
{%- set event = event
| default("training session") -%}
{%- set backside = backside
| default(False) -%}
{%- set image = image
| default("kube") -%}
{%- set clusternumber = clusternumber
| default(None) -%}
{%- set url = "http://FIXME.container.training/" -%}
{%- set pagesize = 9 -%}
{%- set lang = "en" -%}
{%- set event = "training session" -%}
{%- set backside = False -%}
{%- set image = "kube" -%}
{%- set clusternumber = 100 -%}
{%- set image_src = {
"docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
@@ -172,9 +161,7 @@ img.kube {
<div>
<p>{{ intro }}</p>
<p>
{% if image_src %}
<img src="{{ image_src }}" />
{% endif %}
<table>
{% if clusternumber != None %}
<tr><td>cluster:</td></tr>
@@ -200,10 +187,8 @@ img.kube {
</p>
<p>
{% if url %}
{{ slides_are_at }}
<center>{{ url }}</center>
{% endif %}
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}

View File

@@ -1,4 +0,0 @@
This directory will contain symlinks to HTML and PDF files for the cards
with the IP address, login, and password for the training environments.
The file "index.html" is empty on purpose: it prevents listing the files.

View File

@@ -1,7 +1,5 @@
# Uncomment and/or edit one of the the following lines if necessary.
#/ /kube-halfday.yml.html 200
/ /kube-fullday.yml.html 200!
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
/ /sfsf.yml.html 200!

View File

@@ -104,6 +104,22 @@ like Windows, macOS, Solaris, FreeBSD ...
---
## rkt
* Compares to `runc`.
* No daemon or API.
* Strong emphasis on security (through privilege separation).
* Networking has to be set up separately (e.g. through CNI plugins).
* Partial image management (pull, but no push).
(Image build is handled by separate tools.)
---
## CRI-O
* Designed to be used with Kubernetes as a simple, basic runtime.

View File

@@ -76,78 +76,6 @@ CMD ["python", "app.py"]
---
## Be careful with `chown`, `chmod`, `mv`
* Layers cannot store efficiently changes in permissions or ownership.
* Layers cannot represent efficiently when a file is moved either.
* As a result, operations like `chown`, `chmod`, `mv` can be expensive.
* For instance, in the Dockerfile snippet below, each `RUN` line
creates a layer with an entire copy of `some-file`.
```dockerfile
COPY some-file .
RUN chown www-data:www-data some-file
RUN chmod 644 some-file
RUN mv some-file /var/www
```
* How can we avoid that?
---
## Put files on the right place
* Instead of using `mv`, directly put files at the right place.
* When extracting archives (tar, zip...), merge operations in a single layer.
Example:
```dockerfile
...
RUN wget http://.../foo.tar.gz \
&& tar -zxf foo.tar.gz \
&& mv foo/fooctl /usr/local/bin \
&& rm -rf foo
...
```
---
## Use `COPY --chown`
* The Dockerfile instruction `COPY` can take a `--chown` parameter.
Examples:
```dockerfile
...
COPY --chown=1000 some-file .
COPY --chown=1000:1000 some-file .
COPY --chown=www-data:www-data some-file .
```
* The `--chown` flag can specify a user, or a user:group pair.
* The user and group can be specified as names or numbers.
* When using names, the names must exist in `/etc/passwd` or `/etc/group`.
*(In the container, not on the host!)*
---
## Set correct permissions locally
* Instead of using `chmod`, set the right file permissions locally.
* When files are copied with `COPY`, permissions are preserved.
---
## Embedding unit tests in the build process
```dockerfile

View File

@@ -1,12 +1,3 @@
- date: [2019-11-04, 2019-11-05]
country: de
city: Berlin
event: Velocity
speaker: jpetazzo
title: Deploying and scaling applications with Kubernetes
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/79109
slides: https://velocity-2019-11.container.training/
- date: 2019-11-13
country: fr
city: Marseille
@@ -16,38 +7,6 @@
lang: fr
attend: http://2019.devops-dday.com/Workshop.html
- date: 2019-10-30
country: us
city: Portland, OR
event: LISA
speaker: jpetazzo
title: Deep Dive into Kubernetes Internals for Builders and Operators
attend: https://www.usenix.org/conference/lisa19/presentation/petazzoni-tutorial
- date: [2019-10-22, 2019-10-24]
country: us
city: Charlotte, NC
event: Ardan Labs
speaker: jpetazzo
title: Kubernetes Training
attend: https://www.eventbrite.com/e/containers-docker-and-kubernetes-training-for-devs-and-ops-charlotte-nc-november-2019-tickets-73296659281
- date: 2019-10-22
country: us
city: Charlotte, NC
event: Ardan Labs
speaker: jpetazzo
title: Docker & Containers Training
attend: https://www.eventbrite.com/e/containers-docker-and-kubernetes-training-for-devs-and-ops-charlotte-nc-november-2019-tickets-73296659281
- date: 2019-10-22
country: de
city: Berlin
event: GOTO
speaker: bretfisher
title: Kubernetes or Swarm? Build Both, Deploy Apps, Learn The Differences
attend: https://gotober.com/2019/workshops/194
- date: [2019-09-24, 2019-09-25]
country: fr
city: Paris
@@ -56,43 +15,6 @@
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
slides: https://kube-2019-09.container.training/
- date: 2019-08-27
country: tr
city: Izmir
event: HacknBreak
speaker: gurayyildirim
title: Deploying and scaling applications with Kubernetes (in Turkish)
lang: tr
attend: https://hacknbreak.com
- date: 2019-08-26
country: tr
city: Izmir
event: HacknBreak
speaker: gurayyildirim
title: Container Orchestration with Docker and Swarm (in Turkish)
lang: tr
attend: https://hacknbreak.com
- date: 2019-08-25
country: tr
city: Izmir
event: HackBreak
speaker: gurayyildirim
title: Introduction to Docker and Containers (in Turkish)
lang: tr
attend: https://hacknbreak.com
- date: 2019-07-16
country: us
city: Portland, OR
event: OSCON
speaker: bridgetkromhout
title: "Kubernetes 201: Production tooling"
attend: https://conferences.oreilly.com/oscon/oscon-or/public/schedule/detail/76390
slides: https://oscon2019.container.training
- date: 2019-06-17
country: ca

View File

@@ -167,11 +167,13 @@ What does that mean?
## Let's experiment a bit!
- For the exercises in this section, connect to the first node of the `test` cluster
- For this section, we will use a cluster with 4 nodes
(named node1, node2, node3, node4)
.exercise[
- SSH to the first node of the test cluster
- SSH to the first node of the cluster
- Check that the cluster is operational:
```bash

View File

@@ -667,12 +667,17 @@ class: extra-details
- For auditing purposes, sometimes we want to know who can perform an action
- There are a few tools to help us with that
- There is a proof-of-concept tool by Aqua Security which does exactly that:
- [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
https://github.com/aquasecurity/kubectl-who-can
- [Review Access (aka Rakkess)](https://github.com/corneliusweig/rakkess)
- This is one way to install it:
```bash
docker run --rm -v /usr/local/bin:/go/bin golang \
go get -v github.com/aquasecurity/kubectl-who-can
```
- Both are available as standalone programs, or as plugins for `kubectl`
(`kubectl` plugins can be installed and managed with `krew`)
- This is one way to use it:
```bash
kubectl-who-can create pods
```

View File

@@ -15,3 +15,26 @@
- `dockercoins/webui:v0.1`
- `dockercoins/worker:v0.1`
---
## Setting `$REGISTRY` and `$TAG`
- In the upcoming exercises and labs, we use a couple of environment variables:
- `$REGISTRY` as a prefix to all image names
- `$TAG` as the image version tag
- For example, the worker image is `$REGISTRY/worker:$TAG`
- If you copy-paste the commands in these exercises:
**make sure that you set `$REGISTRY` and `$TAG` first!**
- For example:
```
export REGISTRY=dockercoins TAG=v0.1
```
(this will expand `$REGISTRY/worker:$TAG` to `dockercoins/worker:v0.1`)

View File

@@ -18,7 +18,7 @@
(it gives us replication primitives)
- Kubernetes helps us clone / replicate environments
- Kubernetes helps us to clone/replicate environments
(all resources can be described with manifests)

View File

@@ -10,8 +10,6 @@
- Components can be upgraded one at a time without problems
<!-- ##VERSION## -->
---
## Checking what we're running
@@ -168,7 +166,7 @@
- Upgrade kubelet:
```bash
sudo apt install kubelet=1.15.3-00
apt install kubelet=1.14.2-00
```
]
@@ -228,7 +226,7 @@
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
```
- Look for the `image:` line, and update it to e.g. `v1.15.0`
- Look for the `image:` line, and update it to e.g. `v1.14.0`
]
@@ -262,52 +260,14 @@
sudo kubeadm upgrade plan
```
]
(Note: kubeadm is confused by our manual upgrade of the API server.
<br/>It thinks the cluster is running 1.14.0!)
Note 1: kubeadm thinks that our cluster is running 1.15.0.
<br/>It is confused by our manual upgrade of the API server!
Note 2: kubeadm itself is still version 1.14.6.
<br/>It doesn't know how to upgrade to 1.15.X.
---
## Upgrading kubeadm
- First things first: we need to upgrade kubeadm
.exercise[
- Upgrade kubeadm:
```
sudo apt install kubeadm
```
- Check what kubeadm tells us:
```
sudo kubeadm upgrade plan
```
]
Note: kubeadm still thinks that our cluster is running 1.15.0.
<br/>But at least it knows about version 1.15.X now.
---
## Upgrading the cluster with kubeadm
- Ideally, we should revert our `image:` change
(so that kubeadm executes the right migration steps)
- Or we can try the upgrade anyway
.exercise[
<!-- ##VERSION## -->
- Perform the upgrade:
```bash
sudo kubeadm upgrade apply v1.15.3
sudo kubeadm upgrade apply v1.14.2
```
]
@@ -327,8 +287,8 @@ Note: kubeadm still thinks that our cluster is running 1.15.0.
- Download the configuration on each node, and upgrade kubelet:
```bash
for N in 1 2 3; do
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.15.3
ssh test$N sudo apt install kubelet=1.15.3-00
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
ssh test$N sudo apt install kubelet=1.14.2-00
done
```
]
@@ -337,7 +297,7 @@ Note: kubeadm still thinks that our cluster is running 1.15.0.
## Checking what we've done
- All our nodes should now be updated to version 1.15.3
- All our nodes should now be updated to version 1.14.2
.exercise[
@@ -347,19 +307,3 @@ Note: kubeadm still thinks that our cluster is running 1.15.0.
```
]
---
class: extra-details
## Skipping versions
- This example worked because we went from 1.14 to 1.15
- If you are upgrading from e.g. 1.13, you will generally have to go through 1.14 first
- This means upgrading kubeadm to 1.14.X, then using it to upgrade the cluster
- Then upgrading kubeadm to 1.15.X, etc.
- **Make sure to read the release notes before upgrading!**

View File

@@ -66,8 +66,6 @@ Look in each plugin's directory for its documentation.
---
class: extra-details
## Conf vs conflist
- There are two slightly different configuration formats

View File

@@ -44,37 +44,21 @@
## Other things that Kubernetes can do for us
- Autoscaling
- Basic autoscaling
(straightforward on CPU; more complex on other metrics)
- Blue/green deployment, canary deployment
- Ressource management and scheduling
- Long running services, but also batch (one-off) jobs
(reserve CPU/RAM for containers; placement constraints)
- Overcommit our cluster and *evict* low-priority jobs
- Advanced rollout patterns
- Run services with *stateful* data (databases etc.)
(blue/green deployment, canary deployment)
- Fine-grained access control defining *what* can be done by *whom* on *which* resources
---
- Integrating third party services (*service catalog*)
## More things that Kubernetes can do for us
- Batch jobs
(one-off; parallel; also cron-style periodic execution)
- Fine-grained access control
(defining *what* can be done by *whom* on *which* resources)
- Stateful services
(databases, message queues, etc.)
- Automating complex tasks with *operators*
(e.g. database replication, failover, etc.)
- Automating complex tasks (*operators*)
---
@@ -207,29 +191,11 @@ No!
- By default, Kubernetes uses the Docker Engine to run containers
- We can leverage other pluggable runtimes through the *Container Runtime Interface*
- We could also use `rkt` ("Rocket") from CoreOS
- <del>We could also use `rkt` ("Rocket") from CoreOS</del> (deprecated)
- Or leverage other pluggable runtimes through the *Container Runtime Interface*
---
class: extra-details
## Some runtimes available through CRI
- [containerd](https://github.com/containerd/containerd/blob/master/README.md)
- maintained by Docker, IBM, and community
- used by Docker Engine, microk8s, k3s, GKE; also standalone
- comes with its own CLI, `ctr`
- [CRI-O](https://github.com/cri-o/cri-o/blob/master/README.md):
- maintained by Red Hat, SUSE, and community
- used by OpenShift and Kubic
- designed specifically as a minimal runtime for Kubernetes
- [And more](https://kubernetes.io/docs/setup/production-environment/container-runtimes/)
(like CRI-O, or containerd)
---

View File

@@ -193,12 +193,7 @@
- Best practice: set a memory limit, and pass it to the runtime
- Note: recent versions of the JVM can do this automatically
(see [JDK-8146115](https://bugs.java.com/bugdatabase/view_bug.do?bug_id=JDK-8146115))
and
[this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/)
for detailed examples)
(see [this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/) for a detailed example)
---

View File

@@ -4,29 +4,15 @@
- We want one (and exactly one) instance of `rng` per node
- We *do not want* two instances of `rng` on the same node
- What if we just scale up `deploy/rng` to the number of nodes?
- We will do that with a *daemon set*
- nothing guarantees that the `rng` containers will be distributed evenly
---
- if we add nodes later, they will not automatically run a copy of `rng`
## Why not a deployment?
- if we remove (or reboot) a node, one `rng` container will restart elsewhere
- Can't we just do `kubectl scale deployment rng --replicas=...`?
--
- Nothing guarantees that the `rng` containers will be distributed evenly
- If we add nodes later, they will not automatically run a copy of `rng`
- If we remove (or reboot) a node, one `rng` container will restart elsewhere
(and we will end up with two instances of `rng` on the same node)
- By contrast, a daemon set will start one pod per node and keep it that way
(as nodes are added or removed)
- Instead of a `deployment`, we will use a `daemonset`
---
@@ -52,7 +38,7 @@
<!-- ##VERSION## -->
- Unfortunately, as of Kubernetes 1.15, the CLI cannot create daemon sets
- Unfortunately, as of Kubernetes 1.14, the CLI cannot create daemon sets
--

View File

@@ -105,22 +105,6 @@ The dashboard will then ask you which authentication you want to use.
---
## Other dashboards
- [Kube Web View](https://codeberg.org/hjacobs/kube-web-view)
- read-only dashboard
- optimized for "troubleshooting and incident response"
- see [vision and goals](https://kube-web-view.readthedocs.io/en/latest/vision.html#vision) for details
- [Kube Ops View](https://github.com/hjacobs/kube-ops-view)
- "provides a common operational picture for multiple Kubernetes clusters"
---
# Security implications of `kubectl apply`
- When we do `kubectl apply -f <URL>`, we create arbitrary resources
@@ -172,3 +156,4 @@ The dashboard will then ask you which authentication you want to use.
- It introduces new failure modes
(for instance, if you try to apply YAML from a link that's no longer valid)

View File

@@ -175,7 +175,7 @@ Success!
]
We should get `No resources found.` and the `kubernetes` service, respectively.
So far, so good.
Note: the API server automatically created the `kubernetes` service entry.
@@ -225,7 +225,7 @@ Success?
]
Our Deployment is in bad shape:
Our Deployment is in a bad shape:
```
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/web 0/1 0 0 2m26s
@@ -481,13 +481,13 @@ docker run alpine echo hello world
.exercise[
- Create the file `~/.kube/config` with `kubectl`:
- Create the file `kubeconfig.kubelet` with `kubectl`:
```bash
kubectl config \
kubectl --kubeconfig kubeconfig.kubelet config \
set-cluster localhost --server http://localhost:8080
kubectl config \
kubectl --kubeconfig kubeconfig.kubelet config \
set-context localhost --cluster localhost
kubectl config \
kubectl --kubeconfig kubeconfig.kubelet config \
use-context localhost
```
@@ -495,7 +495,19 @@ docker run alpine echo hello world
---
## Our `~/.kube/config` file
## All Kubernetes clients can use `kubeconfig`
- The `kubeconfig.kubelet` file has the same format as e.g. `~/.kubeconfig`
- All Kubernetes clients can use a similar file
- The `kubectl config` commands can be used to manipulate these files
- This highlights that kubelet is a "normal" client of the API server
---
## Our `kubeconfig.kubelet` file
The file that we generated looks like the one below.
@@ -521,9 +533,9 @@ clusters:
.exercise[
- Start kubelet with that kubeconfig file:
- Start kubelet with that `kubeconfig.kubelet` file:
```bash
kubelet --kubeconfig ~/.kube/config
kubelet --kubeconfig kubeconfig.kubelet
```
]

View File

@@ -1,209 +0,0 @@
# Authoring YAML
- There are various ways to generate YAML with Kubernetes, e.g.:
- `kubectl run`
- `kubectl create deployment` (and a few other `kubectl create` variants)
- `kubectl expose`
- When and why do we need to write our own YAML?
- How do we write YAML from scratch?
---
## The limits of generated YAML
- Many advanced (and even not-so-advanced) features require to write YAML:
- pods with multiple containers
- resource limits
- healthchecks
- DaemonSets, StatefulSets
- and more!
- How do we access these features?
---
## We don't have to start from scratch
- Create a resource (e.g. Deployment)
- Dump its YAML with `kubectl get -o yaml ...`
- Edit the YAML
- Use `kubectl apply -f ...` with the YAML file to:
- update the resource (if it's the same kind)
- create a new resource (if it's a different kind)
- Or: Use The Docs, Luke
(the documentation almost always has YAML examples)
---
## Generating YAML without creating resources
- We can use the `--dry-run` option
.exercise[
- Generate the YAML for a Deployment without creating it:
```bash
kubectl create deployment web --image nginx --dry-run
```
]
- We can clean up that YAML even more if we want
(for instance, we can remove the `creationTimestamp` and empty dicts)
---
## Using `--dry-run` with `kubectl apply`
- The `--dry-run` option can also be used with `kubectl apply`
- However, it can be misleading (it doesn't do a "real" dry run)
- Let's see what happens in the following scenario:
- generate the YAML for a Deployment
- tweak the YAML to transform it into a DaemonSet
- apply that YAML to see what would actually be created
---
## The limits of `kubectl apply --dry-run`
.exercise[
- Generate the YAML for a deployment:
```bash
kubectl create deployment web --image=nginx -o yaml > web.yaml
```
- Change the `kind` in the YAML to make it a `DaemonSet`:
```bash
sed -i s/Deployment/DaemonSet/ web.yaml
```
- Ask `kubectl` what would be applied:
```bash
kubectl apply -f web.yaml --dry-run --validate=false -o yaml
```
]
The resulting YAML doesn't represent a valid DaemonSet.
---
## Server-side dry run
- Since Kubernetes 1.13, we can use [server-side dry run and diffs](https://kubernetes.io/blog/2019/01/14/apiserver-dry-run-and-kubectl-diff/)
- Server-side dry run will do all the work, but *not* persist to etcd
(all validation and mutation hooks will be executed)
.exercise[
- Try the same YAML file as earlier, with server-side dry run:
```bash
kubectl apply -f web.yaml --server-dry-run --validate=false -o yaml
```
]
The resulting YAML doesn't have the `replicas` field anymore.
Instead, it has the fields expected in a DaemonSet.
---
## Advantages of server-side dry run
- The YAML is verified much more extensively
- The only step that is skipped is "write to etcd"
- YAML that passes server-side dry run *should* apply successfully
(unless the cluster state changes by the time the YAML is actually applied)
- Validating or mutating hooks that have side effects can also be an issue
---
## `kubectl diff`
- Kubernetes 1.13 also introduced `kubectl diff`
- `kubectl diff` does a server-side dry run, *and* shows differences
.exercise[
- Try `kubectl diff` on the YAML that we tweaked earlier:
```bash
kubectl diff -f web.yaml
```
]
Note: we don't need to specify `--validate=false` here.
---
## Advantage of YAML
- Using YAML (instead of `kubectl run`/`create`/etc.) allows to be *declarative*
- The YAML describes the desired state of our cluster and applications
- YAML can be stored, versioned, archived (e.g. in git repositories)
- To change resources, change the YAML files
(instead of using `kubectl edit`/`scale`/`label`/etc.)
- Changes can be reviewed before being applied
(with code reviews, pull requests ...)
- This workflow is sometimes called "GitOps"
(there are tools like Weave Flux or GitKube to facilitate it)
---
## YAML in practice
- Get started with `kubectl run`/`create`/`expose`/etc.
- Dump the YAML with `kubectl get -o yaml`
- Tweak that YAML and `kubectl apply` it back
- Store that YAML for reference (for further deployments)
- Feel free to clean up the YAML:
- remove fields you don't know
- check that it still works!
- That YAML will be useful later when using e.g. Kustomize or Helm

View File

@@ -87,7 +87,7 @@
- Clone the Flux repository:
```
git clone https://github.com/fluxcd/flux
git clone https://github.com/weaveworks/flux
```
- Edit `deploy/flux-deployment.yaml`

View File

@@ -1,3 +1,41 @@
## Questions to ask before adding healthchecks
- Do we want liveness, readiness, both?
(sometimes, we can use the same check, but with different failure thresholds)
- Do we have existing HTTP endpoints that we can use?
- Do we need to add new endpoints, or perhaps use something else?
- Are our healthchecks likely to use resources and/or slow down the app?
- Do they depend on additional services?
(this can be particularly tricky, see next slide)
---
## Healthchecks and dependencies
- A good healthcheck should always indicate the health of the service itself
- It should not be affected by the state of the service's dependencies
- Example: a web server requiring a database connection to operate
(make sure that the healthcheck can report "OK" even if the database is down;
<br/>
because it won't help us to restart the web server if the issue is with the DB!)
- Example: a microservice calling other microservices
- Example: a worker process
(these will generally require minor code changes to report health)
---
## Adding healthchecks to an app
- Let's add healthchecks to DockerCoins!
@@ -266,15 +304,15 @@ It will use the default success threshold (1 successful attempt = alive).
- We need to make sure that the healthcheck doesn't trip when
performance degrades due to external pressure
- Using a readiness check would have fewer effects
- Using a readiness check would have lesser effects
(but it would still be an imperfect solution)
(but it still would be an imperfect solution)
- A possible combination:
- readiness check with a short timeout / low failure threshold
- liveness check with a longer timeout / higher failure threshold
- liveness check with a longer timeout / higher failure treshold
---
@@ -306,7 +344,7 @@ class: extra-details
- When a process is killed, its children are *orphaned* and attached to PID 1
- PID 1 has the responsibility of *reaping* these processes when they terminate
- PID 1 has the responsibility if *reaping* these processes when they terminate
- OK, but how does that affect us?
@@ -332,4 +370,24 @@ class: extra-details
(and have gcr.io/pause take care of the reaping)
- Discussion of this in [Video - 10 Ways to Shoot Yourself in the Foot with Kubernetes, #9 Will Surprise You](https://www.youtube.com/watch?v=QKI-JRs2RIE)
---
## Healthchecks for worker
- Readiness isn't useful
(because worker isn't a backend for a service)
- Liveness may help us to restart a broken worker, but how can we check it?
- Embedding an HTTP server is an option
(but it has a high potential for unwanted side-effects and false positives)
- Using a "lease" file can be relatively easy:
- touch a file during each iteration of the main loop
- check the timestamp of that file from an exec probe
- Writing logs (and checking them from the probe) also works

View File

@@ -42,11 +42,9 @@
- internal corruption (causing all requests to error)
- Anything where our incident response would be "just restart/reboot it"
- If the liveness probe fails *N* consecutive times, the container is killed
.warning[**Do not** use liveness probes for problems that can't be fixed by a restart]
- Otherwise we just restart our pods for no reason, creating useless load
- *N* is the `failureThreshold` (3 by default)
---
@@ -54,7 +52,7 @@
- Indicates if the container is ready to serve traffic
- If a container becomes "unready" it might be ready again soon
- If a container becomes "unready" (let's say busy!) it might be ready again soon
- If the readiness probe fails:
@@ -68,79 +66,19 @@
## When to use a readiness probe
- To indicate failure due to an external cause
- To indicate temporary failures
- database is down or unreachable
- the application can only service *N* parallel connections
- mandatory auth or other backend service unavailable
- the runtime is busy doing garbage collection or initial data load
- To indicate temporary failure or unavailability
- The container is marked as "not ready" after `failureThreshold` failed attempts
- application can only service *N* parallel connections
(3 by default)
- runtime is busy doing garbage collection or initial data load
- It is marked again as "ready" after `successThreshold` successful attempts
- For processes that take a long time to start
(more on that later)
---
## Dependencies
- If a web server depends on a database to function, and the database is down:
- the web server's liveness probe should succeed
- the web server's readiness probe should fail
- Same thing for any hard dependency (without which the container can't work)
.warning[**Do not** fail liveness probes for problems that are external to the container]
---
## Timing and thresholds
- Probes are executed at intervals of `periodSeconds` (default: 10)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
.warning[If a probe takes longer than that, it is considered a failure]
- A probe is considered successful after `successThreshold` successes (default: 1)
- A probe is considered failing after `failureThreshold` failures (default: 3)
- A probe can have an `initialDelaySeconds` parameter (default: 0)
- Kubernetes will wait that amount of time before running the probe for the first time
(this is important to avoid killing services that take a long time to start)
---
class: extra-details
## Startup probe
- Kubernetes 1.16 introduces a third type of probe: `startupProbe`
(it is in `alpha` in Kubernetes 1.16)
- It can be used to indicate "container not ready *yet*"
- process is still starting
- loading external data, priming caches
- Before Kubernetes 1.16, we had to use the `initialDelaySeconds` parameter
(available for both liveness and readiness probes)
- `initialDelaySeconds` is a rigid delay (always wait X before running probes)
- `startupProbe` works better when a container start time can vary a lot
(1 by default)
---
@@ -174,12 +112,10 @@ class: extra-details
(instead of serving errors or timeouts)
- Unavailable backends get removed from load balancer rotation
- Overloaded backends get removed from load balancer rotation
(thus improving response times across the board)
- If a probe is not defined, it's as if there was an "always successful" probe
---
## Example: HTTP probe
@@ -229,56 +165,14 @@ If the Redis process becomes unresponsive, it will be killed.
---
## Questions to ask before adding healthchecks
## Details about liveness and readiness probes
- Do we want liveness, readiness, both?
- Probes are executed at intervals of `periodSeconds` (default: 10)
(sometimes, we can use the same check, but with different failure thresholds)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
- Do we have existing HTTP endpoints that we can use?
- A probe is considered successful after `successThreshold` successes (default: 1)
- Do we need to add new endpoints, or perhaps use something else?
- A probe is considered failing after `failureThreshold` failures (default: 3)
- Are our healthchecks likely to use resources and/or slow down the app?
- Do they depend on additional services?
(this can be particularly tricky, see next slide)
---
## Healthchecks and dependencies
- Liveness checks should not be influenced by the state of external services
- All checks should reply quickly (by default, less than 1 second)
- Otherwise, they are considered to fail
- This might require to check the health of dependencies asynchronously
(e.g. if a database or API might be healthy but still take more than
1 second to reply, we should check the status asynchronously and report
a cached status)
---
## Healthchecks for workers
(In that context, worker = process that doesn't accept connections)
- Readiness isn't useful
(because workers aren't backends for a service)
- Liveness may help us restart a broken worker, but how can we check it?
- Embedding an HTTP server is a (potentially expensive) option
- Using a "lease" file can be relatively easy:
- touch a file during each iteration of the main loop
- check the timestamp of that file from an exec probe
- Writing logs (and checking them from the probe) also works
- If a probe is not defined, it's as if there was an "always successful" probe

View File

@@ -415,7 +415,7 @@ This is normal: we haven't provided any ingress rule yet.
Here is a minimal host-based ingress resource:
```yaml
apiVersion: networking.k8s.io/v1beta1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cheddar
@@ -523,4 +523,4 @@ spec:
- This should eventually stabilize
(remember that ingresses are currently `apiVersion: networking.k8s.io/v1beta1`)
(remember that ingresses are currently `apiVersion: extensions/v1beta1`)

View File

@@ -153,7 +153,10 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
kubectl logs deploy/pingpong --tail 1 --follow
```
- Leave that command running, so that we can keep an eye on these logs
<!--
```wait seq=3```
```keys ^C```
-->
]
@@ -183,44 +186,6 @@ We could! But the *deployment* would notice it right away, and scale back to the
---
## Log streaming
- Let's look again at the output of `kubectl logs`
(the one we started before scaling up)
- `kubectl logs` shows us one line per second
- We could expect 3 lines per second
(since we should now have 3 pods running `ping`)
- Let's try to figure out what's happening!
---
## Streaming logs of multiple pods
- What happens if we restart `kubectl logs`?
.exercise[
- Interrupt `kubectl logs` (with Ctrl-C)
- Restart it:
```bash
kubectl logs deploy/pingpong --tail 1 --follow
```
]
`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them.
Let's leave `kubectl logs` running while we keep exploring.
---
## Resilience
- The *deployment* `pingpong` watches its *replica set*
@@ -231,12 +196,20 @@ Let's leave `kubectl logs` running while we keep exploring.
.exercise[
- In a separate window, watch the list of pods:
- In a separate window, list pods, and keep watching them:
```bash
watch kubectl get pods
kubectl get pods -w
```
- Destroy the pod currently shown by `kubectl logs`:
<!--
```wait Running```
```keys ^C```
```hide kubectl wait deploy pingpong --for condition=available```
```keys kubectl delete pod ping```
```copypaste pong-..........-.....```
-->
- Destroy a pod:
```
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
```
@@ -244,23 +217,6 @@ Let's leave `kubectl logs` running while we keep exploring.
---
## What happened?
- `kubectl delete pod` terminates the pod gracefully
(sending it the TERM signal and waiting for it to shutdown)
- As soon as the pod is in "Terminating" state, the Replica Set replaces it
- But we can still see the output of the "Terminating" pod in `kubectl logs`
- Until 30 seconds later, when the grace period expires
- The pod is then killed, and `kubectl logs` exits
---
## What if we wanted something different?
- What if we wanted to start a "one-shot" container that *doesn't* get restarted?
@@ -278,72 +234,6 @@ Let's leave `kubectl logs` running while we keep exploring.
---
## Scheduling periodic background work
- A Cron Job is a job that will be executed at specific intervals
(the name comes from the traditional cronjobs executed by the UNIX crond)
- It requires a *schedule*, represented as five space-separated fields:
- minute [0,59]
- hour [0,23]
- day of the month [1,31]
- month of the year [1,12]
- day of the week ([0,6] with 0=Sunday)
- `*` means "all valid values"; `/N` means "every N"
- Example: `*/3 * * * *` means "every three minutes"
---
## Creating a Cron Job
- Let's create a simple job to be executed every three minutes
- Cron Jobs need to terminate, otherwise they'd run forever
.exercise[
- Create the Cron Job:
```bash
kubectl run --schedule="*/3 * * * *" --restart=OnFailure --image=alpine sleep 10
```
- Check the resource that was created:
```bash
kubectl get cronjobs
```
]
---
## Cron Jobs in action
- At the specified schedule, the Cron Job will create a Job
- The Job will create a Pod
- The Job will make sure that the Pod completes
(re-creating another one if it fails, for instance if its node fails)
.exercise[
- Check the Jobs that are created:
```bash
kubectl get jobs
```
]
(It will take a few minutes before the first job is scheduled.)
---
## What about that deprecation warning?
- As we can see from the previous slide, `kubectl run` can do many things

View File

@@ -1,8 +1,8 @@
# Controlling a Kubernetes cluster remotely
# Controlling the cluster remotely
- `kubectl` can be used either on cluster instances or outside the cluster
- All the operations that we do with `kubectl` can be done remotely
- Here, we are going to use `kubectl` from our local machine
- In this section, we are going to use `kubectl` from our local machine
---
@@ -34,11 +34,11 @@
- Download the `kubectl` binary from one of these links:
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/linux/amd64/kubectl)
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/linux/amd64/kubectl)
|
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/darwin/amd64/kubectl)
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/darwin/amd64/kubectl)
|
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/windows/amd64/kubectl.exe)
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/windows/amd64/kubectl.exe)
- On Linux and macOS, make the binary executable with `chmod +x kubectl`
@@ -67,10 +67,10 @@ Note: if you are following along with a different platform (e.g. Linux on an arc
The output should look like this:
```
Client Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.0",
GitCommit:"e8462b5b5dc2584fdcd18e6bcfe9f1e4d970a529", GitTreeState:"clean",
BuildDate:"2019-06-19T16:40:16Z", GoVersion:"go1.12.5", Compiler:"gc",
Platform:"darwin/amd64"}
Client Version: version.Info{Major:"1", Minor:"14", GitVersion:"v1.14.0",
GitCommit:"641856db18352033a0d96dbc99153fa3b27298e5", GitTreeState:"clean",
BuildDate:"2019-03-25T15:53:57Z", GoVersion:"go1.12.1", Compiler:"gc",
Platform:"linux/amd64"}
```
---
@@ -192,4 +192,4 @@ class: extra-details
]
We can now utilize the cluster exactly as if we're logged into a node, except that it's remote.
We can now utilize the cluster exactly as we did before, except that it's remote.

View File

@@ -62,12 +62,10 @@ Exactly what we need!
- The following commands will install Stern on a Linux Intel 64 bit machine:
```bash
sudo curl -L -o /usr/local/bin/stern \
https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64
https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
```
- On OS X, just `brew install stern`
<!-- ##VERSION## -->
---

View File

@@ -1,8 +1,8 @@
# Checking pod and node resource usage
- Since Kubernetes 1.8, metrics are collected by the [resource metrics pipeline](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-metrics-pipeline/)
- Since Kubernetes 1.8, metrics are collected by the [core metrics pipeline](https://v1-13.docs.kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/)
- The resource metrics pipeline is:
- The core metrics pipeline is:
- optional (Kubernetes can function without it)
@@ -37,7 +37,7 @@ If it shows our nodes and their CPU and memory load, we're good!
(it doesn't need persistence, as it doesn't *store* metrics)
- It has its own repository, [kubernetes-incubator/metrics-server](https://github.com/kubernetes-incubator/metrics-server)
- It has its own repository, [kubernetes-incubator/metrics-server](https://github.com/kubernetes-incubator/metrics-server])
- The repository comes with [YAML files for deployment](https://github.com/kubernetes-incubator/metrics-server/tree/master/deploy/1.8%2B)
@@ -59,7 +59,7 @@ If it shows our nodes and their CPU and memory load, we're good!
- Show resource usage across all containers:
```bash
kubectl top pods --containers --all-namespaces
kuebectl top pods --containers --all-namespaces
```
]

View File

@@ -195,7 +195,7 @@ class: extra-details
## Check our pods
- The pods will be scheduled on the nodes
- The pods will be scheduled to the nodes
- The nodes will pull the `nginx` image, and start the pods
@@ -218,18 +218,6 @@ class: extra-details
## What's going on?
- Without the `--network-plugin` flag, kubelet defaults to "no-op" networking
- It lets the container engine use a default network
(in that case, we end up with the default Docker bridge)
- Our pods are running on independent, disconnected, host-local networks
---
## What do we need to do?
- On a normal cluster, kubelet is configured to set up pod networking with CNI plugins
- This requires:
@@ -240,6 +228,14 @@ class: extra-details
- running kubelet with `--network-plugin=cni`
- Without the `--network-plugin` flag, kubelet defaults to "no-op" networking
- It lets the container engine use a default network
(in that case, we end up with the default Docker bridge)
- Our pods are running on independent, disconnected, host-local networks
---
## Using network plugins
@@ -329,7 +325,7 @@ class: extra-details
- We will add the `--network-plugin` and `--pod-cidr` flags
- We all have a "cluster number" (let's call that `C`) printed on your VM info card
- We all have a "cluster number" (let's call that `C`)
- We will use pod CIDR `10.C.N.0/24` (where `N` is the node number: 1, 2, 3)
@@ -398,7 +394,7 @@ class: extra-details
- Start kube-proxy:
```bash
sudo kube-proxy --kubeconfig ~/.kube/config
sudo kube-proxy --kubeconfig ~/kubeconfig
```
- Expose our Deployment:
@@ -484,23 +480,6 @@ Sometimes it works, sometimes it doesn't. Why?
```bash
kubectl get nodes -o wide
```
---
## Firewalling
- By default, Docker prevents containers from using arbitrary IP addresses
(by setting up iptables rules)
- We need to allow our containers to use our pod CIDR
- For simplicity, we will insert a blanket iptables rule allowing all traffic:
`iptables -I FORWARD -j ACCEPT`
- This has to be done on every node
---
## Setting up routing
@@ -509,8 +488,6 @@ Sometimes it works, sometimes it doesn't. Why?
- Create all the routes on all the nodes
- Insert the iptables rule allowing traffic
- Check that you can ping all the pods from one of the nodes
- Check that you can `curl` the ClusterIP of the Service successfully

View File

@@ -1,379 +0,0 @@
# OpenID Connect
- The Kubernetes API server can perform authentication with OpenID connect
- This requires an *OpenID provider*
(external authorization server using the OAuth 2.0 protocol)
- We can use a third-party provider (e.g. Google) or run our own (e.g. Dex)
- We are going to give an overview of the protocol
- We will show it in action (in a simplified scenario)
---
## Workflow overview
- We want to access our resources (a Kubernetes cluster)
- We authenticate with the OpenID provider
- we can do this directly (e.g. by going to https://accounts.google.com)
- or maybe a kubectl plugin can open a browser page on our behalf
- After authenticating us, the OpenID provider gives us:
- an *id token* (a short-lived signed JSON Web Token, see next slide)
- a *refresh token* (to renew the *id token* when needed)
- We can now issue requests to the Kubernetes API with the *id token*
- The API server will verify that token's content to authenticate us
---
## JSON Web Tokens
- A JSON Web Token (JWT) has three parts:
- a header specifying algorithms and token type
- a payload (indicating who issued the token, for whom, which purposes...)
- a signature generated by the issuer (the issuer = the OpenID provider)
- Anyone can verify a JWT without contacting the issuer
(except to obtain the issuer's public key)
- Pro tip: we can inspect a JWT with https://jwt.io/
---
## How the Kubernetes API uses JWT
- Server side
- enable OIDC authentication
- indicate which issuer (provider) should be allowed
- indicate which audience (or "client id") should be allowed
- optionally, map or prefix user and group names
- Client side
- obtain JWT as described earlier
- pass JWT as authentication token
- renew JWT when needed (using the refresh token)
---
## Demo time!
- We will use [Google Accounts](https://accounts.google.com) as our OpenID provider
- We will use the [Google OAuth Playground](https://developers.google.com/oauthplayground) as the "audience" or "client id"
- We will obtain a JWT through Google Accounts and the OAuth Playground
- We will enable OIDC in the Kubernetes API server
- We will use the JWT to authenticate
.footnote[If you can't or won't use a Google account, you can try to adapt this to another provider.]
---
## Checking the API server logs
- The API server logs will be particularly useful in this section
(they will indicate e.g. why a specific token is rejected)
- Let's keep an eye on the API server output!
.exercise[
- Tail the logs of the API server:
```bash
kubectl logs kube-apiserver-node1 --follow --namespace=kube-system
```
]
---
## Authenticate with the OpenID provider
- We will use the Google OAuth Playground for convenience
- In a real scenario, we would need our own OAuth client instead of the playground
(even if we were still using Google as the OpenID provider)
.exercise[
- Open the Google OAuth Playground:
```
https://developers.google.com/oauthplayground/
```
- Enter our own custom scope in the text field:
```
https://www.googleapis.com/auth/userinfo.email
```
- Click on "Authorize APIs" and allow the playground to access our email address
]
---
## Obtain our JSON Web Token
- The previous step gave us an "authorization code"
- We will use it to obtain tokens
.exercise[
- Click on "Exchange authorization code for tokens"
]
- The JWT is the very long `id_token` that shows up on the right hand side
(it is a base64-encoded JSON object, and should therefore start with `eyJ`)
---
## Using our JSON Web Token
- We need to create a context (in kubeconfig) for our token
(if we just add the token or use `kubectl --token`, our certificate will still be used)
.exercise[
- Create a new authentication section in kubeconfig:
```bash
kubectl config set-credentials myjwt --token=eyJ...
```
- Try to use it:
```bash
kubectl --user=myjwt get nodes
```
]
We should get an `Unauthorized` response, since we haven't enabled OpenID Connect in the API server yet. We should also see `invalid bearer token` in the API server log output.
---
## Enabling OpenID Connect
- We need to add a few flags to the API server configuration
- These two are mandatory:
`--oidc-issuer-url` → URL of the OpenID provider
`--oidc-client-id` → app requesting the authentication
<br/>(in our case, that's the ID for the Google OAuth Playground)
- This one is optional:
`--oidc-username-claim` → which field should be used as user name
<br/>(we will use the user's email address instead of an opaque ID)
- See the [API server documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server) for more details about all available flags
---
## Updating the API server configuration
- The instructions below will work for clusters deployed with kubeadm
(or where the control plane is deployed in static pods)
- If your cluster is deployed differently, you will need to adapt them
.exercise[
- Edit `/etc/kubernetes/manifests/kube-apiserver.yaml`
- Add the following lines to the list of command-line flags:
```yaml
- --oidc-issuer-url=https://accounts.google.com
- --oidc-client-id=407408718192.apps.googleusercontent.com
- --oidc-username-claim=email
```
]
---
## Restarting the API server
- The kubelet monitors the files in `/etc/kubernetes/manifests`
- When we save the pod manifest, kubelet will restart the corresponding pod
(using the updated command line flags)
.exercise[
- After making the changes described on the previous slide, save the file
- Issue a simple command (like `kubectl version`) until the API server is back up
(it might take between a few seconds and one minute for the API server to restart)
- Restart the `kubectl logs` command to view the logs of the API server
]
---
## Using our JSON Web Token
- Now that the API server is set up to recognize our token, try again!
.exercise[
- Try an API command with our token:
```bash
kubectl --user=myjwt get nodes
kubectl --user=myjwt get pods
```
]
We should see a message like:
```
Error from server (Forbidden): nodes is forbidden: User "jean.doe@gmail.com"
cannot list resource "nodes" in API group "" at the cluster scope
```
→ We were successfully *authenticated*, but not *authorized*.
---
## Authorizing our user
- As an extra step, let's grant read access to our user
- We will use the pre-defined ClusterRole `view`
.exercise[
- Create a ClusterRoleBinding allowing us to view resources:
```bash
kubectl create clusterrolebinding i-can-view \
--user=`jean.doe@gmail.com` --clusterrole=view
```
(make sure to put *your* Google email address there)
- Confirm that we can now list pods with our token:
```bash
kubectl --user=myjwt get pods
```
]
---
## From demo to production
.warning[This was a very simplified demo! In a real deployment...]
- We wouldn't use the Google OAuth Playground
- We *probably* wouldn't even use Google at all
(it doesn't seem to provide a way to include groups!)
- Some popular alternatives:
- [Dex](https://github.com/dexidp/dex),
[Keycloak](https://www.keycloak.org/)
(self-hosted)
- [Okta](https://developer.okta.com/docs/how-to/creating-token-with-groups-claim/#step-five-decode-the-jwt-to-verify)
(SaaS)
- We would use a helper (like the [kubelogin](https://github.com/int128/kubelogin) plugin) to automatically obtain tokens
---
class: extra-details
## Service Account tokens
- The tokens used by Service Accounts are JWT tokens as well
- They are signed and verified using a special service account key pair
.exercise[
- Extract the token of a service account in the current namespace:
```bash
kubectl get secrets -o jsonpath={..token} | base64 -d
```
- Copy-paste the token to a verification service like https://jwt.io
- Notice that it says "Invalid Signature"
]
---
class: extra-details
## Verifying Service Account tokens
- JSON Web Tokens embed the URL of the "issuer" (=OpenID provider)
- The issuer provides its public key through a well-known discovery endpoint
(similar to https://accounts.google.com/.well-known/openid-configuration)
- There is no such endpoint for the Service Account key pair
- But we can provide the public key ourselves for verification
---
class: extra-details
## Verifying a Service Account token
- On clusters provisioned with kubeadm, the Service Account key pair is:
`/etc/kubernetes/pki/sa.key` (used by the controller manager to generate tokens)
`/etc/kubernetes/pki/sa.pub` (used by the API server to validate the same tokens)
.exercise[
- Display the public key used to sign Service Account tokens:
```bash
sudo cat /etc/kubernetes/pki/sa.pub
```
- Copy-paste the key in the "verify signature" area on https://jwt.io
- It should now say "Signature Verified"
]

View File

@@ -32,7 +32,7 @@
- must be able to anticipate all the events that might happen
- design will be better only to the extent of what we anticipated
- design will be better only to the extend of what we anticipated
- hard to anticipate if we don't have production experience
@@ -86,7 +86,7 @@ class: extra-details
## What can we store via the Kubernetes API?
- The API server stores most Kubernetes resources in etcd
- The API server stores most Kubernetes resources into etcd
- Etcd is designed for reliability, not for performance
@@ -187,8 +187,6 @@ class: extra-details
[Intro talk](https://www.youtube.com/watch?v=8k_ayO1VRXE)
|
[Deep dive talk](https://www.youtube.com/watch?v=fu7ecA2rXmc)
|
[Simple example](https://medium.com/faun/writing-your-first-kubernetes-operator-8f3df4453234)
- Zalando Kubernetes Operator Pythonic Framework (KOPF)

View File

@@ -302,7 +302,7 @@ Now, the StorageClass should have `(default)` next to its name.
- Retrieve the NodePort that was allocated:
```bash
kubectl get svc cerebro-es
kubectl get svc cerebreo-es
```
- Connect to that port with a browser
@@ -386,6 +386,4 @@ We should see at least one index being created in cerebro.
- What if we want different images or parameters for the different nodes?
*Operators can be very powerful.
<br/>
But we need to know exactly the scenarios that they can handle.*
*Operators can be very powerful, iff we know exactly the scenarios that they can handle.*

View File

@@ -11,36 +11,16 @@
- Deploy everything else:
```bash
kubectl create deployment hasher --image=dockercoins/hasher:v0.1
kubectl create deployment rng --image=dockercoins/rng:v0.1
kubectl create deployment webui --image=dockercoins/webui:v0.1
kubectl create deployment worker --image=dockercoins/worker:v0.1
set -u
for SERVICE in hasher rng webui worker; do
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
done
```
]
---
class: extra-details
## Deploying other images
- If we wanted to deploy images from another registry ...
- ... Or with a different tag ...
- ... We could use the following snippet:
```bash
REGISTRY=dockercoins
TAG=v0.1
for SERVICE in hasher rng webui worker; do
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
done
```
---
## Is this working?
- After waiting for the deployment to complete, let's look at the logs!

View File

@@ -1,4 +1,4 @@
# Highly available Persistent Volumes
# Highly available Persistent Volumes (extra material)
- How can we achieve true durability?

View File

@@ -60,11 +60,9 @@
(by default: every minute; can be more/less frequent)
- The list of URLs to scrape (the *scrape targets*) is defined in configuration
- If you're worried about parsing overhead: exporters can also use protobuf
.footnote[Worried about the overhead of parsing a text format?
<br/>
Check this [comparison](https://github.com/RichiH/OpenMetrics/blob/master/markdown/protobuf_vs_text.md) of the text format with the (now deprecated) protobuf format!]
- The list of URLs to scrape (the *scrape targets*) is defined in configuration
---

View File

@@ -1,169 +0,0 @@
# Recording deployment actions
- Some commands that modify a Deployment accept an optional `--record` flag
(Example: `kubectl set image deployment worker worker=alpine --record`)
- That flag will store the command line in the Deployment
(Technically, using the annotation `kubernetes.io/change-cause`)
- It gets copied to the corresponding ReplicaSet
(Allowing to keep track of which command created or promoted this ReplicaSet)
- We can view this information with `kubectl rollout history`
---
## Using `--record`
- Let's make a couple of changes to a Deployment and record them
.exercise[
- Roll back `worker` to image version 0.1:
```bash
kubectl set image deployment worker worker=dockercoins/worker:v0.1 --record
```
- Promote it to version 0.2 again:
```bash
kubectl set image deployment worker worker=dockercoins/worker:v0.2 --record
```
- View the change history:
```bash
kubectl rollout history deployment worker
```
]
---
## Pitfall #1: forgetting `--record`
- What happens if we don't specify `--record`?
.exercise[
- Promote `worker` to image version 0.3:
```bash
kubectl set image deployment worker worker=dockercoins/worker:v0.3
```
- View the change history:
```bash
kubectl rollout history deployment worker
```
]
--
It recorded version 0.2 instead of 0.3! Why?
---
## How `--record` really works
- `kubectl` adds the annotation `kubernetes.io/change-cause` to the Deployment
- The Deployment controller copies that annotation to the ReplicaSet
- `kubectl rollout history` shows the ReplicaSets' annotations
- If we don't specify `--record`, the annotation is not updated
- The previous value of that annotation is copied to the new ReplicaSet
- In that case, the ReplicaSet annotation does not reflect reality!
---
## Pitfall #2: recording `scale` commands
- What happens if we use `kubectl scale --record`?
.exercise[
- Check the current history:
```bash
kubectl rollout history deployment worker
```
- Scale the deployment:
```bash
kubectl scale deployment worker --replicas=3 --record
```
- Check the change history again:
```bash
kubectl rollout history deployment worker
```
]
--
The last entry in the history was overwritten by the `scale` command! Why?
---
## Actions that don't create a new ReplicaSet
- The `scale` command updates the Deployment definition
- But it doesn't create a new ReplicaSet
- Using the `--record` flag sets the annotation like before
- The annotation gets copied to the existing ReplicaSet
- This overwrites the previous annotation that was there
- In that case, we lose the previous change cause!
---
## Updating the annotation directly
- Let's see what happens if we set the annotation manually
.exercise[
- Annotate the Deployment:
```bash
kubectl annotate deployment worker kubernetes.io/change-cause="Just for fun"
```
- Check that our annotation shows up in the change history:
```bash
kubectl rollout history deployment worker
```
]
--
Our annotation shows up (and overwrote whatever was there before).
---
## Using change cause
- It sounds like a good idea to use `--record`, but:
*"Incorrect documentation is often worse than no documentation."*
<br/>
(Bertrand Meyer)
- If we use `--record` once, we need to either:
- use it every single time after that
- or clear the Deployment annotation after using `--record`
<br/>
(subsequent changes will show up with a `<none>` change cause)
- A safer way is to set it through our tooling

View File

@@ -404,7 +404,7 @@ These quotas will apply to the namespace where the ResourceQuota is created.
- Example:
```bash
kubectl create quota my-resource-quota --hard=pods=300,limits.memory=300Gi
kubectl create quota sparta --hard=pods=300,limits.memory=300Gi
```
- With both YAML and CLI form, the values are always under the `hard` section
@@ -515,24 +515,3 @@ services.nodeports 0 0
(with `kubectl describe resourcequota ...`)
- Rinse and repeat regularly
---
## Additional resources
- [A Practical Guide to Setting Kubernetes Requests and Limits](http://blog.kubecost.com/blog/requests-and-limits/)
- explains what requests and limits are
- provides guidelines to set requests and limits
- gives PromQL expressions to compute good values
<br/>(our app needs to be running for a while)
- [Kube Resource Report](https://github.com/hjacobs/kube-resource-report/)
- generates web reports on resource usage
- [static demo](https://hjacobs.github.io/kube-resource-report/sample-report/output/index.html)
|
[live demo](https://kube-resource-report.demo.j-serv.de/applications.html)

View File

@@ -14,27 +14,7 @@
## Rolling updates
- With rolling updates, when a Deployment is updated, it happens progressively
- The Deployment controls multiple Replica Sets
- Each Replica Set is a group of identical Pods
(with the same image, arguments, parameters ...)
- During the rolling update, we have at least two Replica Sets:
- the "new" set (corresponding to the "target" version)
- at least one "old" set
- We can have multiple "old" sets
(if we start another update before the first one is done)
---
## Update strategy
- With rolling updates, when a resource is updated, it happens progressively
- Two parameters determine the pace of the rollout: `maxUnavailable` and `maxSurge`
@@ -81,6 +61,32 @@
---
## Building a new version of the `worker` service
.warning[
Only run these commands if you have built and pushed DockerCoins to a local registry.
<br/>
If you are using images from the Docker Hub (`dockercoins/worker:v0.1`), skip this.
]
.exercise[
- Go to the `stacks` directory (`~/container.training/stacks`)
- Edit `dockercoins/worker/worker.py`; update the first `sleep` line to sleep 1 second
- Build a new tag and push it to the registry:
```bash
#export REGISTRY=localhost:3xxxx
export TAG=v0.2
docker-compose -f dockercoins.yml build
docker-compose -f dockercoins.yml push
```
]
---
## Rolling out the new `worker` service
.exercise[
@@ -99,7 +105,7 @@
- Update `worker` either with `kubectl edit`, or by running:
```bash
kubectl set image deploy worker worker=dockercoins/worker:v0.2
kubectl set image deploy worker worker=$REGISTRY/worker:$TAG
```
]
@@ -140,7 +146,8 @@ That rollout should be pretty quick. What shows in the web UI?
- Update `worker` by specifying a non-existent image:
```bash
kubectl set image deploy worker worker=dockercoins/worker:v0.3
export TAG=v0.3
kubectl set image deploy worker worker=$REGISTRY/worker:$TAG
```
- Check what's going on:
@@ -209,14 +216,27 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
.exercise[
- Connect to the dashboard that we deployed earlier
- Check that we have failures in Deployments, Pods, and Replica Sets
- Can we see the reason for the failure?
- Check which port the dashboard is on:
```bash
kubectl -n kube-system get svc socat
```
]
Note the `3xxxx` port.
.exercise[
- Connect to http://oneofournodes:3xxxx/
<!-- ```open https://node1:3xxxx/``` -->
]
--
- We have failures in Deployments, Pods, and Replica Sets
---
## Recovering from a bad rollout
@@ -245,139 +265,6 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
---
## Rolling back to an older version
- We reverted to `v0.2`
- But this version still has a performance problem
- How can we get back to the previous version?
---
## Multiple "undos"
- What happens if we try `kubectl rollout undo` again?
.exercise[
- Try it:
```bash
kubectl rollout undo deployment worker
```
- Check the web UI, the list of pods ...
]
🤔 That didn't work.
---
## Multiple "undos" don't work
- If we see successive versions as a stack:
- `kubectl rollout undo` doesn't "pop" the last element from the stack
- it copies the N-1th element to the top
- Multiple "undos" just swap back and forth between the last two versions!
.exercise[
- Go back to v0.2 again:
```bash
kubectl rollout undo deployment worker
```
]
---
## In this specific scenario
- Our version numbers are easy to guess
- What if we had used git hashes?
- What if we had changed other parameters in the Pod spec?
---
## Listing versions
- We can list successive versions of a Deployment with `kubectl rollout history`
.exercise[
- Look at our successive versions:
```bash
kubectl rollout history deployment worker
```
]
We don't see *all* revisions.
We might see something like 1, 4, 5.
(Depending on how many "undos" we did before.)
---
## Explaining deployment revisions
- These revisions correspond to our Replica Sets
- This information is stored in the Replica Set annotations
.exercise[
- Check the annotations for our replica sets:
```bash
kubectl describe replicasets -l app=worker | grep -A3
```
]
---
class: extra-details
## What about the missing revisions?
- The missing revisions are stored in another annotation:
`deployment.kubernetes.io/revision-history`
- These are not shown in `kubectl rollout history`
- We could easily reconstruct the full list with a script
(if we wanted to!)
---
## Rolling back to an older version
- `kubectl rollout undo` can work with a revision number
.exercise[
- Roll back to the "known good" deployment version:
```bash
kubectl rollout undo deployment worker --to-revision=1
```
- Check the web UI or the list of pods
]
---
class: extra-details
## Changing rollout parameters
- We want to:
@@ -396,7 +283,7 @@ spec:
spec:
containers:
- name: worker
image: dockercoins/worker:v0.1
image: $REGISTRY/worker:v0.1
strategy:
rollingUpdate:
maxUnavailable: 0
@@ -407,8 +294,6 @@ spec:
---
class: extra-details
## Applying changes through a YAML patch
- We could use `kubectl edit deployment worker`
@@ -427,7 +312,7 @@ class: extra-details
spec:
containers:
- name: worker
image: dockercoins/worker:v0.1
image: $REGISTRY/worker:v0.1
strategy:
rollingUpdate:
maxUnavailable: 0

View File

@@ -61,8 +61,7 @@
- [minikube](https://kubernetes.io/docs/setup/minikube/),
[kubespawn](https://github.com/kinvolk/kube-spawn),
[Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/),
[kind](https://kind.sigs.k8s.io):
[Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/):
for local development
- [kubicorn](https://github.com/kubicorn/kubicorn),

View File

@@ -18,7 +18,7 @@ with a cloud provider
---
## EKS (the old way)
## EKS (the hard way)
- [Read the doc](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html)
@@ -36,7 +36,7 @@ with a cloud provider
---
## EKS (the new way)
## EKS (the easy way)
- Install `eksctl`
@@ -144,7 +144,7 @@ with a cloud provider
az login
```
- Select a [region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=kubernetes-service&regions=all
- Select a [region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=kubernetes-service\&regions=all
)
- Create a "resource group":
@@ -168,7 +168,7 @@ with a cloud provider
az aks get-credentials --resource-group my-aks-group --name my-aks-cluster
```
- The cluster has useful components pre-installed, such as the metrics server
- The cluster has a lot of goodies pre-installed
---
@@ -224,7 +224,7 @@ with a cloud provider
kubectl config use-context do-xxx1-my-do-cluster
```
- The cluster comes with some components (like Cilium) but no metrics server
- The cluster comes with some goodies (like Cilium) but no metrics server
---

View File

@@ -80,8 +80,6 @@
- Docker Enterprise Edition
- [AKS Engine](https://github.com/Azure/aks-engine)
- Pivotal Container Service (PKS)
- Tectonic by CoreOS

View File

@@ -345,7 +345,7 @@ spec:
we figure out the minimal command-line to run our Consul cluster.*
```
consul agent -data-dir=/consul/data -client=0.0.0.0 -server -ui \
consul agent -data=dir=/consul/data -client=0.0.0.0 -server -ui \
-bootstrap-expect=3 \
-retry-join=`X.X.X.X` \
-retry-join=`Y.Y.Y.Y`

View File

@@ -224,7 +224,7 @@ In the manifest, the pod was named `hello`.
```yaml
apiVersion: v1
kind: Pod
Kind: Pod
metadata:
name: hello
namespace: default

View File

@@ -1,8 +1,8 @@
## Versions installed
- Kubernetes 1.15.3
- Docker Engine 19.03.1
- Docker Compose 1.24.1
- Kubernetes 1.14.2
- Docker Engine 18.09.6
- Docker Compose 1.21.1
<!-- ##VERSION## -->
@@ -23,7 +23,7 @@ class: extra-details
## Kubernetes and Docker compatibility
- Kubernetes 1.15 validates Docker Engine versions [up to 18.09](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.15.md#dependencies)
- Kubernetes 1.14 validates Docker Engine versions [up to 18.09](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#external-dependencies)
<br/>
(the latest version when Kubernetes 1.14 was released)

View File

@@ -66,87 +66,7 @@ class: extra-details
---
## Adding a volume to a Pod
- We will start with the simplest Pod manifest we can find
- We will add a volume to that Pod manifest
- We will mount that volume in a container in the Pod
- By default, this volume will be an `emptyDir`
(an empty directory)
- It will "shadow" the directory where it's mounted
---
## Our basic Pod
```yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx-without-volume
spec:
containers:
- name: nginx
image: nginx
```
This is an MVP! (Minimum Viable Pod😉)
It runs a single NGINX container.
---
## Trying the basic pod
.exercise[
- Create the Pod:
```bash
kubectl create -f ~/container.training/k8s/nginx-1-without-volume.yaml
```
- Get its IP address:
```bash
IPADDR=$(kubectl get pod nginx-without-volume -o jsonpath={.status.podIP})
```
- Send a request with curl:
```bash
curl $IPADDR
```
]
(We should see the "Welcome to NGINX" page.)
---
## Adding a volume
- We need to add the volume in two places:
- at the Pod level (to declare the volume)
- at the container level (to mount the volume)
- We will declare a volume named `www`
- No type is specified, so it will default to `emptyDir`
(as the name implies, it will be initialized as an empty directory at pod creation)
- In that pod, there is also a container named `nginx`
- That container mounts the volume `www` to path `/usr/share/nginx/html/`
---
## The Pod with a volume
## A simple volume example
```yaml
apiVersion: v1
@@ -166,57 +86,30 @@ spec:
---
## Trying the Pod with a volume
## A simple volume example, explained
.exercise[
- We define a standalone `Pod` named `nginx-with-volume`
- Create the Pod:
```bash
kubectl create -f ~/container.training/k8s/nginx-2-with-volume.yaml
```
- In that pod, there is a volume named `www`
- Get its IP address:
```bash
IPADDR=$(kubectl get pod nginx-with-volume -o jsonpath={.status.podIP})
```
- No type is specified, so it will default to `emptyDir`
- Send a request with curl:
```bash
curl $IPADDR
```
(as the name implies, it will be initialized as an empty directory at pod creation)
]
- In that pod, there is also a container named `nginx`
(We should now see a "403 Forbidden" error page.)
- That container mounts the volume `www` to path `/usr/share/nginx/html/`
---
## Populating the volume with another container
- Let's add another container to the Pod
- Let's mount the volume in *both* containers
- That container will populate the volume with static files
- NGINX will then serve these static files
- To populate the volume, we will clone the Spoon-Knife repository
- this repository is https://github.com/octocat/Spoon-Knife
- it's very popular (more than 100K stars!)
---
## Sharing a volume between two containers
## A volume shared between two containers
.small[
```yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-git
name: nginx-with-volume
spec:
volumes:
- name: www
@@ -254,72 +147,30 @@ spec:
---
## Trying the shared volume
## Sharing a volume, in action
- This one will be time-sensitive!
- We need to catch the Pod IP address *as soon as it's created*
- Then send a request to it *as fast as possible*
- Let's try it!
.exercise[
- Watch the pods (so that we can catch the Pod IP address)
- Create the pod by applying the YAML file:
```bash
kubectl get pods -o wide --watch
kubectl apply -f ~/container.training/k8s/nginx-with-volume.yaml
```
]
---
## Shared volume in action
.exercise[
- Create the pod:
- Check the IP address that was allocated to our pod:
```bash
kubectl create -f ~/container.training/k8s/nginx-3-with-git.yaml
kubectl get pod nginx-with-volume -o wide
IP=$(kubectl get pod nginx-with-volume -o json | jq -r .status.podIP)
```
- As soon as we see its IP address, access it:
```bash
curl $IP
```
- A few seconds later, the state of the pod will change; access it again:
- Access the web server:
```bash
curl $IP
```
]
The first time, we should see "403 Forbidden".
The second time, we should see the HTML file from the Spoon-Knife repository.
---
## Explanations
- Both containers are started at the same time
- NGINX starts very quickly
(it can serve requests immediately)
- But at this point, the volume is empty
(NGINX serves "403 Forbidden")
- The other container installs git and clones the repository
(this takes a bit longer)
- When the other container is done, the volume holds the repository
(NGINX serves the HTML file)
---
## The devil is in the details
@@ -332,100 +183,13 @@ The second time, we should see the HTML file from the Spoon-Knife repository.
- That's why we specified `restartPolicy: OnFailure`
---
## Inconsistencies
- There is a short period of time during which the website is not available
(because the `git` container hasn't done its job yet)
- With a bigger website, we could get inconsistent results
- This could be avoided by using [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
(where only a part of the content is ready)
- In real applications, this could cause incorrect results
- How can we avoid that?
---
## Init Containers
- We can define containers that should execute *before* the main ones
- They will be executed in order
(instead of in parallel)
- They must all succeed before the main containers are started
- This is *exactly* what we need here!
- Let's see one in action
.footnote[See [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) documentation for all the details.]
---
## Defining Init Containers
.small[
```yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-init
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/
```
]
---
## Trying the init container
- Repeat the same operation as earlier
(try to send HTTP requests as soon as the pod comes up)
- This time, instead of "403 Forbidden" we get a "connection refused"
- NGINX doesn't start until the git container has done its job
- We never get inconsistent results
(a "half-ready" container)
---
## Other uses of init containers
- Load content
- Generate configuration (or certificates)
- Database migrations
- Waiting for other services to be up
(to avoid flurry of connection errors in main container)
- etc.
(we will see a live example in a few sections)
---

View File

@@ -1,93 +0,0 @@
# Deploying with YAML
- So far, we created resources with the following commands:
- `kubectl run`
- `kubectl create deployment`
- `kubectl expose`
- We can also create resources directly with YAML manifests
---
## `kubectl apply` vs `create`
- `kubectl create -f whatever.yaml`
- creates resources if they don't exist
- if resources already exist, don't alter them
<br/>(and display error message)
- `kubectl apply -f whatever.yaml`
- creates resources if they don't exist
- if resources already exist, update them
<br/>(to match the definition provided by the YAML file)
- stores the manifest as an *annotation* in the resource
---
## Creating multiple resources
- The manifest can contain multiple resources separated by `---`
```yaml
kind: ...
apiVersion: ...
metadata: ...
name: ...
...
---
kind: ...
apiVersion: ...
metadata: ...
name: ...
...
```
---
## Creating multiple resources
- The manifest can also contain a list of resources
```yaml
apiVersion: v1
kind: List
items:
- kind: ...
apiVersion: ...
...
- kind: ...
apiVersion: ...
...
```
---
## Deploying dockercoins with YAML
- We provide a YAML manifest with all the resources for Dockercoins
(Deployments and Services)
- We can use it if we need to deploy or redeploy Dockercoins
.exercise[
- Deploy or redeploy Dockercoins:
```bash
kubectl apply -f ~/container.training/k8s/dockercoins.yaml
```
]
(If we deployed Dockercoins earlier, we will see warning messages,
because the resources that we created lack the necessary annotation.
We can safely ignore them.)

View File

@@ -1,95 +0,0 @@
title: |
Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: http://kube-2019-11.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectlrun.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
-
- k8s/yamldeploy.md
- k8s/setup-k8s.md
#- k8s/dashboard.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
#- k8s/dryrun.md
#- k8s/kubectlproxy.md
#- k8s/localkubeconfig.md
#- k8s/accessinternal.md
- k8s/rollout.md
#- k8s/healthchecks.md
#- k8s/healthchecks-more.md
#- k8s/record.md
-
- k8s/namespaces.md
- k8s/ingress.md
#- k8s/kustomize.md
#- k8s/helm.md
#- k8s/create-chart.md
#- k8s/netpol.md
#- k8s/authn-authz.md
#- k8s/csr-api.md
#- k8s/openid-connect.md
#- k8s/podsecuritypolicy.md
- k8s/volumes.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
- k8s/configuration.md
#- k8s/logs-centralized.md
#- k8s/prometheus.md
#- k8s/statefulsets.md
#- k8s/local-persistent-volumes.md
#- k8s/portworx.md
#- k8s/extending-api.md
#- k8s/operators.md
#- k8s/operators-design.md
#- k8s/staticpods.md
#- k8s/owners-and-dependents.md
#- k8s/gitworkflows.md
-
- k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md
- |
class: pic
![Enix High Five](https://2019-hi5.netlify.com/hi5.png)

View File

@@ -1,70 +0,0 @@
title: |
Kubernetes 101
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
chapters:
- shared/title.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-k8s.md
- - k8s/kubectlrun.md
- k8s/deploymentslideshow.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
#- k8s/kubectlproxy.md
#- k8s/localkubeconfig.md
#- k8s/accessinternal.md
- - k8s/dashboard.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/rollout.md
#- k8s/record.md
- - k8s/logs-cli.md
# Bridget hasn't added EFK yet
#- k8s/logs-centralized.md
- k8s/namespaces.md
- k8s/helm.md
- k8s/create-chart.md
#- k8s/kustomize.md
#- k8s/netpol.md
- k8s/whatsnext.md
# - k8s/links.md
# Bridget-specific
- k8s/links-bridget.md
- shared/thankyou.md

View File

@@ -1,99 +0,0 @@
title: |
Deploying and Scaling Microservices
with Docker and Kubernetes
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
chapters:
- shared/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectlrun.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
-
- k8s/yamldeploy.md
- k8s/setup-k8s.md
- k8s/dashboard.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/dryrun.md
- k8s/kubectlproxy.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
-
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
- k8s/record.md
-
- k8s/namespaces.md
- k8s/ingress.md
- k8s/kustomize.md
- k8s/helm.md
- k8s/create-chart.md
-
- k8s/netpol.md
- k8s/authn-authz.md
-
- k8s/csr-api.md
- k8s/openid-connect.md
- k8s/podsecuritypolicy.md
-
- k8s/volumes.md
- k8s/build-with-docker.md
- k8s/build-with-kaniko.md
- k8s/configuration.md
-
- k8s/logs-centralized.md
- k8s/prometheus.md
-
- k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
-
- k8s/extending-api.md
- k8s/operators.md
- k8s/operators-design.md
- k8s/staticpods.md
- k8s/owners-and-dependents.md
- k8s/gitworkflows.md
-
- k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -1,96 +0,0 @@
title: |
Deploying and Scaling Microservices
with Kubernetes
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
-
- shared/prereqs.md
#- shared/webssh.md
- shared/connecting.md
#- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
#- shared/hastyconclusions.md
- shared/composedown.md
- k8s/concepts-k8s.md
- k8s/kubectlget.md
-
- k8s/kubectlrun.md
- k8s/logs-cli.md
- shared/declarative.md
- k8s/declarative.md
- k8s/deploymentslideshow.md
- k8s/kubenet.md
- k8s/kubectlexpose.md
- k8s/shippingimages.md
#- k8s/buildshiprun-selfhosted.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
-
- k8s/yamldeploy.md
#- k8s/setup-k8s.md
- k8s/dashboard.md
#- k8s/kubectlscale.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- k8s/dryrun.md
-
#- k8s/kubectlproxy.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
- k8s/rollout.md
- k8s/healthchecks.md
#- k8s/healthchecks-more.md
- k8s/record.md
-
- k8s/namespaces.md
- k8s/ingress.md
- k8s/kustomize.md
- k8s/helm.md
- k8s/create-chart.md
-
- k8s/netpol.md
- k8s/authn-authz.md
#- k8s/csr-api.md
#- k8s/openid-connect.md
#- k8s/podsecuritypolicy.md
-
- k8s/volumes.md
#- k8s/build-with-docker.md
#- k8s/build-with-kaniko.md
- k8s/configuration.md
- k8s/logs-centralized.md
- k8s/prometheus.md
-
- k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
#- k8s/extending-api.md
#- k8s/operators.md
#- k8s/operators-design.md
#- k8s/staticpods.md
#- k8s/owners-and-dependents.md
#- k8s/gitworkflows.md
-
- k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -1,13 +1,19 @@
## Intros
- Hello! I'm Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
- Hello! We are:
- The workshop will run from 9:00 to 17:00
- .emoji[👷🏻‍♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Tiny Shell Script LLC)
- There will be a lunch break at 12:30
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Ardan Labs LLC)
(And coffee breaks around 10:30 and 15:30)
- The training will run from 9am to 5pm
- There will be a lunch break
(And coffee breaks!)
- Feel free to interrupt for questions at any time
- *Especially when you see full screen container pictures!*
- Live feedback, questions, help: @@CHAT@@

View File

@@ -80,7 +80,7 @@ def flatten(titles):
def generatefromyaml(manifest, filename):
manifest = yaml.safe_load(manifest)
manifest = yaml.load(manifest)
markdown, titles = processchapter(manifest["chapters"], filename)
logging.debug("Found {} titles.".format(len(titles)))
@@ -111,7 +111,6 @@ def generatefromyaml(manifest, filename):
html = html.replace("@@GITREPO@@", manifest["gitrepo"])
html = html.replace("@@SLIDES@@", manifest["slides"])
html = html.replace("@@TITLE@@", manifest["title"].replace("\n", " "))
html = html.replace("@@SLIDENUMBERPREFIX@@", manifest.get("slidenumberprefix", ""))
return html

61
slides/sfsf.yml Normal file
View File

@@ -0,0 +1,61 @@
title: |
Kubernetes
Advanced
Training
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/training-20190613-sanfrancisco)"
#chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: http://sfsf-2019-06.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
# DAY 1
- - k8s/prereqs-admin.md
- k8s/architecture.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
- - k8s/multinode.md
- k8s/cni.md
- k8s/apilb.md
- - k8s/kubercoins.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
- - k8s/volumes.md
- k8s/configuration.md
- k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
# DAY 2
- - k8s/namespaces.md
- k8s/kustomize.md
- k8s/helm.md
- k8s/create-chart.md
- k8s/create-more-charts.md
- - k8s/extending-api.md
- k8s/operators.md
- k8s/operators-design.md
- k8s/owners-and-dependents.md
- - k8s/authn-authz.md
- k8s/control-plane-auth.md
- k8s/prometheus.md
- - k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
# CONCLUSION
- - k8s/lastwords-admin.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -4,12 +4,7 @@ class: in-person
.exercise[
- Log into the first VM (`node1`) with your SSH client:
```bash
ssh `user`@`A.B.C.D`
```
(Replace `user` and `A.B.C.D` with the user and IP address provided to you)
- Log into the first VM (`node1`) with your SSH client
<!--
```bash
@@ -23,13 +18,16 @@ done
```
-->
- Check that you can SSH (without password) to `node2`:
```bash
ssh node2
```
- Type `exit` or `^D` to come back to `node1`
<!-- ```bash exit``` -->
]
You should see a prompt looking like this:
```
[A.B.C.D] (...) user@node1 ~
$
```
If anything goes wrong — ask for help!
---
@@ -54,20 +52,6 @@ If anything goes wrong — ask for help!
---
## For a consistent Kubernetes experience ...
- If you are using your own Kubernetes cluster, you can use [shpod](https://github.com/jpetazzo/shpod)
- `shpod` provides a shell running in a pod on your own cluster
- It comes with many tools pre-installed (helm, stern...)
- These tools are used in many exercises in these slides
- `shpod` also gives you completion and a fancy prompt
---
class: self-paced
## Get your own Docker nodes

View File

@@ -50,6 +50,10 @@ Misattributed to Benjamin Franklin
- Go to @@SLIDES@@ to view these slides
- Join the chat room: @@CHAT@@
<!-- ```open @@SLIDES@@``` -->
]
---

View File

@@ -1,29 +0,0 @@
## WebSSH
- The virtual machines are also accessible via WebSSH
- This can be useful if:
- you can't install an SSH client on your machine
- SSH connections are blocked (by firewall or local policy)
- To use WebSSH, connect to the IP address of the remote VM on port 1080
(each machine runs a WebSSH server)
- Then provide the login and password indicated on your card
---
## Good to know
- WebSSH uses WebSocket
- If you're having connections issues, try to disable your HTTP proxy
(many HTTP proxies can't handle WebSocket properly)
- Most keyboard shortcuts should work, except Ctrl-W
(as it is hardwired by the browser to "close this tab")

View File

@@ -109,8 +109,8 @@ div.pic p {
div.pic img {
display: block;
margin: auto;
max-width: 100%;
max-height: 100%;
max-width: 1210px;
max-height: 550px;
}
div.pic h1, div.pic h2, div.title h1, div.title h2 {
text-align: center;

View File

@@ -28,7 +28,6 @@
var slideshow = remark.create({
ratio: '16:9',
highlightSpans: true,
slideNumberFormat: '@@SLIDENUMBERPREFIX@@%current%/%total%',
excludedClasses: [@@EXCLUDE@@]
});
</script>