Compare commits


13 Commits

Author            SHA1        Message                                                             Date
Jerome Petazzoni  08fa37dace  fix-redirects.sh: adding forced redirect                            2020-04-07 16:57:19 -05:00
Jerome Petazzoni  807028cbf3  Remove WiFi warning                                                 2019-06-13 10:51:44 -05:00
Jerome Petazzoni  dfde597cb9  Merge branch 'master' into sfsf-2019-06                             2019-06-13 10:51:22 -05:00
Jerome Petazzoni  96419c6baf  test→node                                                           2019-06-12 21:35:12 -05:00
Jerome Petazzoni  12da011f21  Customize logistics etc                                             2019-06-12 21:13:00 -05:00
Jerome Petazzoni  fa1637fb7e  Add Helm charts and reorg content                                   2019-06-12 21:07:55 -05:00
Jerome Petazzoni  fbe2251e21  Merge remote-tracking branch 'origin/make-chart' into sfsf-2019-06  2019-06-12 21:07:12 -05:00
Jerome Petazzoni  b4faf10581  merge                                                               2019-06-12 16:43:24 -05:00
Jerome Petazzoni  0ef9c87f97  Merge branch 'master' into sfsf-2019-06                             2019-06-12 16:04:36 -05:00
Jerome Petazzoni  addd14582a  merge                                                               2019-06-09 18:41:04 -05:00
Jerome Petazzoni  5299fbaab5  merge                                                               2019-06-02 19:32:20 -05:00
Jerome Petazzoni  398ff5ee4f  merge                                                               2019-06-02 16:48:30 -05:00
Jerome Petazzoni  b883e6d557  Prepare SFSF training                                               2019-06-02 16:47:53 -05:00
134 changed files with 1337 additions and 8359 deletions

.gitignore

@@ -3,12 +3,10 @@
*~
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
node_modules
### macOS ###


@@ -39,7 +39,7 @@ your own tutorials.
All these materials have been gathered in a single repository
because they have a few things in common:
- some [shared slides](slides/shared/) that are re-used
- some [common slides](slides/common/) that are re-used
(and updated) identically between different decks;
- a [build system](slides/) generating HTML slides from
Markdown source files;


@@ -1,21 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
annotations:
traefik.ingress.kubernetes.io/service-weights: |
whatever: 90%
whatever-new: 10%
spec:
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 80
- path: /
backend:
serviceName: whatever-new
servicePort: 80


@@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: consul
labels:
app: consul
rules:
- apiGroups: [""]
resources:
@@ -27,6 +29,8 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
labels:
app: consul
---
apiVersion: v1
kind: Service
@@ -68,7 +72,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.6"
image: "consul:1.4.4"
args:
- "agent"
- "-bootstrap-expect=3"


@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker


@@ -1,69 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: cerebro
name: cerebro
spec:
selector:
matchLabels:
app: cerebro
template:
metadata:
labels:
app: cerebro
spec:
volumes:
- name: conf
configMap:
name: cerebro
containers:
- image: lmenezes/cerebro
name: cerebro
volumeMounts:
- name: conf
mountPath: /conf
args:
- -Dconfig.file=/conf/application.conf
env:
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: demo-es-elastic-user
key: elastic
---
apiVersion: v1
kind: Service
metadata:
labels:
app: cerebro
name: cerebro
spec:
ports:
- port: 9000
protocol: TCP
targetPort: 9000
selector:
app: cerebro
type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cerebro
data:
application.conf: |
secret = "ki:s:[[@=Ag?QI`W2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
hosts = [
{
host = "http://demo-es-http.eck-demo.svc.cluster.local:9200"
name = "demo"
auth = {
username = "elastic"
password = ${?ELASTICSEARCH_PASSWORD}
}
}
]


@@ -1,19 +0,0 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: demo
namespace: eck-demo
spec:
http:
tls:
selfSignedCertificate:
disabled: true
nodeSets:
- name: default
count: 1
config:
node.data: true
node.ingest: true
node.master: true
node.store.allow_mmap: false
version: 7.5.1


@@ -1,168 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
namespace: eck-demo
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
filebeat.inputs:
- type: container
paths:
- /var/log/containers/*.log
processors:
- add_kubernetes_metadata:
host: ${NODE_NAME}
matchers:
- logs_path:
logs_path: "/var/log/containers/"
# To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
#filebeat.autodiscover:
# providers:
# - type: kubernetes
# node: ${NODE_NAME}
# hints.enabled: true
# hints.default_config:
# type: container
# paths:
# - /var/log/containers/*${data.kubernetes.container.id}.log
processors:
- add_cloud_metadata:
- add_host_metadata:
cloud.id: ${ELASTIC_CLOUD_ID}
cloud.auth: ${ELASTIC_CLOUD_AUTH}
output.elasticsearch:
hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
username: ${ELASTICSEARCH_USERNAME}
password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: filebeat
namespace: eck-demo
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
k8s-app: filebeat
spec:
serviceAccountName: filebeat
terminationGracePeriodSeconds: 30
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: filebeat
image: docker.elastic.co/beats/filebeat:7.5.1
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
env:
- name: ELASTICSEARCH_HOST
value: demo-es-http
- name: ELASTICSEARCH_PORT
value: "9200"
- name: ELASTICSEARCH_USERNAME
value: elastic
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: demo-es-elastic-user
key: elastic
- name: ELASTIC_CLOUD_ID
value:
- name: ELASTIC_CLOUD_AUTH
value:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
runAsUser: 0
# If using Red Hat OpenShift uncomment this:
#privileged: true
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: config
mountPath: /etc/filebeat.yml
readOnly: true
subPath: filebeat.yml
- name: data
mountPath: /usr/share/filebeat/data
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0600
name: filebeat-config
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: varlog
hostPath:
path: /var/log
# data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
- name: data
hostPath:
path: /var/lib/filebeat-data
type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: filebeat
subjects:
- kind: ServiceAccount
name: filebeat
namespace: eck-demo
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: filebeat
labels:
k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- pods
verbs:
- get
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat
namespace: eck-demo
labels:
k8s-app: filebeat
---


@@ -1,17 +0,0 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
name: demo
spec:
version: 7.5.1
count: 1
elasticsearchRef:
name: demo
namespace: eck-demo
http:
service:
spec:
type: NodePort
tls:
selfSignedCertificate:
disabled: true

(File diff suppressed because it is too large.)


@@ -3,7 +3,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
@@ -33,17 +32,13 @@ subjects:
name: fluentd
namespace: default
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd
namespace: default
labels:
app: fluentd
spec:
selector:
matchLabels:
app: fluentd
template:
metadata:
labels:
@@ -56,7 +51,7 @@ spec:
effect: NoSchedule
containers:
- name: fluentd
image: fluent/fluentd-kubernetes-daemonset:v1.4-debian-elasticsearch-1
image: fluent/fluentd-kubernetes-daemonset:v1.3-debian-elasticsearch-1
env:
- name: FLUENT_ELASTICSEARCH_HOST
value: "elasticsearch"
@@ -91,13 +86,12 @@ spec:
hostPath:
path: /var/lib/docker/containers
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: elasticsearch
name: elasticsearch
namespace: default
spec:
selector:
matchLabels:
@@ -125,7 +119,6 @@ metadata:
labels:
app: elasticsearch
name: elasticsearch
namespace: default
spec:
ports:
- port: 9200
@@ -135,13 +128,12 @@ spec:
app: elasticsearch
type: ClusterIP
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: kibana
name: kibana
namespace: default
spec:
selector:
matchLabels:
@@ -165,7 +157,6 @@ metadata:
labels:
app: kibana
name: kibana
namespace: default
spec:
ports:
- port: 5601


@@ -9,7 +9,7 @@ spec:
name: haproxy
containers:
- name: haproxy
image: haproxy:1
image: haproxy
volumeMounts:
- name: config
mountPath: /usr/local/etc/haproxy/


@@ -1,13 +1,14 @@
apiVersion: networking.k8s.io/v1beta1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: whatever
name: cheddar
spec:
rules:
- host: whatever.A.B.C.D.nip.io
- host: cheddar.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234
serviceName: cheddar
servicePort: 80


@@ -12,38 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
@@ -51,129 +25,82 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: Secret
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
@@ -186,125 +113,60 @@ spec:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0-rc2
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
- --enable-skip-login
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
- port: 443
targetPort: 8443
selector:
k8s-app: dashboard-metrics-scraper
k8s-app: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.2
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
@@ -323,12 +185,10 @@ spec:
- args:
- sh
- -c
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kube-system:443,verify=0
image: alpine
name: dashboard
---
apiVersion: v1
kind: Service
metadata:
@@ -343,13 +203,13 @@ spec:
selector:
app: dashboard
type: NodePort
---
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: insecure-dashboard
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -357,4 +217,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
namespace: kube-system


@@ -1,5 +1,5 @@
apiVersion: v1
kind: Pod
Kind: Pod
metadata:
name: hello
namespace: default


@@ -12,6 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
@@ -90,7 +95,7 @@ subjects:
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
@@ -109,7 +114,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP


@@ -45,7 +45,7 @@ subjects:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: local-path-provisioner


@@ -58,7 +58,7 @@ metadata:
name: metrics-server
namespace: kube-system
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: metrics-server
@@ -82,7 +82,7 @@ spec:
emptyDir: {}
containers:
- name: metrics-server
image: k8s.gcr.io/metrics-server-amd64:v0.3.3
image: k8s.gcr.io/metrics-server-amd64:v0.3.1
imagePullPolicy: Always
volumeMounts:
- name: tmp-dir


@@ -1,8 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-without-volume
spec:
containers:
- name: nginx
image: nginx


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-volume
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/


@@ -1,20 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-init
spec:
volumes:
- name: www
containers:
- name: nginx
image: nginx
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/


@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-git
name: nginx-with-volume
spec:
volumes:
- name: www


@@ -1,54 +1,51 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
kind: Role
metadata:
name: persistentconsul
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- apiGroups: [ "" ]
resources: [ pods ]
verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
kind: RoleBinding
metadata:
name: persistentconsul
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: persistentconsul
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: persistentconsul
namespace: default
name: consul
namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: persistentconsul
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: persistentconsul
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: persistentconsul
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: persistentconsul
name: consul
spec:
serviceName: persistentconsul
serviceName: consul
replicas: 3
selector:
matchLabels:
app: persistentconsul
app: consul
volumeClaimTemplates:
- metadata:
name: data
@@ -61,9 +58,9 @@ spec:
template:
metadata:
labels:
app: persistentconsul
app: consul
spec:
serviceAccountName: persistentconsul
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -72,19 +69,19 @@ spec:
- key: app
operator: In
values:
- persistentconsul
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.6"
image: "consul:1.4.4"
volumeMounts:
- name: data
mountPath: /consul/data
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=persistentconsul\""
- "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"

(File diff suppressed because it is too large.)


@@ -12,17 +12,10 @@ spec:
labels:
app: postgres
spec:
#schedulerName: stork
initContainers:
- name: rmdir
image: alpine
volumeMounts:
- mountPath: /vol
name: postgres
command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
schedulerName: stork
containers:
- name: postgres
image: postgres:11
image: postgres:10.5
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres


@@ -6,16 +6,13 @@ metadata:
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
apiVersion: extensions/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
@@ -29,7 +26,7 @@ spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
- image: traefik
name: traefik-ingress-lb
ports:
- name: http


@@ -7,8 +7,8 @@ workshop.
## 1. Prerequisites
Virtualbox, Vagrant and Ansible
- Virtualbox: https://www.virtualbox.org/wiki/Downloads
@@ -25,7 +25,7 @@ Virtualbox, Vagrant and Ansible
$ git clone --recursive https://github.com/ansible/ansible.git
$ cd ansible
$ git checkout stable-{{ getStableVersionFromAnsibleProject }}
$ git checkout stable-2.0.0.1
$ git submodule update
- source the setup script to make Ansible available on this terminal session:
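(The command itself is elided by the diff context here. In upstream Ansible's repository layout, the setup script is typically `hacking/env-setup` — path assumed, check your checkout:)

```sh
$ source ./hacking/env-setup
```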
@@ -38,7 +38,6 @@ Virtualbox, Vagrant and Ansible
## 2. Preparing the environment
Change into directory that has your Vagrantfile
Run the following commands:
@@ -67,14 +66,6 @@ will reflect inside the instance.
- Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed
- If you get an error like "no Vagrant file found", or you have a Vagrantfile but get "cannot open base box" when running `vagrant up`,
chances are good that you are not in the correct directory.
Make sure you are in the subdirectory named "prepare-local". It has all the config files required by Ansible, Vagrant, and VirtualBox.
- If you are using Python 3.7 and, when running the ansible-playbook provisioning, you see an error like "SyntaxError: invalid syntax"
mentioning the word "async", you need to upgrade Ansible to version 2.6 or higher to resolve the keyword conflict.
https://github.com/ansible/ansible/issues/42105
- If you get strange Ansible errors about dependencies, try to check your pip
version with `pip --version`. The current version is 8.1.1. If your pip is
older than this, upgrade it with `sudo pip install --upgrade pip`, restart


@@ -10,21 +10,15 @@ These tools can help you to create VMs on:
- [Docker](https://docs.docker.com/engine/installation/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this
Depending on the infrastructure that you want to use, you also need to install
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).
And if you want to generate printable cards:
- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)
You can install them with pip (perhaps with `pip install --user`, or even use `virtualenv` if that's your thing).
These require Python 3. If you are on a Mac, see below for specific instructions on making
Python 3 the default Python. In particular, if you installed `mosh`, Homebrew
may have changed your default Python to Python 2.
- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
- [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)
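A minimal sketch of the pip route mentioned above (assuming `pip3` is on your PATH):

```sh
$ pip3 install --user pyyaml jinja2
```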
## General Workflow
@@ -93,37 +87,26 @@ You're all set!
```
workshopctl - the orchestration workshop swiss army knife
Commands:
build Build the Docker image to run this program in a container
cards Generate ready-to-print cards for a group of VMs
deploy Install Docker on a bunch of running VMs
disableaddrchecks Disable source/destination IP address checks
disabledocker Stop Docker Engine and don't restart it automatically
helmprom Install Helm and Prometheus
help Show available commands
ids (FIXME) List the instance IDs belonging to a given tag or token
kubebins Install Kubernetes and CNI binaries but don't start anything
kubereset Wipe out Kubernetes configuration on all nodes
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
kubetest Check that all nodes are reporting as Ready
listall List VMs running on all configured infrastructures
list List available groups for a given infrastructure
netfix Disable GRO and run a pinger job on the VMs
opensg Open the default security group to ALL ingress traffic
ping Ping VMs in a given tag, to check that they have network access
pssh Run an arbitrary command on all nodes
pull_images Pre-pull a bunch of Docker images
quotas Check our infrastructure quotas (max instances)
remap_nodeports Remap NodePort range to 10000-10999
retag (FIXME) Apply a new tag to a group of VMs
ssh Open an SSH session to the first node of a tag
start Start a group of VMs
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
tags List groups of VMs known locally
test Run tests (pre-flight checks) on a group of VMs
weavetest Check that weave seems properly setup
webssh Install a WEB SSH server on the machines (port 1080)
wrap Run this program in a container
www Run a web server to access card HTML and PDF
ami Show the AMI that will be used for deployment
amis List Ubuntu AMIs in the current region
build Build the Docker image to run this program in a container
cards Generate ready-to-print cards for a group of VMs
deploy Install Docker on a bunch of running VMs
ec2quotas Check our EC2 quotas (max instances)
help Show available commands
ids List the instance IDs belonging to a given tag or token
ips List the IP addresses of the VMs for a given tag or token
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
kubetest Check that all nodes are reporting as Ready
list List available groups in the current region
opensg Open the default security group to ALL ingress traffic
pull_images Pre-pull a bunch of Docker images
retag Apply a new tag to a group of VMs
start Start a group of VMs
status List instance status for a given group
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
test Run tests (pre-flight checks) on a group of VMs
wrap Run this program in a container
```
### Summary of What `./workshopctl` Does For You
@@ -262,32 +245,3 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m
- Don't write to bash history in system() in postprep
- compose, etc version inconsistent (int vs str)
## Making sure Python3 is the default (Mac only)
Check the `/usr/local/bin/python` symlink. It should be pointing to
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
instructions.
1) Verify that Python 3 is installed.
```
ls -la /usr/local/Cellar/Python
```
You should see one or more versions of Python 3. If you don't,
install it with `brew install python`.
2) Verify that `python` points to Python 3.
```
ls -la /usr/local/bin/python
```
If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.
```
rm /usr/local/bin/python
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
# where xxxx is the most recent Python 3 version you saw above
```


@@ -1,10 +0,0 @@
#!/bin/sh
set -e
TAG=$(./workshopctl maketag)
./workshopctl start --settings settings/jerome.yaml --infra infra/aws-eu-central-1 --tag $TAG
./workshopctl deploy $TAG
./workshopctl kube $TAG
./workshopctl helmprom $TAG
while ! ./workshopctl kubetest $TAG; do sleep 1; done
./workshopctl tmux $TAG
echo ./workshopctl stop $TAG


@@ -33,14 +33,9 @@ _cmd_cards() {
../../lib/ips-txt-to-html.py settings.yaml
)
ln -sf ../tags/$TAG/ips.html www/$TAG.html
ln -sf ../tags/$TAG/ips.pdf www/$TAG.pdf
info "Cards created. You can view them with:"
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
info "open tags/$TAG/ips.html (on macOS)"
info "Or you can start a web server with:"
info "$0 www"
}
_cmd deploy "Install Docker on a bunch of running VMs"
@@ -127,11 +122,11 @@ _cmd_kubebins() {
set -e
cd /usr/local/bin
if ! [ -x etcd ]; then
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.15/etcd-v3.3.15-linux-amd64.tar.gz \
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
if ! [ -x hyperkube ]; then
curl -L https://dl.k8s.io/v1.16.2/kubernetes-server-linux-amd64.tar.gz \
curl -L https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz \
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
fi
if ! [ -x kubelet ]; then
@@ -143,7 +138,7 @@ _cmd_kubebins() {
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
if ! [ -x bridge ]; then
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
| sudo tar -zx
fi
"
@@ -157,10 +152,10 @@ _cmd_kube() {
# Optional version, e.g. 1.13.5
KUBEVERSION=$2
if [ "$KUBEVERSION" ]; then
EXTRA_APTGET="=$KUBEVERSION-00"
EXTRA_KUBELET="=$KUBEVERSION-00"
EXTRA_KUBEADM="--kubernetes-version=v$KUBEVERSION"
else
EXTRA_APTGET=""
EXTRA_KUBELET=""
EXTRA_KUBEADM=""
fi
@@ -172,7 +167,7 @@ _cmd_kube() {
sudo tee /etc/apt/sources.list.d/kubernetes.list"
pssh --timeout 200 "
sudo apt-get update -q &&
sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
sudo apt-get install -qy kubelet$EXTRA_KUBELET kubeadm kubectl &&
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
# Initialize kube master
@@ -234,7 +229,7 @@ EOF"
pssh "
if [ ! -x /usr/local/bin/stern ]; then
##VERSION##
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 &&
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64 &&
sudo chmod +x /usr/local/bin/stern &&
stern --completion bash | sudo tee /etc/bash_completion.d/stern
fi"
@@ -242,7 +237,7 @@ EOF"
# Install helm
pssh "
if [ ! -x /usr/local/bin/helm ]; then
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 | sudo bash &&
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash &&
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
@@ -323,23 +318,6 @@ _cmd_listall() {
done
}
_cmd maketag "Generate a quasi-unique tag for a group of instances"
_cmd_maketag() {
if [ -z $USER ]; then
export USER=anonymous
fi
MS=$(($(date +%N)/1000000))
date +%Y-%m-%d-%H-%M-$MS-$USER
}
_cmd ping "Ping VMs in a given tag, to check that they have network access"
_cmd_ping() {
TAG=$1
need_tag
fping < tags/$TAG/ips.txt
}
_cmd netfix "Disable GRO and run a pinger job on the VMs"
_cmd_netfix () {
TAG=$1
@@ -371,16 +349,6 @@ _cmd_opensg() {
infra_opensg
}
_cmd portworx "Prepare the nodes for Portworx deployment"
_cmd_portworx() {
TAG=$1
need_tag
pssh "
sudo truncate --size 10G /portworx.blk &&
sudo losetup /dev/loop4 /portworx.blk"
}
_cmd disableaddrchecks "Disable source/destination IP address checks"
_cmd_disableaddrchecks() {
TAG=$1
@@ -405,20 +373,6 @@ _cmd_pull_images() {
pull_tag
}
_cmd remap_nodeports "Remap NodePort range to 10000-10999"
_cmd_remap_nodeports() {
TAG=$1
need_tag
FIND_LINE=" - --service-cluster-ip-range=10.96.0.0\/12"
ADD_LINE=" - --service-node-port-range=10000-10999"
MANIFEST_FILE=/etc/kubernetes/manifests/kube-apiserver.yaml
pssh "
if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
fi"
}
_cmd quotas "Check our infrastructure quotas (max instances)"
_cmd_quotas() {
need_infra $1
@@ -474,7 +428,7 @@ _cmd_start() {
need_infra $INFRA
if [ -z "$TAG" ]; then
TAG=$(_cmd_maketag)
TAG=$(make_tag)
fi
mkdir -p tags/$TAG
ln -s ../../$INFRA tags/$TAG/infra.sh
@@ -536,24 +490,20 @@ _cmd_test() {
test_tag
}
_cmd tmux "Log into the first node and start a tmux server"
_cmd_tmux() {
TAG=$1
need_tag
IP=$(head -1 tags/$TAG/ips.txt)
info "Opening ssh+tmux with $IP"
rm -f /tmp/tmux-$UID/default
ssh -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
}
_cmd helmprom "Install Helm and Prometheus"
_cmd_helmprom() {
TAG=$1
need_tag
pssh "
if i_am_first_node; then
sudo -u docker -H helm repo add stable https://kubernetes-charts.storage.googleapis.com/
sudo -u docker -H helm install prometheus stable/prometheus \
kubectl -n kube-system get serviceaccount helm ||
kubectl -n kube-system create serviceaccount helm
sudo -u docker -H helm init --service-account helm
kubectl get clusterrolebinding helm-can-do-everything ||
kubectl create clusterrolebinding helm-can-do-everything \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:helm
sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
--namespace kube-system \
--set server.service.type=NodePort \
--set server.service.nodePort=30090 \
@@ -578,50 +528,6 @@ _cmd_weavetest() {
sh -c \"./weave --local status | grep Connections | grep -q ' 1 failed' || ! echo POD \""
}
_cmd webssh "Install a WEB SSH server on the machines (port 1080)"
_cmd_webssh() {
TAG=$1
need_tag
pssh "
sudo apt-get update &&
sudo apt-get install python-tornado python-paramiko -y"
pssh "
[ -d webssh ] || git clone https://github.com/jpetazzo/webssh"
pssh "
for KEYFILE in /etc/ssh/*.pub; do
read a b c < \$KEYFILE; echo localhost \$a \$b
done > webssh/known_hosts"
pssh "cat >webssh.service <<EOF
[Unit]
Description=webssh
[Install]
WantedBy=multi-user.target
[Service]
WorkingDirectory=/home/ubuntu/webssh
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
Restart=always
EOF"
pssh "
sudo systemctl enable \$PWD/webssh.service &&
sudo systemctl start webssh.service"
}
_cmd www "Run a web server to access card HTML and PDF"
_cmd_www() {
cd www
IPADDR=$(curl -sL canihazip.com/s)
info "The following files are available:"
for F in *; do
echo "http://$IPADDR:8000/$F"
done
info "Press Ctrl-C to stop server."
python3 -m http.server
}
greet() {
IAMUSER=$(aws iam get-user --query 'User.UserName')
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
@@ -740,3 +646,10 @@ sync_keys() {
info "Using existing key $AWS_KEY_NAME."
fi
}
make_tag() {
if [ -z $USER ]; then
export USER=anonymous
fi
date +%Y-%m-%d-%H-%M-$USER
}


@@ -31,7 +31,6 @@ infra_start() {
die "I could not find which AMI to use in this region. Try another region?"
fi
AWS_KEY_NAME=$(make_key_name)
AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}
sep "Starting instances"
info " Count: $COUNT"
@@ -39,11 +38,10 @@ infra_start() {
info " Token/tag: $TAG"
info " AMI: $AMI"
info " Key name: $AWS_KEY_NAME"
info " Instance type: $AWS_INSTANCE_TYPE"
result=$(aws ec2 run-instances \
--key-name $AWS_KEY_NAME \
--count $COUNT \
--instance-type $AWS_INSTANCE_TYPE \
--instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
--client-token $TAG \
--block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
--image-id $AMI)
@@ -99,7 +97,7 @@ infra_disableaddrchecks() {
}
wait_until_tag_is_running() {
max_retry=100
max_retry=50
i=0
done_count=0
while [[ $done_count -lt $COUNT ]]; do


@@ -4,12 +4,17 @@ import sys
import yaml
import jinja2
def prettify(l):
l = [ip.strip() for ip in l]
ret = [ "node{}: <code>{}</code>".format(i+1, s) for (i, s) in zip(range(len(l)), l) ]
return ret
# Read settings from user-provided settings file
context = yaml.safe_load(open(sys.argv[1]))
SETTINGS = yaml.load(open(sys.argv[1]))
clustersize = SETTINGS["clustersize"]
ips = list(open("ips.txt"))
clustersize = context["clustersize"]
print("---------------------------------------------")
print(" Number of IPs: {}".format(len(ips)))
@@ -25,9 +30,7 @@ while ips:
ips = ips[clustersize:]
clusters.append(cluster)
context["clusters"] = clusters
template_file_name = context["cards_template"]
template_file_name = SETTINGS["cards_template"]
template_file_path = os.path.join(
os.path.dirname(__file__),
"..",
@@ -36,19 +39,18 @@ template_file_path = os.path.join(
)
template = jinja2.Template(open(template_file_path).read())
with open("ips.html", "w") as f:
f.write(template.render(**context))
f.write(template.render(clusters=clusters, **SETTINGS))
print("Generated ips.html")
try:
import pdfkit
with open("ips.html") as f:
pdfkit.from_file(f, "ips.pdf", options={
"page-size": context["paper_size"],
"margin-top": context["paper_margin"],
"margin-bottom": context["paper_margin"],
"margin-left": context["paper_margin"],
"margin-right": context["paper_margin"],
"page-size": SETTINGS["paper_size"],
"margin-top": SETTINGS["paper_margin"],
"margin-bottom": SETTINGS["paper_margin"],
"margin-left": SETTINGS["paper_margin"],
"margin-right": SETTINGS["paper_margin"],
})
print("Generated ips.pdf")
except ImportError:


@@ -73,29 +73,8 @@ set expandtab
set number
set shiftwidth=2
set softtabstop=2
set nowrap
SQRL""")
# Custom .tmux.conf
system(
"""sudo -u docker tee /home/docker/.tmux.conf <<SQRL
bind h select-pane -L
bind j select-pane -D
bind k select-pane -U
bind l select-pane -R
# Allow using mouse to switch panes
set -g mouse on
# Make scrolling with wheels work
bind -n WheelUpPane if-shell -F -t = "#{mouse_any_flag}" "send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'select-pane -t=; copy-mode -e; send-keys -M'"
bind -n WheelDownPane select-pane -t= \; send-keys -M
SQRL"""
)
# add docker user to sudoers and allow password authentication
system("""sudo tee /etc/sudoers.d/docker <<SQRL
docker ALL=(ALL) NOPASSWD:ALL
@@ -106,7 +85,6 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
system("sudo service ssh restart")
system("sudo apt-get -q update")
system("sudo apt-get -qy install git jq")
system("sudo apt-get -qy install emacs-nox joe")
#######################
### DOCKER INSTALLS ###


@@ -21,10 +21,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
image:


@@ -21,11 +21,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
clusternumber: 100
image:


@@ -21,11 +21,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
clusternumber: 200
image:


@@ -21,10 +21,8 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
image:


@@ -23,7 +23,7 @@ paper_margin: 0.2in
engine_version: test
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.18.0
machine_version: 0.13.0
# Password used to connect with the "docker user"


@@ -23,7 +23,7 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.22.0
machine_version: 0.15.0
# Password used to connect with the "docker user"


@@ -21,8 +21,9 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training


@@ -23,7 +23,7 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"


@@ -23,7 +23,7 @@ paper_margin: 0.2in
engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.24.1
compose_version: 1.22.0
machine_version: 0.15.0
# Password used to connect with the "docker user"


@@ -30,9 +30,9 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG
SETTINGS=admin-kuberouter
@@ -43,15 +43,11 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG
#INFRA=infra/aws-us-west-1
export AWS_INSTANCE_TYPE=t3a.medium
SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS
./workshopctl start \
@@ -61,6 +57,5 @@ TAG=$PREFIX-$SETTINGS
--count $((3*$STUDENTS))
./workshopctl deploy $TAG
./workshopctl kube $TAG 1.14.6
./workshopctl kube $TAG 1.13.5
./workshopctl cards $TAG


@@ -1,23 +1,12 @@
{#
The variables below can be customized here directly, or in your
settings.yaml file. Any variable in settings.yaml will be exposed
in here as well.
#}
{# Feel free to customize or override anything in there! #}
{%- set url = url
| default("http://FIXME.container.training/") -%}
{%- set pagesize = pagesize
| default(9) -%}
{%- set lang = lang
| default("en") -%}
{%- set event = event
| default("training session") -%}
{%- set backside = backside
| default(False) -%}
{%- set image = image
| default("kube") -%}
{%- set clusternumber = clusternumber
| default(None) -%}
{%- set url = "http://FIXME.container.training/" -%}
{%- set pagesize = 9 -%}
{%- set lang = "en" -%}
{%- set event = "training session" -%}
{%- set backside = False -%}
{%- set image = "kube" -%}
{%- set clusternumber = 100 -%}
{%- set image_src = {
"docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
@@ -172,9 +161,7 @@ img.kube {
<div>
<p>{{ intro }}</p>
<p>
{% if image_src %}
<img src="{{ image_src }}" />
{% endif %}
<table>
{% if clusternumber != None %}
<tr><td>cluster:</td></tr>
@@ -200,10 +187,8 @@ img.kube {
</p>
<p>
{% if url %}
{{ slides_are_at }}
<center>{{ url }}</center>
{% endif %}
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}


@@ -1,4 +0,0 @@
This directory will contain symlinks to HTML and PDF files for the cards
with the IP address, login, and password for the training environments.
The file "index.html" is empty on purpose: it prevents listing the files.


@@ -1,4 +1,4 @@
FROM alpine:3.11
RUN apk add --no-cache entr py3-pip git zip
FROM alpine:3.9
RUN apk add --no-cache entr py-pip git
COPY requirements.txt .
RUN pip3 install -r requirements.txt
RUN pip install -r requirements.txt


@@ -2,10 +2,4 @@
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
/ /kube.yml.html 200!
# And this makes it possible to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
/ /sfsf.yml.html 200!


@@ -26,10 +26,9 @@ IPADDR = None
class State(object):
def __init__(self):
self.clipboard = ""
self.interactive = True
self.verify_status = True
self.simulate_type = False
self.verify_status = False
self.simulate_type = True
self.switch_desktop = False
self.sync_slides = False
self.open_links = False
@@ -39,7 +38,6 @@ class State(object):
def load(self):
data = yaml.load(open("state.yaml"))
self.clipboard = str(data["clipboard"])
self.interactive = bool(data["interactive"])
self.verify_status = bool(data["verify_status"])
self.simulate_type = bool(data["simulate_type"])
@@ -53,7 +51,6 @@ class State(object):
def save(self):
with open("state.yaml", "w") as f:
yaml.dump(dict(
clipboard=self.clipboard,
interactive=self.interactive,
verify_status=self.verify_status,
simulate_type=self.simulate_type,
@@ -69,8 +66,6 @@ class State(object):
state = State()
outfile = open("autopilot.log", "w")
def hrule():
return "="*int(subprocess.check_output(["tput", "cols"]))
@@ -90,11 +85,9 @@ class Snippet(object):
# On single-line snippets, the data follows the method immediately
if '\n' in content:
self.method, self.data = content.split('\n', 1)
self.data = self.data.strip()
elif ' ' in content:
self.method, self.data = content.split(' ', 1)
else:
self.method, self.data = content, None
self.method, self.data = content.split(' ', 1)
self.data = self.data.strip()
self.next = None
def __str__(self):
@@ -193,7 +186,7 @@ def wait_for_prompt():
if last_line == "$":
# This is a perfect opportunity to grab the node's IP address
global IPADDR
IPADDR = re.findall("\[(.*)\]", output, re.MULTILINE)[-1]
IPADDR = re.findall("^\[(.*)\]", output, re.MULTILINE)[-1]
return
# When we are in an alpine container, the prompt will be "/ #"
if last_line == "/ #":
@@ -242,8 +235,6 @@ tmux
rm -f /tmp/tmux-{uid}/default && ssh -t -L /tmp/tmux-{uid}/default:/tmp/tmux-1001/default docker@{ipaddr} tmux new-session -As 0
(Or use workshopctl tmux)
3. If you cannot control a remote tmux:
tmux new-session ssh docker@{ipaddr}
@@ -268,11 +259,26 @@ for slide in re.split("\n---?\n", content):
slide_classes = slide_classes[0].split(",")
slide_classes = [c.strip() for c in slide_classes]
if excluded_classes & set(slide_classes):
logging.debug("Skipping excluded slide.")
logging.info("Skipping excluded slide.")
continue
slides.append(Slide(slide))
def send_keys(data):
if state.simulate_type and data[0] != '^':
for key in data:
if key == ";":
key = "\\;"
if key == "\n":
if interruptible_sleep(1): return
subprocess.check_call(["tmux", "send-keys", key])
if interruptible_sleep(0.15*random.random()): return
if key == "\n":
if interruptible_sleep(1): return
else:
subprocess.check_call(["tmux", "send-keys", data])
def capture_pane():
return subprocess.check_output(["tmux", "capture-pane", "-p"]).decode('utf-8')
@@ -282,7 +288,7 @@ setup_tmux_and_ssh()
try:
state.load()
logging.debug("Successfully loaded state from file.")
logging.info("Successfully loaded state from file.")
# Let's override the starting state, so that when an error occurs,
# we can restart the auto-tester and then single-step or debug.
# (Instead of running again through the same issue immediately.)
@@ -291,7 +297,6 @@ except Exception as e:
logging.exception("Could not load state from file.")
logging.warning("Using default values.")
def move_forward():
state.snippet += 1
if state.snippet > len(slides[state.slide].snippets):
@@ -315,147 +320,10 @@ def check_bounds():
state.slide = len(slides)-1
##########################################################
# All functions starting with action_ correspond to the
# code to be executed when seeing ```foo``` blocks in the
# input. ```foo``` would call action_foo(state, snippet).
##########################################################
def send_keys(keys):
subprocess.check_call(["tmux", "send-keys", keys])
# Send a single key.
# Useful for special keys, e.g. tmux interprets these strings:
# ^C (and all other sequences starting with a caret)
# Space
# ... and many others (check tmux manpage for details).
def action_key(state, snippet):
send_keys(snippet.data)
# Send multiple keys.
# If keystroke simulation is off, all keys are sent at once.
# If keystroke simulation is on, keys are sent one by one, with a delay between them.
def action_keys(state, snippet, keys=None):
if keys is None:
keys = snippet.data
if not state.simulate_type:
send_keys(keys)
else:
for key in keys:
if key == ";":
key = "\\;"
if key == "\n":
if interruptible_sleep(1): return
send_keys(key)
if interruptible_sleep(0.15*random.random()): return
if key == "\n":
if interruptible_sleep(1): return
def action_hide(state, snippet):
if state.run_hidden:
action_bash(state, snippet)
def action_bash(state, snippet):
data = snippet.data
# Make sure that we're ready
wait_for_prompt()
# Strip leading spaces
data = re.sub("\n +", "\n", data)
# Remove backticks (they are used to highlight sections)
data = data.replace('`', '')
# Add "RETURN" at the end of the command :)
data += "\n"
# Send command
action_keys(state, snippet, data)
# Force a short sleep to avoid race condition
time.sleep(0.5)
if snippet.next and snippet.next.method == "wait":
wait_for_string(snippet.next.data)
elif snippet.next and snippet.next.method == "longwait":
wait_for_string(snippet.next.data, 10*TIMEOUT)
else:
wait_for_prompt()
# Verify return code
check_exit_status()
def action_copy(state, snippet):
screen = capture_pane()
matches = re.findall(snippet.data, screen, flags=re.DOTALL)
if len(matches) == 0:
raise Exception("Could not find regex {} in output.".format(snippet.data))
# Arbitrarily get the most recent match
match = matches[-1]
# Remove line breaks (like a screen copy paste would do)
match = match.replace('\n', '')
logging.debug("Copied {} to clipboard.".format(match))
state.clipboard = match
def action_paste(state, snippet):
logging.debug("Pasting {} from clipboard.".format(state.clipboard))
action_keys(state, snippet, state.clipboard)
def action_check(state, snippet):
wait_for_prompt()
check_exit_status()
def action_open(state, snippet):
# Cheap way to get node1's IP address
screen = capture_pane()
url = snippet.data.replace("/node1", "/{}".format(IPADDR))
# This should probably be adapted to run on different OS
if state.open_links:
subprocess.check_output(["xdg-open", url])
focus_browser()
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
def action_tmux(state, snippet):
subprocess.check_call(["tmux"] + snippet.data.split())
def action_unknown(state, snippet):
logging.warning("Unknown method {}: {!r}".format(snippet.method, snippet.data))
def run_snippet(state, snippet):
logging.info("Running with method {}: {}".format(snippet.method, snippet.data))
try:
action = globals()["action_"+snippet.method]
except KeyError:
action = action_unknown
try:
action(state, snippet)
result = "OK"
except:
result = "ERR"
logging.exception("While running method {} with {!r}".format(snippet.method, snippet.data))
# Try to recover
try:
wait_for_prompt()
except:
subprocess.check_call(["tmux", "new-window"])
wait_for_prompt()
outfile.write("{} SLIDE={} METHOD={} DATA={!r}\n".format(result, state.slide, snippet.method, snippet.data))
outfile.flush()
while True:
state.save()
slide = slides[state.slide]
if state.snippet and state.snippet <= len(slide.snippets):
snippet = slide.snippets[state.snippet-1]
else:
snippet = None
snippet = slide.snippets[state.snippet-1] if state.snippet else None
click.clear()
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
"[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
@@ -517,10 +385,7 @@ while True:
# continue until next timeout
state.interactive = False
elif command in ("y", "\r", " "):
if snippet:
run_snippet(state, snippet)
move_forward()
else:
if not snippet:
# Advance to next snippet
# Advance until a slide that has snippets
while not slides[state.slide].snippets:
@@ -530,5 +395,59 @@ while True:
break
# And then advance to the snippet
move_forward()
continue
method, data = snippet.method, snippet.data
logging.info("Running with method {}: {}".format(method, data))
if method == "keys":
send_keys(data)
elif method == "bash" or (method == "hide" and state.run_hidden):
# Make sure that we're ready
wait_for_prompt()
# Strip leading spaces
data = re.sub("\n +", "\n", data)
# Remove backticks (they are used to highlight sections)
data = data.replace('`', '')
# Add "RETURN" at the end of the command :)
data += "\n"
# Send command
send_keys(data)
# Force a short sleep to avoid race condition
time.sleep(0.5)
if snippet.next and snippet.next.method == "wait":
wait_for_string(snippet.next.data)
elif snippet.next and snippet.next.method == "longwait":
wait_for_string(snippet.next.data, 10*TIMEOUT)
else:
wait_for_prompt()
# Verify return code
check_exit_status()
elif method == "copypaste":
screen = capture_pane()
matches = re.findall(data, screen, flags=re.DOTALL)
if len(matches) == 0:
raise Exception("Could not find regex {} in output.".format(data))
# Arbitrarily get the most recent match
match = matches[-1]
# Remove line breaks (like a screen copy paste would do)
match = match.replace('\n', '')
send_keys(match + '\n')
# FIXME: we should factor out the "bash" method
wait_for_prompt()
check_exit_status()
elif method == "open":
# Cheap way to get node1's IP address
screen = capture_pane()
url = data.replace("/node1", "/{}".format(IPADDR))
# This should probably be adapted to run on different OS
if state.open_links:
subprocess.check_output(["xdg-open", url])
focus_browser()
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
else:
logging.warning("Unknown method {}: {!r}".format(method, data))
move_forward()
else:
logging.warning("Unknown command {}.".format(command))

View File

@@ -14,7 +14,6 @@ once)
./appendcheck.py $YAML.html
done
fi
zip -qr slides.zip . && echo "Created slides.zip archive."
;;
forever)

View File

@@ -104,6 +104,22 @@ like Windows, macOS, Solaris, FreeBSD ...
---
## rkt
* Compares to `runc`.
* No daemon or API.
* Strong emphasis on security (through privilege separation).
* Networking has to be set up separately (e.g. through CNI plugins).
* Partial image management (pull, but no push).
(Image build is handled by separate tools.)
---
## CRI-O
* Designed to be used with Kubernetes as a simple, basic runtime.

View File

@@ -76,78 +76,6 @@ CMD ["python", "app.py"]
---
## Be careful with `chown`, `chmod`, `mv`
* Layers cannot store efficiently changes in permissions or ownership.
* Layers cannot represent efficiently when a file is moved either.
* As a result, operations like `chown`, `chmod`, `mv` can be expensive.
* For instance, in the Dockerfile snippet below, each `RUN` line
creates a layer with an entire copy of `some-file`.
```dockerfile
COPY some-file .
RUN chown www-data:www-data some-file
RUN chmod 644 some-file
RUN mv some-file /var/www
```
* How can we avoid that?
---
## Put files in the right place
* Instead of using `mv`, directly put files in the right place.
* When extracting archives (tar, zip...), merge operations in a single layer.
Example:
```dockerfile
...
RUN wget http://.../foo.tar.gz \
&& tar -zxf foo.tar.gz \
&& mv foo/fooctl /usr/local/bin \
&& rm -rf foo
...
```
---
## Use `COPY --chown`
* The Dockerfile instruction `COPY` can take a `--chown` parameter.
Examples:
```dockerfile
...
COPY --chown=1000 some-file .
COPY --chown=1000:1000 some-file .
COPY --chown=www-data:www-data some-file .
```
* The `--chown` flag can specify a user, or a user:group pair.
* The user and group can be specified as names or numbers.
* When using names, the names must exist in `/etc/passwd` or `/etc/group`.
*(In the container, not on the host!)*
---
## Set correct permissions locally
* Instead of using `chmod`, set the right file permissions locally.
* When files are copied with `COPY`, permissions are preserved.
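Example (the file name and mode are illustrative):
```bash
# Set the mode once, in the build context, before building.
chmod 755 entrypoint.sh
docker build -t myimage .
```
If the Dockerfile then does `COPY entrypoint.sh /`, the file keeps its 755 mode, and no extra `RUN chmod` layer is needed.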
---
## Embedding unit tests in the build process
```dockerfile

View File

@@ -1,137 +0,0 @@
# Init systems and PID 1
In this chapter, we will consider:
- the role of PID 1 in the world of Docker,
- how to avoid some common pitfalls due to the misuse of init systems.
---
## What's an init system?
- On UNIX, the "init system" (or "init" in short) is PID 1.
- It is the first process started by the kernel when the system starts.
- It has multiple responsibilities:
- start every other process on the machine,
- reap orphaned zombie processes.
---
class: extra-details
## Orphaned zombie processes ?!?
- When a process exits (or "dies"), it becomes a "zombie".
(Zombie processes show up in `ps` or `top` with the status code `Z`.)
- Its parent process must *reap* the zombie process.
(This is done by calling `waitpid()` to retrieve the process' exit status.)
- When a process exits, if it has child processes, these processes are "orphaned."
- They are then re-parented to PID 1, init.
- Init therefore needs to take care of these orphaned processes when they exit.
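Here is a little experiment to see a zombie first-hand (a sketch: it assumes a local Docker Engine, and uses Python only because it forks without reaping):
```bash
docker run --rm python:3-alpine python3 -c '
import os, subprocess, time
pid = os.fork()                 # spawn a child process...
if pid == 0:
    os._exit(0)                 # ...that exits immediately
time.sleep(1)                   # PID 1 (us) never calls waitpid()
subprocess.run(["ps", "-o", "pid,stat,comm"])
'
```
The dead child should show up in the `ps` output with status `Z`.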
---
## Don't use init systems in containers
- It's often tempting to use an init system or a process manager.
(Examples: *systemd*, *supervisord*...)
- Our containers are then called "system containers".
(By contrast with "application containers".)
- "System containers" are similar to lightweight virtual machines.
- They have multiple downsides:
- when starting multiple processes, their logs get mixed on stdout,
- if the application process dies, the container engine doesn't see it.
- Overall, they make it harder to operate and troubleshoot containerized apps.
---
## Exceptions and workarounds
- Sometimes, it's convenient to run a real init system like *systemd*.
(Example: a CI system whose goal is precisely to test an init script or unit file.)
- If we need to run multiple processes: can we use multiple containers?
(Example: [this Compose file](https://github.com/jpetazzo/container.training/blob/master/compose/simple-k8s-control-plane/docker-compose.yaml) runs multiple processes together.)
- When deploying with Kubernetes:
- a container belongs to a pod,
- a pod can have multiple containers.
---
## What about these zombie processes?
- Our application runs as PID 1 in the container.
- Our application may or may not be designed to reap zombie processes.
- If our application uses subprocesses and doesn't reap them ...
... this can lead to PID exhaustion!
(Or, more realistically, to a confusing herd of zombie processes.)
- How can we solve this?
---
## Tini to the rescue
- Docker can automatically provide a minimal `init` process.
- This is enabled with `docker run --init ...`
- It uses a small init system ([tini](https://github.com/krallin/tini)) as PID 1:
- it reaps zombies,
- it forwards signals,
- it exits when the child exits.
- It is totally transparent to our application.
- We should use it if our application creates subprocesses but doesn't reap them.
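We can check the effect of `--init` directly:
```bash
# Without --init, our command runs as PID 1;
# with --init, PID 1 should be tini (shown as docker-init).
docker run --rm alpine ps -o pid,comm
docker run --rm --init alpine ps -o pid,comm
```
In the second case, `ps` runs as a child of `docker-init`.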
---
class: extra-details
## What about Kubernetes?
- Kubernetes does not expose that `--init` option.
- However, we can achieve the same result with [Process Namespace Sharing](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/).
- When Process Namespace Sharing is enabled, PID 1 will be `pause`.
- That `pause` process takes care of reaping zombies.
- Process Namespace Sharing is available since Kubernetes 1.16.
- If you're using an older version of Kubernetes ...
... you might have to add `tini` explicitly to your Docker image.

View File

@@ -102,44 +102,29 @@ class: extra-details
---
## Docker Desktop
## Docker Desktop for Mac and Docker Desktop for Windows
* Special Docker edition available for Mac and Windows
* Special Docker Editions that integrate well with their respective host OS
* Integrates well with the host OS:
* Provide user-friendly GUI to edit Docker configuration and settings
* installed like normal user applications on the host
* Leverage the host OS virtualization subsystem (e.g. the [Hypervisor API](https://developer.apple.com/documentation/hypervisor) on macOS)
* provides user-friendly GUI to edit Docker configuration and settings
* Installed like normal user applications on the host
* Only support running one Docker VM at a time ...
* Under the hood, they both run a tiny VM (transparent to our daily use)
* Access network resources like normal applications
<br/>(and therefore, play better with enterprise VPNs and firewalls)
* Support filesystem sharing through volumes (we'll talk about this later)
* They only support running one Docker VM at a time ...
<br/>
... but we can use `docker-machine`, the Docker Toolbox, VirtualBox, etc. to get a cluster.
---
class: extra-details
## Docker Desktop internals
* Leverages the host OS virtualization subsystem
(e.g. the [Hypervisor API](https://developer.apple.com/documentation/hypervisor) on macOS)
* Under the hood, runs a tiny VM
(transparent to our daily use)
* Accesses network resources like normal applications
(and therefore, plays better with enterprise VPNs and firewalls)
* Supports filesystem sharing through volumes
(we'll talk about this later)
---
## Running Docker on macOS and Windows
When you execute `docker version` from the terminal:

View File

@@ -1,47 +0,0 @@
# Container Super-structure
- Multiple orchestration platforms support some kind of container super-structure.
(i.e., a construct or abstraction bigger than a single container.)
- For instance, on Kubernetes, this super-structure is called a *pod*.
- A pod is a group of containers (it could be a single container, too).
- These containers run together, on the same host.
(A pod cannot straddle multiple hosts.)
- All the containers in a pod have the same IP address.
- How does that map to the Docker world?
---
class: pic
## Anatomy of a Pod
![Pods](images/kubernetes_pods.svg)
---
## Pods in Docker
- The containers inside a pod share the same network namespace.
(Just like when using `docker run --net=container:<container_id>` with the CLI.)
- As a result, they can communicate together over `localhost`.
- In addition to "our" containers, the pod has a special container, the *sandbox*.
- That container uses a special image: `k8s.gcr.io/pause`.
(This is visible when listing containers running on a Kubernetes node.)
- Containers within a pod have independent filesystems.
- They can share directories by using a mechanism called *volumes.*
(Which is similar to the concept of volumes in Docker.)
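We can emulate that pattern with plain Docker (image names are just examples):
```bash
docker run -d --name web nginx
docker run --rm --net container:web alpine wget -qO- http://localhost
```
The second container reaches the first one over `localhost`, because they share a single network namespace (just like containers in a pod).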

View File

@@ -100,25 +100,3 @@ class: extra-details
* In "Build rules" block near page bottom, put `/www` in "Build Context" column (or whichever directory the Dockerfile is in).
* Click "Save and Build" to build the repository immediately (without waiting for a git push).
* Subsequent builds will happen automatically, thanks to GitHub hooks.
---
## Building on the fly
- Some services can build images on the fly from a repository
- Example: [ctr.run](https://ctr.run/)
.exercise[
- Use ctr.run to automatically build a container image and run it:
```bash
docker run ctr.run/github.com/undefinedlabs/hello-world
```
]
There might be a long pause before the first layer is pulled,
because the API behind `docker pull` doesn't allow streaming build logs, and there is no feedback during the build.
It is possible to view the build logs by setting up an account on [ctr.run](https://ctr.run/).

View File

@@ -1,12 +0,0 @@
# Links and resources
- [Docker Community Slack](https://community.docker.com/registrations/groups/4316)
- [Docker Community Forums](https://forums.docker.com/)
- [Docker Hub](https://hub.docker.com)
- [Docker Blog](https://blog.docker.com/)
- [Docker documentation](https://docs.docker.com/)
- [Docker on StackOverflow](https://stackoverflow.com/questions/tagged/docker)
- [Docker on Twitter](https://twitter.com/docker)
- [Play With Docker Hands-On Labs](https://training.play-with-docker.com/)
.footnote[These slides (and future updates) are on → https://container.training/]

slides/containers/links.md Symbolic link
View File

@@ -0,0 +1 @@
../swarm/links.md

View File

@@ -1 +0,0 @@
<mxfile host="www.draw.io" modified="2019-12-06T15:04:22.728Z" agent="Mozilla/5.0 (X11; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0" etag="zsQLtxL9GRXJF3jcROIq" version="12.3.7" type="device" pages="1"><diagram id="hOpsmMj0j3CSse8MyRSQ" name="Page-1">3VhLU9swEP41nmkPzcR2EpIjCaHtUEo6HCi9dBRb2BoUy8hyHvz6rmzJD9mBQBJgmoMjrVcr6dtvVytb7mSx/spRHF4yH1PL6fpryz2zHMcZ9Rz4k5JNLrFtd5RLAk58JSsF1+QRK2FXSVPi46SmKBijgsR1oceiCHuiJkOcs1Vd7Y7R+qwxCnBDcO0h2pTeEF+EuXTonJTyb5gEoZ7ZHqj9LZBWVjtJQuSzVUXkTi13whkTeWuxnmAq0dO45OPOt7wtFsZxJHYZ8PfXAwsvwsvLP5duOpn2bx4ufnyx1WqXiKZqx5YzoGBw7JMlNAPZDFkiQOkTGF8iDk9K5vC8T+eYYnhz3ul0Putxc66HaQkoVIwpNMRGQ8xZGvlYrrILr1chEfg6Rp58uwJWyfnFgkLPhiaiJIigzXPMlbUl5gKvtwJjF3ADUTFbYME3oKIGDJWDFEXdruqvSn/3ekoWVn2tPYsUx4LCdOkGaChPvMQrDafMmL8fbiHj5JFFAmmBhIwAz08VoILFR4GztyOaheLB0XQaaMYoTXCeNAQiEeb7YXsA0AoubeogVlBz3RbUjgaa2wAtCki0/nBA2S38elukei0Z1AAJR/6pPIug51GUJMSr4wJ755vf1c6tBLTT192ztQI47210b01EMQzalVHQKwfJTjHGh/NNLQ3TOVtNS4FykFR52j2wO5ZyDz9PIIF4gMVz0dl0d8Wd/RZvahnHFAmyrC+3zcVqhhkjkaiwqWvk/oHBknybalT1cDUN9Q1DtmEox6FhCGiBNhW1WCok2xfcM7Kr7dYOfWjkFks6F5i+nuHNGiHm0miI00TSZR0ziOiPl0SdlpP8bXOD3TzJd0sOCfBFaHHEIvxBE0a2znMiUcmUd00g7xXwPSNOHbOG2zXgTUNFJjl2wA/eIODtYQttG7eCn1isGL+3JIQDtJDxnD9B8n02yeU7XgkaxiO0wEmWLLKbEydRsON1AvKHaL8zeMBBSFPN2ndBfD+jM8cJeUTzzJSks/IO2O2Prf6ZnM4dUwTXnjHy7oMswU0YZTyb2r3LftIOE8BSJm2PyrBoSW7q2qqmtAo6VgPmicyyNRV2O1Bl92rM0XXwvkfm0AigugF2d5dgYVD0MKRslqQN3wNTYpxlTIGfP3LmhQ+vUkGJTLKZ3Ef8/gpGEZHlwE5XJsgk/zThHOmscp3mWTVoyYPDox1VB6hjP3r2t/XnKBP0F5d7hiF7aITBlux/sFgY/E+x4JhV+LvHwsn+saBLLV1P3VZrK7lxe1QWXtX6bIY5gW3Ig+pFJdUOd7KcNu8VfeaHoZNXBp9jlvlm+f7q4INu+T02Vy8/a7vTfw==</diagram></mxfile>

File diff suppressed because one or more lines are too long


View File

@@ -1,14 +1,5 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
# coding: utf-8
FLAGS=dict(
cz=u"🇨🇿",
de=u"🇩🇪",
fr=u"🇫🇷",
uk=u"🇬🇧",
us=u"🇺🇸",
)
TEMPLATE="""<html>
<head>
<title>{{ title }}</title>
@@ -43,7 +34,7 @@ TEMPLATE="""<html>
{% for item in coming_soon %}
<tr>
<td>{{ item.flag }} {{ item.title }}</td>
<td>{{ item.title }}</td>
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />{% endif %}</td>
<td>{% if item.attend %}<a class="attend" href="{{ item.attend }}" />
{% else %}
@@ -132,13 +123,13 @@ TEMPLATE="""<html>
</table>
</div>
</body>
</html>"""
</html>""".decode("utf-8")
import datetime
import jinja2
import yaml
items = yaml.safe_load(open("index.yaml"))
items = yaml.load(open("index.yaml"))
# Items with a date correspond to scheduled sessions.
# Items without a date correspond to self-paced content.
@@ -169,7 +160,6 @@ for item in items:
item["prettydate"] = date_begin.strftime("%B %d{}, %Y").format(suffix)
item["begin"] = date_begin
item["end"] = date_end
item["flag"] = FLAGS.get(item.get("country"),"")
today = datetime.date.today()
coming_soon = [i for i in items if i.get("date") and i["end"] >= today]
@@ -187,10 +177,10 @@ with open("index.html", "w") as f:
past_workshops=past_workshops,
self_paced=self_paced,
recorded_workshops=recorded_workshops
))
).encode("utf-8"))
with open("past.html", "w") as f:
f.write(template.render(
title="Container Training",
all_past_workshops=past_workshops
))
).encode("utf-8"))

View File

@@ -1,75 +1,3 @@
- date: 2020-03-06
country: uk
city: London
event: QCON
speaker: jpetazzo
title: Kubernetes Intensive Course
attend: https://qconlondon.com/london2020/workshop/kubernetes-intro
#slides: https://qconuk2019.container.training/
- date: 2020-03-05
country: uk
city: London
event: QCON
speaker: jpetazzo
title: Docker Intensive Course
attend: https://qconlondon.com/london2020/workshop/docker-intensive-course
#slides: https://qconuk2019.container.training/
- date: 2020-02-03
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Fondamentaux Conteneurs et Docker (in French)
lang: fr
attend: https://enix.io/fr/services/formation/
- date: 2020-02-04
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Fondamentaux Orchestration et Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/
- date: 2020-02-05
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Kubernetes et Méthodologies DevOps (in French)
lang: fr
attend: https://enix.io/fr/services/formation/
- date: 2020-02-06
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Kubernetes Avancé (in French)
lang: fr
attend: https://enix.io/fr/services/formation/
- date: 2020-02-07
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Opérer Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/
- date: [2019-11-04, 2019-11-05]
country: de
city: Berlin
event: Velocity
speaker: jpetazzo
title: Deploying and scaling applications with Kubernetes
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/79109
slides: https://velocity-2019-11.container.training/
- date: 2019-11-13
country: fr
city: Marseille
@@ -79,38 +7,6 @@
lang: fr
attend: http://2019.devops-dday.com/Workshop.html
- date: 2019-10-30
country: us
city: Portland, OR
event: LISA
speaker: jpetazzo
title: Deep Dive into Kubernetes Internals for Builders and Operators
attend: https://www.usenix.org/conference/lisa19/presentation/petazzoni-tutorial
- date: [2019-10-22, 2019-10-24]
country: us
city: Charlotte, NC
event: Ardan Labs
speaker: jpetazzo
title: Kubernetes Training
attend: https://www.eventbrite.com/e/containers-docker-and-kubernetes-training-for-devs-and-ops-charlotte-nc-november-2019-tickets-73296659281
- date: 2019-10-22
country: us
city: Charlotte, NC
event: Ardan Labs
speaker: jpetazzo
title: Docker & Containers Training
attend: https://www.eventbrite.com/e/containers-docker-and-kubernetes-training-for-devs-and-ops-charlotte-nc-november-2019-tickets-73296659281
- date: 2019-10-22
country: de
city: Berlin
event: GOTO
speaker: bretfisher
title: Kubernetes or Swarm? Build Both, Deploy Apps, Learn The Differences
attend: https://gotober.com/2019/workshops/194
- date: [2019-09-24, 2019-09-25]
country: fr
city: Paris
@@ -119,43 +15,6 @@
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
slides: https://kube-2019-09.container.training/
- date: 2019-08-27
country: tr
city: Izmir
event: HacknBreak
speaker: gurayyildirim
title: Deploying and scaling applications with Kubernetes (in Turkish)
lang: tr
attend: https://hacknbreak.com
- date: 2019-08-26
country: tr
city: Izmir
event: HacknBreak
speaker: gurayyildirim
title: Container Orchestration with Docker and Swarm (in Turkish)
lang: tr
attend: https://hacknbreak.com
- date: 2019-08-25
country: tr
city: Izmir
event: HackBreak
speaker: gurayyildirim
title: Introduction to Docker and Containers (in Turkish)
lang: tr
attend: https://hacknbreak.com
- date: 2019-07-16
country: us
city: Portland, OR
event: OSCON
speaker: bridgetkromhout
title: "Kubernetes 201: Production tooling"
attend: https://conferences.oreilly.com/oscon/oscon-or/public/schedule/detail/76390
slides: https://oscon2019.container.training
- date: 2019-06-17
country: ca

View File

@@ -118,9 +118,9 @@ installed and set up `kubectl` to communicate with your cluster.
<!--
```wait Connected to localhost```
```keys INFO server```
```key ^J```
```keys ^J```
```keys QUIT```
```key ^J```
```keys ^J```
-->
- Terminate the port forwarder:

View File

@@ -167,11 +167,13 @@ What does that mean?
## Let's experiment a bit!
- For the exercises in this section, connect to the first node of the `test` cluster
- For this section, we will use a cluster with 4 nodes
(named node1, node2, node3, node4)
.exercise[
- SSH to the first node of the test cluster
- SSH to the first node of the cluster
- Check that the cluster is operational:
```bash

View File

@@ -547,7 +547,7 @@ It's important to note a couple of details in these flags...
- Exit the container with `exit` or `^D`
<!-- ```key ^D``` -->
<!-- ```keys ^D``` -->
]
@@ -667,12 +667,17 @@ class: extra-details
- For auditing purposes, sometimes we want to know who can perform an action
- There are a few tools to help us with that
- There is a proof-of-concept tool by Aqua Security which does exactly that:
- [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
https://github.com/aquasecurity/kubectl-who-can
- [Review Access (aka Rakkess)](https://github.com/corneliusweig/rakkess)
- This is one way to install it:
```bash
docker run --rm -v /usr/local/bin:/go/bin golang \
go get -v github.com/aquasecurity/kubectl-who-can
```
- Both are available as standalone programs, or as plugins for `kubectl`
(`kubectl` plugins can be installed and managed with `krew`)
- This is one way to use it:
```bash
kubectl-who-can create pods
```
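If `krew` is set up, installing them as plugins presumably looks like this (plugin names taken from the projects' documentation; double-check them before relying on this):
```bash
kubectl krew install who-can
kubectl krew install access-matrix
```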

View File

@@ -109,7 +109,7 @@ spec:
<!--
```longwait latest: digest: sha256:```
```key ^C```
```keys ^C```
-->
]

View File

@@ -174,7 +174,7 @@ spec:
<!--
```longwait registry:5000/rng-kaniko:latest:```
```key ^C```
```keys ^C```
-->
]

View File

@@ -15,3 +15,26 @@
- `dockercoins/webui:v0.1`
- `dockercoins/worker:v0.1`
---
## Setting `$REGISTRY` and `$TAG`
- In the upcoming exercises and labs, we use a couple of environment variables:
- `$REGISTRY` as a prefix to all image names
- `$TAG` as the image version tag
- For example, the worker image is `$REGISTRY/worker:$TAG`
- If you copy-paste the commands in these exercises:
**make sure that you set `$REGISTRY` and `$TAG` first!**
- For example:
```
export REGISTRY=dockercoins TAG=v0.1
```
(this will expand `$REGISTRY/worker:$TAG` to `dockercoins/worker:v0.1`)

View File

@@ -18,7 +18,7 @@
(it gives us replication primitives)
- Kubernetes helps us clone / replicate environments
- Kubernetes helps us to clone/replicate environments
(all resources can be described with manifests)

View File

@@ -10,8 +10,6 @@
- Components can be upgraded one at a time without problems
<!-- ##VERSION## -->
---
## Checking what we're running
@@ -168,7 +166,7 @@
- Upgrade kubelet:
```bash
sudo apt install kubelet=1.15.3-00
apt install kubelet=1.14.2-00
```
]
@@ -228,7 +226,7 @@
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
```
- Look for the `image:` line, and update it to e.g. `v1.15.0`
- Look for the `image:` line, and update it to e.g. `v1.14.0`
]
@@ -262,52 +260,14 @@
sudo kubeadm upgrade plan
```
]
(Note: kubeadm is confused by our manual upgrade of the API server.
<br/>It thinks the cluster is running 1.14.0!)
Note 1: kubeadm thinks that our cluster is running 1.15.0.
<br/>It is confused by our manual upgrade of the API server!
Note 2: kubeadm itself is still version 1.14.6.
<br/>It doesn't know how to upgrade to 1.15.X.
---
## Upgrading kubeadm
- First things first: we need to upgrade kubeadm
.exercise[
- Upgrade kubeadm:
```
sudo apt install kubeadm
```
- Check what kubeadm tells us:
```
sudo kubeadm upgrade plan
```
]
Note: kubeadm still thinks that our cluster is running 1.15.0.
<br/>But at least it knows about version 1.15.X now.
---
## Upgrading the cluster with kubeadm
- Ideally, we should revert our `image:` change
(so that kubeadm executes the right migration steps)
- Or we can try the upgrade anyway
.exercise[
<!-- ##VERSION## -->
- Perform the upgrade:
```bash
sudo kubeadm upgrade apply v1.15.3
sudo kubeadm upgrade apply v1.14.2
```
]
@@ -327,8 +287,8 @@ Note: kubeadm still thinks that our cluster is running 1.15.0.
- Download the configuration on each node, and upgrade kubelet:
```bash
for N in 1 2 3; do
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.15.3
ssh test$N sudo apt install kubelet=1.15.3-00
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
ssh test$N sudo apt install kubelet=1.14.2-00
done
```
]
@@ -337,7 +297,7 @@ Note: kubeadm still thinks that our cluster is running 1.15.0.
## Checking what we've done
- All our nodes should now be updated to version 1.15.3
- All our nodes should now be updated to version 1.14.2
.exercise[
@@ -347,19 +307,3 @@ Note: kubeadm still thinks that our cluster is running 1.15.0.
```
]
---
class: extra-details
## Skipping versions
- This example worked because we went from 1.14 to 1.15
- If you are upgrading from e.g. 1.13, you will generally have to go through 1.14 first
- This means upgrading kubeadm to 1.14.X, then using it to upgrade the cluster
- Then upgrading kubeadm to 1.15.X, etc.
- **Make sure to read the release notes before upgrading!**

View File

@@ -66,8 +66,6 @@ Look in each plugin's directory for its documentation.
---
class: extra-details
## Conf vs conflist
- There are two slightly different configuration formats

View File

@@ -44,37 +44,21 @@
## Other things that Kubernetes can do for us
- Autoscaling
- Basic autoscaling
(straightforward on CPU; more complex on other metrics)
- Blue/green deployment, canary deployment
- Resource management and scheduling
- Long running services, but also batch (one-off) jobs
(reserve CPU/RAM for containers; placement constraints)
- Overcommit our cluster and *evict* low-priority jobs
- Advanced rollout patterns
- Run services with *stateful* data (databases etc.)
(blue/green deployment, canary deployment)
- Fine-grained access control defining *what* can be done by *whom* on *which* resources
---
- Integrating third party services (*service catalog*)
## More things that Kubernetes can do for us
- Batch jobs
(one-off; parallel; also cron-style periodic execution)
- Fine-grained access control
(defining *what* can be done by *whom* on *which* resources)
- Stateful services
(databases, message queues, etc.)
- Automating complex tasks with *operators*
(e.g. database replication, failover, etc.)
- Automating complex tasks (*operators*)
---
@@ -199,30 +183,6 @@ class: extra-details
class: extra-details
## How many nodes should a cluster have?
- There is no particular constraint
(no need to have an odd number of nodes for quorum)
- A cluster can have zero node
(but then it won't be able to start any pods)
- For testing and development, having a single node is fine
- For production, make sure that you have extra capacity
(so that your workload still fits if you lose a node or a group of nodes)
- Kubernetes is tested with [up to 5000 nodes](https://kubernetes.io/docs/setup/best-practices/cluster-large/)
(however, running a cluster of that size requires a lot of tuning)
---
class: extra-details
## Do we need to run Docker at all?
No!
@@ -231,29 +191,11 @@ No!
- By default, Kubernetes uses the Docker Engine to run containers
- We can leverage other pluggable runtimes through the *Container Runtime Interface*
- We could also use `rkt` ("Rocket") from CoreOS
- <del>We could also use `rkt` ("Rocket") from CoreOS</del> (deprecated)
- Or leverage other pluggable runtimes through the *Container Runtime Interface*
---
class: extra-details
## Some runtimes available through CRI
- [containerd](https://github.com/containerd/containerd/blob/master/README.md)
- maintained by Docker, IBM, and community
- used by Docker Engine, microk8s, k3s, GKE; also standalone
- comes with its own CLI, `ctr`
- [CRI-O](https://github.com/cri-o/cri-o/blob/master/README.md):
- maintained by Red Hat, SUSE, and community
- used by OpenShift and Kubic
- designed specifically as a minimal runtime for Kubernetes
- [And more](https://kubernetes.io/docs/setup/production-environment/container-runtimes/)
(like CRI-O, or containerd)
---
@@ -323,48 +265,6 @@ class: pic
---
## Scaling
- How would we scale the pod shown on the previous slide?
- **Do** create additional pods
- each pod can be on a different node
- each pod will have its own IP address
- **Do not** add more NGINX containers in the pod
- all the NGINX containers would be on the same node
- they would all have the same IP address
<br/>(resulting in `Address already in use` errors)
---
## Together or separate
- Should we put e.g. a web application server and a cache together?
<br/>
("cache" being something like e.g. Memcached or Redis)
- Putting them **in the same pod** means:
- they have to be scaled together
- they can communicate very efficiently over `localhost`
- Putting them **in different pods** means:
- they can be scaled separately
- they must communicate over remote IP addresses
<br/>(incurring more latency, lower performance)
- Both scenarios can make sense, depending on our goals
---
## Credits
- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)

View File

@@ -193,12 +193,7 @@
- Best practice: set a memory limit, and pass it to the runtime
- Note: recent versions of the JVM can do this automatically
(see [JDK-8146115](https://bugs.java.com/bugdatabase/view_bug.do?bug_id=JDK-8146115))
and
[this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/)
for detailed examples)
(see [this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/) for a detailed example)
---

View File

@@ -4,29 +4,15 @@
- We want one (and exactly one) instance of `rng` per node
- We *do not want* two instances of `rng` on the same node
- What if we just scale up `deploy/rng` to the number of nodes?
- We will do that with a *daemon set*
- nothing guarantees that the `rng` containers will be distributed evenly
---
- if we add nodes later, they will not automatically run a copy of `rng`
## Why not a deployment?
- if we remove (or reboot) a node, one `rng` container will restart elsewhere
- Can't we just do `kubectl scale deployment rng --replicas=...`?
--
- Nothing guarantees that the `rng` containers will be distributed evenly
- If we add nodes later, they will not automatically run a copy of `rng`
- If we remove (or reboot) a node, one `rng` container will restart elsewhere
(and we will end up with two instances of `rng` on the same node)
- By contrast, a daemon set will start one pod per node and keep it that way
(as nodes are added or removed)
- Instead of a `deployment`, we will use a `daemonset`
---
@@ -52,7 +38,7 @@
<!-- ##VERSION## -->
- Unfortunately, as of Kubernetes 1.15, the CLI cannot create daemon sets
- Unfortunately, as of Kubernetes 1.14, the CLI cannot create daemon sets
--
@@ -110,22 +96,20 @@
```bash vim rng.yml```
```wait kind: Deployment```
```keys /Deployment```
```key ^J```
```keys ^J```
```keys cwDaemonSet```
```key ^[``` ]
```keys ^[``` ]
```keys :wq```
```key ^J```
```keys ^J```
-->
- Save, quit
- Try to create our new resource:
```bash
```
kubectl apply -f rng.yml
```
<!-- ```wait error:``` -->
]
--
@@ -503,11 +487,11 @@ be any interruption.*
<!--
```wait Please edit the object below```
```keys /app: rng```
```key ^J```
```keys ^J```
```keys noenabled: yes```
```key ^[``` ]
```keys ^[``` ]
```keys :wq```
```key ^J```
```keys ^J```
-->
]
@@ -540,18 +524,19 @@ be any interruption.*
.exercise[
- Update the YAML manifest of the service
- Add `enabled: "yes"` to its selector
- Update the service to add `enabled: "yes"` to its selector:
```bash
kubectl edit service rng
```
<!--
```wait Please edit the object below```
```keys /yes```
```key ^J```
```keys cw"yes"```
```key ^[``` ]
```keys /app: rng```
```keys ^J```
```keys noenabled: "yes"```
```keys ^[``` ]
```keys :wq```
```key ^J```
```keys ^J```
-->
]
@@ -590,25 +575,16 @@ If we did everything correctly, the web UI shouldn't show any change.
```bash
POD=$(kubectl get pod -l app=rng,pod-template-hash -o name)
kubectl logs --tail 1 --follow $POD
```
(We should see a steady stream of HTTP logs)
<!--
```wait HTTP/1.1```
```tmux split-pane -v```
-->
- In another window, remove the label from the pod:
```bash
kubectl label pod -l app=rng,pod-template-hash enabled-
```
(The stream of HTTP logs should stop immediately)
<!--
```key ^D```
```key ^C```
-->
]
There might be a slight change in the web UI (since we removed a bit

View File

@@ -105,22 +105,6 @@ The dashboard will then ask you which authentication you want to use.
---
## Other dashboards
- [Kube Web View](https://codeberg.org/hjacobs/kube-web-view)
- read-only dashboard
- optimized for "troubleshooting and incident response"
- see [vision and goals](https://kube-web-view.readthedocs.io/en/latest/vision.html#vision) for details
- [Kube Ops View](https://github.com/hjacobs/kube-ops-view)
- "provides a common operational picture for multiple Kubernetes clusters"
---
# Security implications of `kubectl apply`
- When we do `kubectl apply -f <URL>`, we create arbitrary resources
@@ -172,3 +156,4 @@ The dashboard will then ask you which authentication you want to use.
- It introduces new failure modes
(for instance, if you try to apply YAML from a link that's no longer valid)

View File

@@ -175,7 +175,7 @@ Success!
]
We should get `No resources found.` and the `kubernetes` service, respectively.
So far, so good.
Note: the API server automatically created the `kubernetes` service entry.
@@ -225,7 +225,7 @@ Success?
]
Our Deployment is in bad shape:
Our Deployment is in a bad shape:
```
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/web 0/1 0 0 2m26s
@@ -481,13 +481,13 @@ docker run alpine echo hello world
.exercise[
- Create the file `~/.kube/config` with `kubectl`:
- Create the file `kubeconfig.kubelet` with `kubectl`:
```bash
kubectl config \
kubectl --kubeconfig kubeconfig.kubelet config \
set-cluster localhost --server http://localhost:8080
kubectl config \
kubectl --kubeconfig kubeconfig.kubelet config \
set-context localhost --cluster localhost
kubectl config \
kubectl --kubeconfig kubeconfig.kubelet config \
use-context localhost
```
@@ -495,7 +495,19 @@ docker run alpine echo hello world
---
## Our `~/.kube/config` file
## All Kubernetes clients can use `kubeconfig`
- The `kubeconfig.kubelet` file has the same format as e.g. `~/.kube/config`
- All Kubernetes clients can use a similar file
- The `kubectl config` commands can be used to manipulate these files
- This highlights that kubelet is a "normal" client of the API server
---
## Our `kubeconfig.kubelet` file
The file that we generated looks like the one below.
@@ -521,9 +533,9 @@ clusters:
.exercise[
- Start kubelet with that kubeconfig file:
- Start kubelet with that `kubeconfig.kubelet` file:
```bash
kubelet --kubeconfig ~/.kube/config
kubelet --kubeconfig kubeconfig.kubelet
```
]

View File

@@ -1,211 +0,0 @@
# Authoring YAML
- There are various ways to generate YAML with Kubernetes, e.g.:
- `kubectl run`
- `kubectl create deployment` (and a few other `kubectl create` variants)
- `kubectl expose`
- When and why do we need to write our own YAML?
- How do we write YAML from scratch?
---
## The limits of generated YAML
- Many advanced (and even not-so-advanced) features require writing YAML:
- pods with multiple containers
- resource limits
- healthchecks
- DaemonSets, StatefulSets
- and more!
- How do we access these features?
---
## We don't have to start from scratch
- Create a resource (e.g. Deployment)
- Dump its YAML with `kubectl get -o yaml ...`
- Edit the YAML
- Use `kubectl apply -f ...` with the YAML file to:
- update the resource (if it's the same kind)
- create a new resource (if it's a different kind)
- Or: Use The Docs, Luke
(the documentation almost always has YAML examples)
---
## Generating YAML without creating resources
- We can use the `--dry-run` option
.exercise[
- Generate the YAML for a Deployment without creating it:
```bash
kubectl create deployment web --image nginx --dry-run
```
]
- We can clean up that YAML even more if we want
(for instance, we can remove the `creationTimestamp` and empty dicts)
---
## Using `--dry-run` with `kubectl apply`
- The `--dry-run` option can also be used with `kubectl apply`
- However, it can be misleading (it doesn't do a "real" dry run)
- Let's see what happens in the following scenario:
- generate the YAML for a Deployment
- tweak the YAML to transform it into a DaemonSet
- apply that YAML to see what would actually be created
---
## The limits of `kubectl apply --dry-run`
.exercise[
- Generate the YAML for a deployment:
```bash
kubectl create deployment web --image=nginx -o yaml > web.yaml
```
- Change the `kind` in the YAML to make it a `DaemonSet`:
```bash
sed -i s/Deployment/DaemonSet/ web.yaml
```
- Ask `kubectl` what would be applied:
```bash
kubectl apply -f web.yaml --dry-run --validate=false -o yaml
```
]
The resulting YAML doesn't represent a valid DaemonSet.
---
## Server-side dry run
- Since Kubernetes 1.13, we can use [server-side dry run and diffs](https://kubernetes.io/blog/2019/01/14/apiserver-dry-run-and-kubectl-diff/)
- Server-side dry run will do all the work, but *not* persist to etcd
(all validation and mutation hooks will be executed)
.exercise[
- Try the same YAML file as earlier, with server-side dry run:
```bash
kubectl apply -f web.yaml --server-dry-run --validate=false -o yaml
```
]
The resulting YAML doesn't have the `replicas` field anymore.
Instead, it has the fields expected in a DaemonSet.
---
## Advantages of server-side dry run
- The YAML is verified much more extensively
- The only step that is skipped is "write to etcd"
- YAML that passes server-side dry run *should* apply successfully
(unless the cluster state changes by the time the YAML is actually applied)
- Validating or mutating hooks that have side effects can also be an issue
---
## `kubectl diff`
- Kubernetes 1.13 also introduced `kubectl diff`
- `kubectl diff` does a server-side dry run, *and* shows differences
.exercise[
- Try `kubectl diff` on the YAML that we tweaked earlier:
```bash
kubectl diff -f web.yaml
```
<!-- ```wait status:``` -->
]
Note: we don't need to specify `--validate=false` here.
---
## Advantage of YAML
- Using YAML (instead of `kubectl run`/`create`/etc.) allows us to be *declarative*
- The YAML describes the desired state of our cluster and applications
- YAML can be stored, versioned, archived (e.g. in git repositories)
- To change resources, change the YAML files
(instead of using `kubectl edit`/`scale`/`label`/etc.)
- Changes can be reviewed before being applied
(with code reviews, pull requests ...)
- This workflow is sometimes called "GitOps"
(there are tools like Weave Flux or GitKube to facilitate it)
---
## YAML in practice
- Get started with `kubectl run`/`create`/`expose`/etc.
- Dump the YAML with `kubectl get -o yaml`
- Tweak that YAML and `kubectl apply` it back
- Store that YAML for reference (for further deployments)
- Feel free to clean up the YAML:
- remove fields you don't know
- check that it still works!
- That YAML will be useful later when using e.g. Kustomize or Helm

View File

@@ -87,7 +87,7 @@
- Clone the Flux repository:
```
git clone https://github.com/fluxcd/flux
git clone https://github.com/weaveworks/flux
```
- Edit `deploy/flux-deployment.yaml`

View File

@@ -1,3 +1,41 @@
## Questions to ask before adding healthchecks
- Do we want liveness, readiness, both?
(sometimes, we can use the same check, but with different failure thresholds)
- Do we have existing HTTP endpoints that we can use?
- Do we need to add new endpoints, or perhaps use something else?
- Are our healthchecks likely to use resources and/or slow down the app?
- Do they depend on additional services?
(this can be particularly tricky, see next slide)
---
## Healthchecks and dependencies
- A good healthcheck should always indicate the health of the service itself
- It should not be affected by the state of the service's dependencies
- Example: a web server requiring a database connection to operate
(make sure that the healthcheck can report "OK" even if the database is down;
<br/>
because it won't help us to restart the web server if the issue is with the DB!)
- Example: a microservice calling other microservices
- Example: a worker process
(these will generally require minor code changes to report health)
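One common pattern is to expose two separate endpoints and wire them to different probes (the paths below are illustrative, not a standard):
```bash
# Liveness: only checks that the process itself responds.
curl -f http://localhost:8080/healthz
# Readiness: also checks dependencies (database, other backends...).
curl -f http://localhost:8080/ready
```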
---
## Adding healthchecks to an app
- Let's add healthchecks to DockerCoins!
@@ -266,15 +304,15 @@ It will use the default success threshold (1 successful attempt = alive).
- We need to make sure that the healthcheck doesn't trip when
performance degrades due to external pressure
- Using a readiness check would have fewer effects
- Using a readiness check would have lesser effects
(but it would still be an imperfect solution)
(but it still would be an imperfect solution)
- A possible combination:
- readiness check with a short timeout / low failure threshold
- liveness check with a longer timeout / higher failure threshold
- liveness check with a longer timeout / higher failure treshold
---
@@ -306,7 +344,7 @@ class: extra-details
- When a process is killed, its children are *orphaned* and attached to PID 1
- PID 1 has the responsibility of *reaping* these processes when they terminate
- PID 1 has the responsibility if *reaping* these processes when they terminate
- OK, but how does that affect us?
@@ -332,4 +370,24 @@ class: extra-details
(and have gcr.io/pause take care of the reaping)
- Discussion of this in [Video - 10 Ways to Shoot Yourself in the Foot with Kubernetes, #9 Will Surprise You](https://www.youtube.com/watch?v=QKI-JRs2RIE)
---
## Healthchecks for worker
- Readiness isn't useful
(because worker isn't a backend for a service)
- Liveness may help us to restart a broken worker, but how can we check it?
- Embedding an HTTP server is an option
(but it has a high potential for unwanted side-effects and false positives)
- Using a "lease" file can be relatively easy:
- touch a file during each iteration of the main loop
- check the timestamp of that file from an exec probe
- Writing logs (and checking them from the probe) also works
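A sketch of that lease-file approach (the work command and path are made up):
```bash
# Worker main loop: refresh the lease after each unit of work.
while true; do
  process-one-job      # hypothetical: whatever the worker actually does
  touch /tmp/lease
done
```
The liveness check can then be an exec probe like `sh -c 'test "$(find /tmp/lease -mmin -1)"'`, which fails once the file hasn't been touched for a minute.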

View File

@@ -42,11 +42,9 @@
- internal corruption (causing all requests to error)
- Anything where our incident response would be "just restart/reboot it"
- If the liveness probe fails *N* consecutive times, the container is killed
.warning[**Do not** use liveness probes for problems that can't be fixed by a restart]
- Otherwise we just restart our pods for no reason, creating useless load
- *N* is the `failureThreshold` (3 by default)
---
@@ -54,7 +52,7 @@
- Indicates if the container is ready to serve traffic
- If a container becomes "unready" it might be ready again soon
- If a container becomes "unready" (let's say busy!) it might be ready again soon
- If the readiness probe fails:
@@ -68,79 +66,19 @@
## When to use a readiness probe
- To indicate failure due to an external cause
- To indicate temporary failures
- database is down or unreachable
- the application can only service *N* parallel connections
- mandatory auth or other backend service unavailable
- the runtime is busy doing garbage collection or initial data load
- To indicate temporary failure or unavailability
- The container is marked as "not ready" after `failureThreshold` failed attempts
- application can only service *N* parallel connections
(3 by default)
- runtime is busy doing garbage collection or initial data load
- It is marked again as "ready" after `successThreshold` successful attempts
- For processes that take a long time to start
(more on that later)
---
## Dependencies
- If a web server depends on a database to function, and the database is down:
- the web server's liveness probe should succeed
- the web server's readiness probe should fail
- Same thing for any hard dependency (without which the container can't work)
.warning[**Do not** fail liveness probes for problems that are external to the container]
---
## Timing and thresholds
- Probes are executed at intervals of `periodSeconds` (default: 10)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
.warning[If a probe takes longer than that, it is considered as a FAIL]
- A probe is considered successful after `successThreshold` successes (default: 1)
- A probe is considered failing after `failureThreshold` failures (default: 3)
- A probe can have an `initialDelaySeconds` parameter (default: 0)
- Kubernetes will wait that amount of time before running the probe for the first time
(this is important to avoid killing services that take a long time to start)
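All these parameters map to fields of the pod spec, so we can read their reference documentation straight from the cluster:
```bash
kubectl explain pod.spec.containers.livenessProbe
```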
---
class: extra-details
## Startup probe
- Kubernetes 1.16 introduces a third type of probe: `startupProbe`
(it is in `alpha` in Kubernetes 1.16)
- It can be used to indicate "container not ready *yet*"
- process is still starting
- loading external data, priming caches
- Before Kubernetes 1.16, we had to use the `initialDelaySeconds` parameter
(available for both liveness and readiness probes)
- `initialDelaySeconds` is a rigid delay (always wait X before running probes)
- `startupProbe` works better when a container start time can vary a lot
(1 by default)
---
@@ -174,12 +112,10 @@ class: extra-details
(instead of serving errors or timeouts)
- Unavailable backends get removed from load balancer rotation
- Overloaded backends get removed from load balancer rotation
(thus improving response times across the board)
- If a probe is not defined, it's as if there was an "always successful" probe
---
## Example: HTTP probe
@@ -229,56 +165,14 @@ If the Redis process becomes unresponsive, it will be killed.
---
## Questions to ask before adding healthchecks
## Details about liveness and readiness probes
- Do we want liveness, readiness, both?
- Probes are executed at intervals of `periodSeconds` (default: 10)
(sometimes, we can use the same check, but with different failure thresholds)
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
- Do we have existing HTTP endpoints that we can use?
- A probe is considered successful after `successThreshold` successes (default: 1)
- Do we need to add new endpoints, or perhaps use something else?
- A probe is considered failing after `failureThreshold` failures (default: 3)
- Are our healthchecks likely to use resources and/or slow down the app?
- Do they depend on additional services?
(this can be particularly tricky, see next slide)
---
## Healthchecks and dependencies
- Liveness checks should not be influenced by the state of external services
- All checks should reply quickly (by default, less than 1 second)
- Otherwise, they are considered to fail
- This might require checking the health of dependencies asynchronously
(e.g. if a database or API might be healthy but still take more than
1 second to reply, we should check the status asynchronously and report
a cached status)
---
## Healthchecks for workers
(In that context, worker = process that doesn't accept connections)
- Readiness isn't useful
(because workers aren't backends for a service)
- Liveness may help us restart a broken worker, but how can we check it?
- Embedding an HTTP server is a (potentially expensive) option
- Using a "lease" file can be relatively easy:
- touch a file during each iteration of the main loop
- check the timestamp of that file from an exec probe
- Writing logs (and checking them from the probe) also works
- If a probe is not defined, it's as if there was an "always successful" probe

View File

@@ -22,9 +22,9 @@
- `helm` is a CLI tool
- It is used to find, install, upgrade *charts*
- `tiller` is its companion server-side component
- A chart is an archive containing templatized YAML bundles
- A "chart" is an archive containing templatized YAML bundles
- Charts are versioned
@@ -32,90 +32,6 @@
---
## Differences between charts and packages
- A package (deb, rpm...) contains binaries, libraries, etc.
- A chart contains YAML manifests
(the binaries, libraries, etc. are in the images referenced by the chart)
- On most distributions, a package can only be installed once
(installing another version replaces the installed one)
- A chart can be installed multiple times
- Each installation is called a *release*
- This allows us to install e.g. 10 instances of MongoDB
(with potentially different versions and configurations)
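For instance, something like this (Helm 3 syntax; the release names and pinned version are made up, and `stable/mongodb` is assumed to exist in the configured repos):
```bash
helm install mongo-dev stable/mongodb
helm install mongo-prod stable/mongodb --version 7.8.10
```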
---
class: extra-details
## Wait a minute ...
*But, on my Debian system, I have Python 2 **and** Python 3.
<br/>
Also, I have multiple versions of the Postgres database engine!*
Yes!
But they have different package names:
- `python2.7`, `python3.8`
- `postgresql-10`, `postgresql-11`
Good to know: the Postgres package in Debian includes
provisions to deploy multiple Postgres servers on the
same system, but it's an exception (and it's a lot of
work done by the package maintainer, not by the `dpkg`
or `apt` tools).
---
## Helm 2 vs Helm 3
- Helm 3 was released [November 13, 2019](https://helm.sh/blog/helm-3-released/)
- Charts remain compatible between Helm 2 and Helm 3
- The CLI is very similar (with minor changes to some commands)
- The main difference is that Helm 2 uses `tiller`, a server-side component
- Helm 3 doesn't use `tiller` at all, making it simpler (yay!)
---
class: extra-details
## With or without `tiller`
- With Helm 3:
- the `helm` CLI communicates directly with the Kubernetes API
- it creates resources (deployments, services...) with our credentials
- With Helm 2:
- the `helm` CLI communicates with `tiller`, telling `tiller` what to do
- `tiller` then communicates with the Kubernetes API, using its own credentials
- This indirect model caused significant permissions headaches
(`tiller` required very broad permissions to function)
- `tiller` was removed in Helm 3 to simplify the security aspects
---
## Installing Helm
- If the `helm` CLI is not installed in your environment, install it
@@ -129,21 +45,14 @@ class: extra-details
- If it's not installed, run the following command:
```bash
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 \
| bash
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
```
]
(To install Helm 2, replace `get-helm-3` with `get`.)
---
class: extra-details
## Only if using Helm 2 ...
- We need to install Tiller and give it some permissions
## Installing Tiller
- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace
@@ -158,6 +67,8 @@ class: extra-details
]
If Tiller was already installed, don't worry: this won't break it.
At the end of the install process, you will see:
```
@@ -166,11 +77,9 @@ Happy Helming!
---
class: extra-details
## Fix account permissions
## Only if using Helm 2 ...
- Tiller needs permissions to create Kubernetes resources
- Helm permission model requires us to tweak permissions
- In a more realistic deployment, you might create per-user or per-team
service accounts, roles, and role bindings
@@ -183,7 +92,6 @@ class: extra-details
--clusterrole=cluster-admin --serviceaccount=kube-system:default
```
]
(Defining the exact roles and permissions on your cluster requires
@@ -192,228 +100,79 @@ fine for personal and development clusters.)
---
## Charts and repositories
## View available charts
- A *repository* (or repo for short) is a collection of charts
- A public repo is pre-configured when installing Helm
- It's just a bunch of files
(they can be hosted by a static HTTP server, or on a local directory)
- We can add "repos" to Helm, giving them a nickname
- The nickname is used when referring to charts on that repo
(for instance, if we try to install `hello/world`, that
means the chart `world` on the repo `hello`; and that repo
`hello` might be something like https://blahblah.hello.io/charts/)
---
## Managing repositories
- Let's check what repositories we have, and add the `stable` repo
(the `stable` repo contains a set of official-ish charts)
- We can view available charts with `helm search` (and an optional keyword)
.exercise[
- List our repos:
- View all available charts:
```bash
helm repo list
helm search
```
- Add the `stable` repo:
- View charts related to `prometheus`:
```bash
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
helm search prometheus
```
]
Adding a repo can take a few seconds (it downloads the list of charts from the repo).
It's OK to add a repo that already exists (it will merely update it).
---
## Search available charts
## Install a chart
- We can search available charts with `helm search`
- Most charts use `LoadBalancer` service types by default
- We need to specify where to search (only our repos, or Helm Hub)
- Most charts require persistent volumes to store data
- Let's search for all charts mentioning tomcat!
- We need to relax these requirements a bit
.exercise[
- Search for tomcat in the repo that we added earlier:
- Install the Prometheus metrics collector on our cluster:
```bash
helm search repo tomcat
```
- Search for tomcat on the Helm Hub:
```bash
helm search hub tomcat
helm install stable/prometheus \
--set server.service.type=NodePort \
--set server.persistentVolume.enabled=false
```
]
[Helm Hub](https://hub.helm.sh/) indexes many repos, using the [Monocular](https://github.com/helm/monocular) server.
Where do these `--set` options come from?
---
## Charts and releases
## Inspecting a chart
- "Installing a chart" means creating a *release*
- We need to name that release
(or use the `--generate-name` flag to get Helm to generate one for us)
- `helm inspect` shows details about a chart (including available options)
.exercise[
- Install the tomcat chart that we found earlier:
- See the metadata and all available options for `stable/prometheus`:
```bash
helm install java4ever stable/tomcat
helm inspect stable/prometheus
```
- List the releases:
]
The chart's metadata includes a URL to the project's home page.
(Sometimes it conveniently points to the documentation for the chart.)
---
## Viewing installed charts
- Helm keeps track of what we've installed
.exercise[
- List installed Helm charts:
```bash
helm list
```
]
---
class: extra-details
## Searching and installing with Helm 2
- Helm 2 doesn't have support for the Helm Hub
- The `helm search` command only takes a search string argument
(e.g. `helm search tomcat`)
- With Helm 2, the name is optional:
`helm install stable/tomcat` will automatically generate a name
`helm install --name java4ever stable/tomcat` will specify a name
---
## Viewing resources of a release
- This specific chart labels all its resources with a `release` label
- We can use a selector to see these resources
.exercise[
- List all the resources created by this release:
```bash
kubectl get all --selector=release=java4ever
```
]
Note: this `release` label wasn't added automatically by Helm.
<br/>
It is defined in that chart. In other words, not all charts will provide this label.
---
## Configuring a release
- By default, `stable/tomcat` creates a service of type `LoadBalancer`
- We would like to change that to a `NodePort`
- We could use `kubectl edit service java4ever-tomcat`, but ...
... our changes would get overwritten next time we update that chart!
- Instead, we are going to *set a value*
- Values are parameters that the chart can use to change its behavior
- Values have default values
- Each chart is free to define its own values and their defaults
---
## Checking possible values
- We can inspect a chart with `helm show` or `helm inspect`
.exercise[
- Look at the README for tomcat:
```bash
helm show readme stable/tomcat
```
- Look at the values and their defaults:
```bash
helm show values stable/tomcat
```
]
The `values` may or may not have useful comments.
The `readme` may or may not have (accurate) explanations for the values.
(If we're unlucky, there won't be any indication about how to use the values!)
---
## Setting values
- Values can be set when installing a chart, or when upgrading it
- We are going to update `java4ever` to change the type of the service
.exercise[
- Update `java4ever`:
```bash
helm upgrade java4ever stable/tomcat --set service.type=NodePort
```
]
Note that we have to specify the chart that we use (`stable/tomcat`),
even if we just want to update some values.
We can set multiple values. If we want to set many values, we can use `-f`/`--values` and pass a YAML file with all the values.
All unspecified values will take the default values defined in the chart.
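For example (a minimal sketch; `service.type` is the only value we've seen for this chart):
```bash
cat > tomcat-values.yaml <<EOF
service:
  type: NodePort
EOF
helm upgrade java4ever stable/tomcat --values tomcat-values.yaml
```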
---
## Connecting to tomcat
- Let's check the tomcat server that we just installed
- Note: its readiness probe has a 60s delay
(so it will take 60s after the initial deployment before the service works)
.exercise[
- Check the node port allocated to the service:
```bash
kubectl get service java4ever-tomcat
PORT=$(kubectl get service java4ever-tomcat -o jsonpath={..nodePort})
```
- Connect to it, checking the demo app on `/sample/`:
```bash
curl localhost:$PORT/sample/
```
]

View File

@@ -105,36 +105,19 @@
- Monitor pod CPU usage:
```bash
watch kubectl top pods -l app=busyhttp
watch kubectl top pods
```
<!--
```wait NAME```
```tmux split-pane -v```
```bash CLUSTERIP=$(kubectl get svc busyhttp -o jsonpath={.spec.clusterIP})```
-->
- Monitor service latency:
```bash
httping http://`$CLUSTERIP`/
httping http://`ClusterIP`/
```
<!--
```wait connected to```
```tmux split-pane -v```
-->
- Monitor cluster events:
```bash
kubectl get events -w
```
<!--
```wait Normal```
```tmux split-pane -v```
```bash CLUSTERIP=$(kubectl get svc busyhttp -o jsonpath={.spec.clusterIP})```
-->
]
---
@@ -147,15 +130,9 @@
- Send a lot of requests to the service, with a concurrency level of 3:
```bash
ab -c 3 -n 100000 http://`$CLUSTERIP`/
ab -c 3 -n 100000 http://`ClusterIP`/
```
<!--
```wait be patient```
```tmux split-pane -v```
```tmux selectl even-vertical```
-->
]
The latency (reported by `httping`) should increase above 3s.
@@ -216,20 +193,6 @@ This can also be set with `--cpu-percent=`.
kubectl edit deployment busyhttp
```
<!--
```wait Please edit```
```keys /resources```
```key ^J```
```keys $xxxo requests:```
```key ^J```
```key Space```
```key Space```
```keys cpu: "1"```
```key Escape```
```keys :wq```
```key ^J```
-->
- In the `containers` list, add the following block:
```yaml
resources:
  requests:
    cpu: "1"
```
@@ -280,29 +243,3 @@ This can also be set with `--cpu-percent=`.
- The metrics provided by metrics server are standard; everything else is custom
- For more details, see [this great blog post](https://medium.com/uptime-99/kubernetes-hpa-autoscaling-with-custom-and-external-metrics-da7f41ff7846) or [this talk](https://www.youtube.com/watch?v=gSiGFH4ZnS8)
---
## Cleanup
- Since `busyhttp` uses CPU cycles, let's stop it before moving on
.exercise[
- Delete the `busyhttp` Deployment:
```bash
kubectl delete deployment busyhttp
```
<!--
```key ^D```
```key ^C```
```key ^D```
```key ^C```
```key ^D```
```key ^C```
```key ^D```
```key ^C```
-->
]

View File

@@ -415,7 +415,7 @@ This is normal: we haven't provided any ingress rule yet.
Here is a minimal host-based ingress resource:
```yaml
apiVersion: networking.k8s.io/v1beta1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cheddar
@@ -523,184 +523,4 @@ spec:
- This should eventually stabilize
(remember that ingresses are currently `apiVersion: networking.k8s.io/v1beta1`)
---
## A special feature in action
- We're going to see how to implement *canary releases* with Traefik
- This feature is available on multiple ingress controllers
- ... But it is configured very differently on each of them
---
## Canary releases
- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload
- After deploying the canary, we compare its metrics to the normal release
- If the metrics look good, the canary will progressively receive more traffic
(until it gets 100% and becomes the new normal release)
- If the metrics aren't good, the canary is automatically removed
- When we deploy a bad release, only a tiny fraction of traffic is affected
---
## Various ways to implement canary
- Example 1: canary for a microservice
- 1% of all requests (sampled randomly) are sent to the canary
- the remaining 99% are sent to the normal release
- Example 2: canary for a web app
- 1% of users are sent to the canary web site
- the remaining 99% are sent to the normal release
- Example 3: canary for shipping physical goods
- 1% of orders are shipped with the canary process
- the remaining 99% are shipped with the normal process
- We're going to implement example 1 (per-request routing)
---
## Canary releases with Traefik
- We need to deploy the canary and expose it with a separate service
- Then, in the Ingress resource, we need:
- multiple `paths` entries (one for each service, canary and normal)
- an extra annotation indicating the weight of each service
- If we want, we can send requests to more than 2 services
- Let's send requests to our 3 cheesy services!
.exercise[
- Create the resource shown on the next slide
]
---
## The Ingress resource
.small[
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: cheeseplate
annotations:
traefik.ingress.kubernetes.io/service-weights: |
cheddar: 50%
wensleydale: 25%
stilton: 25%
spec:
rules:
- host: cheeseplate.`A.B.C.D`.nip.io
http:
paths:
- path: /
backend:
serviceName: cheddar
servicePort: 80
- path: /
backend:
serviceName: wensleydale
servicePort: 80
- path: /
backend:
serviceName: stilton
servicePort: 80
```
]
---
## Testing the canary
- Let's check the percentage of requests going to each service
.exercise[
- Continuously send HTTP requests to the new ingress:
```bash
while sleep 0.1; do
curl -s http://cheeseplate.A.B.C.D.nip.io/
done
```
]
We should see a 50/25/25 request mix.
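To get an actual tally instead of eyeballing the stream, something like this can help (assuming each backend's page contains the name of its cheese):
```bash
for i in $(seq 100); do
  curl -s http://cheeseplate.A.B.C.D.nip.io/ \
  | grep -io -E "cheddar|wensleydale|stilton" | head -1
done | sort | uniq -c
```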
---
class: extra-details
## Load balancing fairness
Note: if we use odd request ratios, the load balancing algorithm might appear to be broken on a small scale (when sending a small number of requests), but on a large scale (with many requests) it will be fair.
For instance, with an 11%/89% ratio, we might see 79 requests in a row going to the 89%-weighted service, then requests alternating between the two services; then 79 requests again, etc.
---
class: extra-details
## Other ingress controllers
*Just to illustrate how different things are ...*
- With the NGINX ingress controller:
- define two ingress resources
<br/>
(specifying rules with the same host+path)
- add `nginx.ingress.kubernetes.io/canary` annotations on each (see the sketch below)
- With Linkerd2:
- define two services
- define an extra service for the weighted aggregate of the two
- define a TrafficSplit (this is a CRD introduced by the SMI spec)
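Here is a minimal sketch of the NGINX variant (resource name, host, and weight are illustrative):
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever-canary
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-weight: "10"
spec:
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever-new
          servicePort: 80
```
The canary ingress targets the new service; the pre-existing ingress keeps receiving the remaining 90% of the traffic.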
---
class: extra-details
## We need more than that
What we saw is just one of the multiple building blocks that we need to achieve a canary release.
We also need:
- metrics (latency, performance ...) for our releases
- automation to alter canary weights
(increase canary weight if metrics look good; decrease otherwise)
- a mechanism to manage the lifecycle of the canary releases
(create them, promote them, delete them ...)
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).
(remember that ingresses are currently `apiVersion: extensions/v1beta1`)

View File

@@ -14,80 +14,42 @@
`ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName`
- HTTP services can also use `Ingress` resources (more on that later)
---
## Basic service types
- `ClusterIP` (default type)
- a virtual IP address is allocated for the service (in an internal, private range)
- this IP address is reachable only from within the cluster (nodes and pods)
- our code can connect to the service using the original port number
- `NodePort`
- a port is allocated for the service (by default, in the 30000-32768 range)
- that port is made available *on all our nodes* and anybody can connect to it
- our code must be changed to connect to that new port number
These service types are always available.
Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` rules.
---
## `ClusterIP`
## More service types
- It's the default service type
- `LoadBalancer`
- A virtual IP address is allocated for the service
- an external load balancer is allocated for the service
- the load balancer is configured accordingly
<br/>(e.g.: a `NodePort` service is created, and the load balancer sends traffic to that port)
- available only when the underlying infrastructure provides some "load balancer as a service"
<br/>(e.g. AWS, Azure, GCE, OpenStack...)
(in an internal, private range; e.g. 10.96.0.0/12)
- `ExternalName`
- This IP address is reachable only from within the cluster (nodes and pods)
- Our code can connect to the service using the original port number
- Perfect for internal communication, within the cluster
---
## `LoadBalancer`
- An external load balancer is allocated for the service
(typically a cloud load balancer, e.g. ELB on AWS, GLB on GCE ...)
- This is available only when the underlying infrastructure provides some kind of
"load balancer as a service"
- Each service of that type will typically cost a little bit of money
(e.g. a few cents per hour on AWS or GCE)
- Ideally, traffic would flow directly from the load balancer to the pods
- In practice, it will often flow through a `NodePort` first
---
## `NodePort`
- A port number is allocated for the service
(by default, in the 30000-32767 range)
- That port is made available *on all our nodes* and anybody can connect to it
(we can connect to any node on that port to reach the service)
- Our code needs to be changed to connect to that new port number
- Under the hood: `kube-proxy` sets up a bunch of `iptables` rules on our nodes
- Sometimes, it's the only available option for external traffic
(e.g. most clusters deployed with kubeadm or on-premises)
---
class: extra-details
## `ExternalName`
- No load balancer (internal or external) is created
- Only a DNS entry gets added to the DNS managed by Kubernetes
- That DNS entry will just be a `CNAME` to a provided record
Example:
```bash
kubectl create service externalname k8s --external-name kubernetes.io
```
*Creates a CNAME `k8s` pointing to `kubernetes.io`*
- the DNS entry managed by CoreDNS will just be a `CNAME` to a provided record
- no port, no IP address, no nothing else is allocated
---
@@ -124,10 +86,7 @@ kubectl create service externalname k8s --external-name kubernetes.io
kubectl get pods -w
```
<!--
```wait NAME```
```tmux split-pane -h```
-->
<!-- ```keys ^C``` -->
- Create a deployment for this very lightweight HTTP server:
```bash
@@ -194,8 +153,6 @@ kubectl create service externalname k8s --external-name kubernetes.io
<!--
```hide kubectl wait deploy httpenv --for condition=available```
```key ^D```
```key ^C```
-->
- Send a few requests:
@@ -322,28 +279,18 @@ error: the server doesn't have a resource type "endpoint"
---
class: extra-details
## Exposing services to the outside world
## `ExternalIP`
- The default type (ClusterIP) only works for internal traffic
- When creating a service, we can also specify an `ExternalIP`
- If we want to accept external traffic, we can use one of these:
(this is not a type, but an extra attribute to the service)
- NodePort (expose a service on a TCP port between 30000-32768)
- It will make the service available on this IP address
- LoadBalancer (provision a cloud load balancer for our service)
(if the IP address belongs to a node of the cluster)
- ExternalIP (use one node's external IP address)
---
- Ingress (a special mechanism for HTTP services)
## `Ingress`
- Ingresses are another type (kind) of resource
- They are specifically for HTTP services
(not TCP or UDP)
- They can also handle TLS certificates, URL rewriting ...
- They require an *Ingress Controller* to function
*We'll see NodePorts and Ingresses more in detail later.*

View File

@@ -20,50 +20,6 @@
---
class: extra-details
## `kubectl` is the new SSH
- We often start managing servers with SSH
(installing packages, troubleshooting ...)
- At scale, it becomes tedious, repetitive, error-prone
- Instead, we use config management, central logging, etc.
- In many cases, we still need SSH:
- as the underlying access method (e.g. Ansible)
- to debug tricky scenarios
- to inspect and poke at things
---
class: extra-details
## The parallel with `kubectl`
- We often start managing Kubernetes clusters with `kubectl`
(deploying applications, troubleshooting ...)
- At scale (with many applications or clusters), it becomes tedious, repetitive, error-prone
- Instead, we use automated pipelines, observability tooling, etc.
- In many cases, we still need `kubectl`:
- to debug tricky scenarios
- to inspect and poke at things
- The Kubernetes API is always the underlying access method
---
## `kubectl get`
- Let's look at our `Node` resources with `kubectl get`!
@@ -115,7 +71,7 @@ class: extra-details
- Show the capacity of all our nodes as a stream of JSON objects:
```bash
kubectl get nodes -o json |
kubectl get nodes -o json |
jq ".items[] | {name:.metadata.name} + .status.capacity"
```
@@ -226,6 +182,53 @@ class: extra-details
---
## Services
- A *service* is a stable endpoint to connect to "something"
(In the initial proposal, they were called "portals")
.exercise[
- List the services on our cluster with one of these commands:
```bash
kubectl get services
kubectl get svc
```
]
--
There is already one service on our cluster: the Kubernetes API itself.
---
## ClusterIP services
- A `ClusterIP` service is internal, available from the cluster only
- This is useful for introspection from within containers
.exercise[
- Try to connect to the API:
```bash
curl -k https://`10.96.0.1`
```
- `-k` is used to skip certificate verification
- Make sure to replace 10.96.0.1 with the CLUSTER-IP shown by `kubectl get svc`
]
--
The error that we see is expected: the Kubernetes API requires authentication.
---
## Listing running containers
- Containers are manipulated through *pods*
@@ -464,117 +467,3 @@ class: extra-details
[KEP-0009]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0009-node-heartbeat.md
[node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller
---
## Services
- A *service* is a stable endpoint to connect to "something"
(In the initial proposal, they were called "portals")
.exercise[
- List the services on our cluster with one of these commands:
```bash
kubectl get services
kubectl get svc
```
]
--
There is already one service on our cluster: the Kubernetes API itself.
---
## ClusterIP services
- A `ClusterIP` service is internal, available from the cluster only
- This is useful for introspection from within containers
.exercise[
- Try to connect to the API:
```bash
curl -k https://`10.96.0.1`
```
- `-k` is used to skip certificate verification
- Make sure to replace 10.96.0.1 with the CLUSTER-IP shown by `kubectl get svc`
]
The command above should either time out, or show an authentication error. Why?
---
## Time out
- Connections to ClusterIP services only work *from within the cluster*
- If we are outside the cluster, the `curl` command will probably time out
(Because the IP address, e.g. 10.96.0.1, isn't routed properly outside the cluster)
- This is the case with most "real" Kubernetes clusters
- To try the connection from within the cluster, we can use [shpod](https://github.com/jpetazzo/shpod)
---
## Authentication error
This is what we should see when connecting from within the cluster:
```json
$ curl -k https://10.96.0.1
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {
},
"status": "Failure",
"message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
"reason": "Forbidden",
"details": {
},
"code": 403
}
```
---
## Explanations
- We can see `kind`, `apiVersion`, `metadata`
- These are typical of a Kubernetes API reply
- Because we *are* talking to the Kubernetes API
- The Kubernetes API tells us "Forbidden"
(because it requires authentication)
- The Kubernetes API is reachable from within the cluster
(many apps integrating with Kubernetes will use this)
---
## DNS integration
- Each service also gets a DNS record
- The Kubernetes DNS resolver is available *from within pods*
(and sometimes, from within nodes, depending on configuration)
- Code running in pods can connect to services using their name
(e.g. https://kubernetes/...)
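For example, a quick way to see this in action (using a throwaway pod; the image and pod name are just for illustration):
```bash
kubectl run dnstest --rm -it --restart=Never --image=alpine -- \
       nslookup kubernetes.default.svc.cluster.local
```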

View File

@@ -101,7 +101,7 @@ If we wanted to talk to the API, we would need to:
<!--
```wait /version```
```key ^J```
```keys ^J```
-->
- Terminate the proxy:

View File

@@ -20,9 +20,10 @@
.exercise[
- Let's ping the address of `localhost`, the loopback interface:
- Let's ping `1.1.1.1`, Cloudflare's
[public DNS resolver](https://blog.cloudflare.com/announcing-1111/):
```bash
kubectl run pingpong --image alpine ping 127.0.0.1
kubectl run pingpong --image alpine ping 1.1.1.1
```
<!-- ```hide kubectl wait deploy/pingpong --for condition=available``` -->
@@ -152,11 +153,9 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
kubectl logs deploy/pingpong --tail 1 --follow
```
- Leave that command running, so that we can keep an eye on these logs
<!--
```wait seq=3```
```tmux split-pane -h```
```keys ^C```
-->
]
@@ -187,54 +186,6 @@ We could! But the *deployment* would notice it right away, and scale back to the
---
## Log streaming
- Let's look again at the output of `kubectl logs`
(the one we started before scaling up)
- `kubectl logs` shows us one line per second
- We could expect 3 lines per second
(since we should now have 3 pods running `ping`)
- Let's try to figure out what's happening!
---
## Streaming logs of multiple pods
- What happens if we restart `kubectl logs`?
.exercise[
- Interrupt `kubectl logs` (with Ctrl-C)
<!--
```tmux last-pane```
```key ^C```
-->
- Restart it:
```bash
kubectl logs deploy/pingpong --tail 1 --follow
```
<!--
```wait using pod/pingpong-```
```tmux last-pane```
-->
]
`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them.
Let's leave `kubectl logs` running while we keep exploring.
---
## Resilience
- The *deployment* `pingpong` watches its *replica set*
@@ -245,56 +196,27 @@ Let's leave `kubectl logs` running while we keep exploring.
.exercise[
- In a separate window, watch the list of pods:
- In a separate window, list pods, and keep watching them:
```bash
watch kubectl get pods
kubectl get pods -w
```
<!--
```wait Every 2.0s```
```tmux split-pane -v```
```wait Running```
```keys ^C```
```hide kubectl wait deploy pingpong --for condition=available```
```keys kubectl delete pod ping```
```copypaste pong-..........-.....```
-->
- Destroy the pod currently shown by `kubectl logs`:
- Destroy a pod:
```
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
```
<!--
```tmux select-pane -t 0```
```copy pingpong-[^-]*-.....```
```tmux last-pane```
```keys kubectl delete pod ```
```paste```
```key ^J```
```check```
```key ^D```
```tmux select-pane -t 1```
```key ^C```
```key ^D```
-->
]
---
## What happened?
- `kubectl delete pod` terminates the pod gracefully
(sending it the TERM signal and waiting for it to shut down)
- As soon as the pod is in "Terminating" state, the Replica Set replaces it
- But we can still see the output of the "Terminating" pod in `kubectl logs`
- Until 30 seconds later, when the grace period expires
- The pod is then killed, and `kubectl logs` exits
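If we don't want to wait for the full grace period, it can be shortened (the pod name below is a placeholder, as before):
```bash
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy --grace-period=1
```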
---
## What if we wanted something different?
- What if we wanted to start a "one-shot" container that *doesn't* get restarted?
@@ -312,73 +234,6 @@ Let's leave `kubectl logs` running while we keep exploring.
---
## Scheduling periodic background work
- A Cron Job is a job that will be executed at specific intervals
(the name comes from the traditional cronjobs executed by the UNIX crond)
- It requires a *schedule*, represented as five space-separated fields:
- minute [0,59]
- hour [0,23]
- day of the month [1,31]
- month of the year [1,12]
- day of the week ([0,6] with 0=Sunday)
- `*` means "all valid values"; `/N` means "every N"
- Example: `*/3 * * * *` means "every three minutes"
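For reference, here is a sketch of a manifest equivalent to the `kubectl run` command used on the next slide (`batch/v1beta1` was the CronJob API version at the time):
```yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: every3mins
spec:
  schedule: "*/3 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: sleep
            image: alpine
            args: ["sleep", "10"]
```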
---
## Creating a Cron Job
- Let's create a simple job to be executed every three minutes
- Cron Jobs need to terminate, otherwise they'd run forever
.exercise[
- Create the Cron Job:
```bash
kubectl run every3mins --schedule="*/3 * * * *" --restart=OnFailure \
--image=alpine sleep 10
```
- Check the resource that was created:
```bash
kubectl get cronjobs
```
]
---
## Cron Jobs in action
- At the specified schedule, the Cron Job will create a Job
- The Job will create a Pod
- The Job will make sure that the Pod completes
(re-creating another one if it fails, for instance if its node fails)
.exercise[
- Check the Jobs that are created:
```bash
kubectl get jobs
```
]
(It will take a few minutes before the first job is scheduled.)
---
## What about that deprecation warning?
- As we can see from the previous slide, `kubectl run` can do many things
@@ -402,12 +257,12 @@ Let's leave `kubectl logs` running while we keep exploring.
## Various ways of creating resources
- `kubectl run`
- `kubectl run`
- easy way to get started
- versatile
- `kubectl create <resource>`
- `kubectl create <resource>`
- explicit, but lacks some features
- can't create a CronJob before Kubernetes 1.14
@@ -454,7 +309,7 @@ Let's leave `kubectl logs` running while we keep exploring.
<!--
```wait seq=```
```key ^C```
```keys ^C```
-->
]
@@ -483,8 +338,6 @@ class: extra-details
kubectl logs -l run=pingpong --tail 1 -f
```
<!-- ```wait error:``` -->
]
We see a message like the following one:
@@ -553,36 +406,15 @@ class: extra-details
---
class: extra-details
## Aren't we flooding 1.1.1.1?
## Party tricks involving IP addresses
- If you're wondering this, good question!
- It is possible to specify an IP address with less than 4 bytes
- Don't worry, though:
(example: `127.1`)
*APNIC's research group held the IP addresses 1.1.1.1 and 1.0.0.1. While the addresses were valid, so many people had entered them into various random systems that they were continuously overwhelmed by a flood of garbage traffic. APNIC wanted to study this garbage traffic but any time they'd tried to announce the IPs, the flood would overwhelm any conventional network.*
- Zeroes are then inserted in the middle
(Source: https://blog.cloudflare.com/announcing-1111/)
- As a result, `127.1` expands to `127.0.0.1`
- So we can `ping 127.1` to ping `localhost`!
(See [this blog post](https://ma.ttias.be/theres-more-than-one-way-to-write-an-ip-address/) for more details.)
---
class: extra-details
## More party tricks with IP addresses
- We can also ping `1.1`
- `1.1` will expand to `1.0.0.1`
- This is one of the addresses of Cloudflare's
[public DNS resolver](https://blog.cloudflare.com/announcing-1111/)
- This is a quick way to check connectivity
(if we can reach 1.1, we probably have internet access)
- It's very unlikely that our concerted pings manage to produce
even a modest blip at Cloudflare's NOC!

View File

@@ -12,9 +12,9 @@
<!--
```wait RESTARTS```
```key ^C```
```keys ^C```
```wait AVAILABLE```
```key ^C```
```keys ^C```
-->
- Now, create more `worker` replicas:

View File

@@ -97,8 +97,6 @@
ship init https://github.com/jpetazzo/kubercoins
```
<!-- ```wait Open browser``` -->
]
---
@@ -191,11 +189,6 @@
kubectl logs deploy/worker --tail=10 --follow --namespace=kustomcoins
```
<!--
```wait units of work done```
```key ^C```
-->
]
Note: it might take a minute or two for the worker to start.

View File

@@ -56,6 +56,28 @@
---
## Work in a separate namespace
- To avoid conflicts with existing resources, let's create and use a new namespace
.exercise[
- Create a new namespace:
```bash
kubectl create namespace orange
```
- Switch to that namespace:
```bash
kns orange
```
]
.warning[Make sure to call that namespace `orange`: it is hardcoded in the YAML files.]
---
## Deploying Consul
- We will use a slightly different YAML file
@@ -66,9 +88,7 @@
- the corresponding `volumeMounts` in the Pod spec
- the label `consul` has been changed to `persistentconsul`
<br/>
(to avoid conflicts with the other Stateful Set)
- the namespace `orange` used for discovery of Pods
.exercise[
@@ -97,7 +117,7 @@
kubectl get pv
```
- The Pod `persistentconsul-0` is not scheduled yet:
- The Pod `consul-0` is not scheduled yet:
```bash
kubectl get pods -o wide
```
@@ -112,9 +132,9 @@
- In a Stateful Set, the Pods are started one by one
- `persistentconsul-1` won't be created until `persistentconsul-0` is running
- `consul-1` won't be created until `consul-0` is running
- `persistentconsul-0` has a dependency on an unbound Persistent Volume Claim
- `consul-0` has a dependency on an unbound Persistent Volume Claim
- The scheduler won't schedule the Pod until the PVC is bound
@@ -152,7 +172,7 @@
- Once a PVC is bound, its pod can start normally
- Once the pod `persistentconsul-0` has started, `persistentconsul-1` can be created, etc.
- Once the pod `consul-0` has started, `consul-1` can be created, etc.
- Eventually, our Consul cluster is up, and backed by "persistent" volumes
@@ -160,7 +180,7 @@
- Check that our Consul cluster indeed has 3 members:
```bash
kubectl exec persistentconsul-0 consul members
kubectl exec consul-0 consul members
```
]

View File

@@ -1,8 +1,8 @@
# Controlling a Kubernetes cluster remotely
# Controlling the cluster remotely
- `kubectl` can be used either on cluster instances or outside the cluster
- All the operations that we do with `kubectl` can be done remotely
- Here, we are going to use `kubectl` from our local machine
- In this section, we are going to use `kubectl` from our local machine
---
@@ -34,11 +34,11 @@
- Download the `kubectl` binary from one of these links:
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/linux/amd64/kubectl)
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/linux/amd64/kubectl)
|
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/darwin/amd64/kubectl)
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/darwin/amd64/kubectl)
|
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/windows/amd64/kubectl.exe)
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/windows/amd64/kubectl.exe)
- On Linux and macOS, make the binary executable with `chmod +x kubectl`
@@ -67,10 +67,10 @@ Note: if you are following along with a different platform (e.g. Linux on an arc
The output should look like this:
```
Client Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.0",
GitCommit:"e8462b5b5dc2584fdcd18e6bcfe9f1e4d970a529", GitTreeState:"clean",
BuildDate:"2019-06-19T16:40:16Z", GoVersion:"go1.12.5", Compiler:"gc",
Platform:"darwin/amd64"}
Client Version: version.Info{Major:"1", Minor:"14", GitVersion:"v1.14.0",
GitCommit:"641856db18352033a0d96dbc99153fa3b27298e5", GitTreeState:"clean",
BuildDate:"2019-03-25T15:53:57Z", GoVersion:"go1.12.1", Compiler:"gc",
Platform:"linux/amd64"}
```
---
@@ -192,4 +192,4 @@ class: extra-details
]
We can now utilize the cluster exactly as if we're logged into a node, except that it's remote.
We can now utilize the cluster exactly as we did before, except that it's remote.

View File

@@ -62,12 +62,10 @@ Exactly what we need!
- The following commands will install Stern on a Linux Intel 64 bit machine:
```bash
sudo curl -L -o /usr/local/bin/stern \
https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64
https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
```
- On OS X, just `brew install stern`
<!-- ##VERSION## -->
---
@@ -84,14 +82,14 @@ Exactly what we need!
.exercise[
- View the logs for all the pingpong containers:
- View the logs for all the rng containers:
```bash
stern pingpong
stern rng
```
<!--
```wait seq=```
```key ^C```
```wait HTTP/1.1```
```keys ^C```
-->
]
@@ -117,7 +115,7 @@ Exactly what we need!
<!--
```wait weave-npc```
```key ^C```
```keys ^C```
-->
]
@@ -138,14 +136,14 @@ Exactly what we need!
.exercise[
- View the logs for all the things started with `kubectl run`:
- View the logs for all the things started with `kubectl create deployment`:
```bash
stern -l run
stern -l app
```
<!--
```wait seq=```
```key ^C```
```wait units of work```
```keys ^C```
-->
]

View File

@@ -1,8 +1,8 @@
# Checking pod and node resource usage
- Since Kubernetes 1.8, metrics are collected by the [resource metrics pipeline](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-metrics-pipeline/)
- Since Kubernetes 1.8, metrics are collected by the [core metrics pipeline](https://v1-13.docs.kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/)
- The resource metrics pipeline is:
- The core metrics pipeline is:
- optional (Kubernetes can function without it)
@@ -37,7 +37,7 @@ If it shows our nodes and their CPU and memory load, we're good!
(it doesn't need persistence, as it doesn't *store* metrics)
- It has its own repository, [kubernetes-incubator/metrics-server](https://github.com/kubernetes-incubator/metrics-server)
- It has its own repository, [kubernetes-incubator/metrics-server](https://github.com/kubernetes-incubator/metrics-server])
- The repository comes with [YAML files for deployment](https://github.com/kubernetes-incubator/metrics-server/tree/master/deploy/1.8%2B)
@@ -59,7 +59,7 @@ If it shows our nodes and their CPU and memory load, we're good!
- Show resource usage across all containers:
```bash
kubectl top pods --containers --all-namespaces
kuebectl top pods --containers --all-namespaces
```
]

View File

@@ -195,7 +195,7 @@ class: extra-details
## Check our pods
- The pods will be scheduled on the nodes
- The pods will be scheduled to the nodes
- The nodes will pull the `nginx` image, and start the pods
@@ -218,18 +218,6 @@ class: extra-details
## What's going on?
- Without the `--network-plugin` flag, kubelet defaults to "no-op" networking
- It lets the container engine use a default network
(in that case, we end up with the default Docker bridge)
- Our pods are running on independent, disconnected, host-local networks
---
## What do we need to do?
- On a normal cluster, kubelet is configured to set up pod networking with CNI plugins
- This requires:
@@ -240,6 +228,14 @@ class: extra-details
- running kubelet with `--network-plugin=cni`
- Without the `--network-plugin` flag, kubelet defaults to "no-op" networking
- It lets the container engine use a default network
(in that case, we end up with the default Docker bridge)
- Our pods are running on independent, disconnected, host-local networks
---
## Using network plugins
@@ -329,7 +325,7 @@ class: extra-details
- We will add the `--network-plugin` and `--pod-cidr` flags
- We all have a "cluster number" (let's call that `C`) printed on your VM info card
- We all have a "cluster number" (let's call that `C`)
- We will use pod CIDR `10.C.N.0/24` (where `N` is the node number: 1, 2, 3)
@@ -398,7 +394,7 @@ class: extra-details
- Start kube-proxy:
```bash
sudo kube-proxy --kubeconfig ~/.kube/config
sudo kube-proxy --kubeconfig ~/kubeconfig
```
- Expose our Deployment:
@@ -484,23 +480,6 @@ Sometimes it works, sometimes it doesn't. Why?
```bash
kubectl get nodes -o wide
```
---
## Firewalling
- By default, Docker prevents containers from using arbitrary IP addresses
(by setting up iptables rules)
- We need to allow our containers to use our pod CIDR
- For simplicity, we will insert a blanket iptables rule allowing all traffic:
`iptables -I FORWARD -j ACCEPT`
- This has to be done on every node
---
## Setting up routing
@@ -509,8 +488,6 @@ Sometimes it works, sometimes it doesn't. Why?
- Create all the routes on all the nodes
- Insert the iptables rule allowing traffic
- Check that you can ping all the pods from one of the nodes
- Check that you can `curl` the ClusterIP of the Service successfully
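For instance, the routes on `node1` might look like this (a sketch; `C` and the node addresses are placeholders):
```bash
# Route pod traffic for node2 and node3 through their respective nodes
sudo ip route add 10.C.2.0/24 via `IP-OF-NODE2`
sudo ip route add 10.C.3.0/24 via `IP-OF-NODE3`
# Blanket rule allowing forwarded traffic between containers
sudo iptables -I FORWARD -j ACCEPT
```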

View File

@@ -120,12 +120,6 @@ This is our game plan:
kubectl create deployment testweb --image=nginx
```
<!--
```bash
kubectl wait deployment testweb --for condition=available
```
-->
- Find out the IP address of the pod with one of these two commands:
```bash
kubectl get pods -o wide -l app=testweb
@@ -160,11 +154,6 @@ The `curl` command should show us the "Welcome to nginx!" page.
curl $IP
```
<!--
```wait curl```
```key ^C```
-->
]
The `curl` command should now time out.

View File

@@ -1,379 +0,0 @@
# OpenID Connect
- The Kubernetes API server can perform authentication with OpenID Connect
- This requires an *OpenID provider*
(external authorization server using the OAuth 2.0 protocol)
- We can use a third-party provider (e.g. Google) or run our own (e.g. Dex)
- We are going to give an overview of the protocol
- We will show it in action (in a simplified scenario)
---
## Workflow overview
- We want to access our resources (a Kubernetes cluster)
- We authenticate with the OpenID provider
- we can do this directly (e.g. by going to https://accounts.google.com)
- or maybe a kubectl plugin can open a browser page on our behalf
- After authenticating us, the OpenID provider gives us:
- an *id token* (a short-lived signed JSON Web Token, see next slide)
- a *refresh token* (to renew the *id token* when needed)
- We can now issue requests to the Kubernetes API with the *id token*
- The API server will verify that token's content to authenticate us
---
## JSON Web Tokens
- A JSON Web Token (JWT) has three parts:
- a header specifying algorithms and token type
- a payload (indicating who issued the token, for whom, which purposes...)
- a signature generated by the issuer (the issuer = the OpenID provider)
- Anyone can verify a JWT without contacting the issuer
(except to obtain the issuer's public key)
- Pro tip: we can inspect a JWT with https://jwt.io/
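As a quick offline alternative, we can decode the payload ourselves (a sketch; `$JWT` is assumed to hold the token, and `base64` may complain about missing padding):
```bash
# The payload is the second dot-separated field (base64url-encoded JSON)
echo $JWT | cut -d. -f2 | tr '_-' '/+' | base64 -d 2>/dev/null | jq .
```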
---
## How the Kubernetes API uses JWT
- Server side
- enable OIDC authentication
- indicate which issuer (provider) should be allowed
- indicate which audience (or "client id") should be allowed
- optionally, map or prefix user and group names
- Client side
- obtain JWT as described earlier
- pass JWT as authentication token
- renew JWT when needed (using the refresh token)
---
## Demo time!
- We will use [Google Accounts](https://accounts.google.com) as our OpenID provider
- We will use the [Google OAuth Playground](https://developers.google.com/oauthplayground) as the "audience" or "client id"
- We will obtain a JWT through Google Accounts and the OAuth Playground
- We will enable OIDC in the Kubernetes API server
- We will use the JWT to authenticate
.footnote[If you can't or won't use a Google account, you can try to adapt this to another provider.]
---
## Checking the API server logs
- The API server logs will be particularly useful in this section
(they will indicate e.g. why a specific token is rejected)
- Let's keep an eye on the API server output!
.exercise[
- Tail the logs of the API server:
```bash
kubectl logs kube-apiserver-node1 --follow --namespace=kube-system
```
]
---
## Authenticate with the OpenID provider
- We will use the Google OAuth Playground for convenience
- In a real scenario, we would need our own OAuth client instead of the playground
(even if we were still using Google as the OpenID provider)
.exercise[
- Open the Google OAuth Playground:
```
https://developers.google.com/oauthplayground/
```
- Enter our own custom scope in the text field:
```
https://www.googleapis.com/auth/userinfo.email
```
- Click on "Authorize APIs" and allow the playground to access our email address
]
---
## Obtain our JSON Web Token
- The previous step gave us an "authorization code"
- We will use it to obtain tokens
.exercise[
- Click on "Exchange authorization code for tokens"
]
- The JWT is the very long `id_token` that shows up on the right hand side
(it is a base64-encoded JSON object, and should therefore start with `eyJ`)
---
## Using our JSON Web Token
- We need to create a context (in kubeconfig) for our token
(if we just add the token or use `kubectl --token`, our certificate will still be used)
.exercise[
- Create a new authentication section in kubeconfig:
```bash
kubectl config set-credentials myjwt --token=eyJ...
```
- Try to use it:
```bash
kubectl --user=myjwt get nodes
```
]
We should get an `Unauthorized` response, since we haven't enabled OpenID Connect in the API server yet. We should also see `invalid bearer token` in the API server log output.
---
## Enabling OpenID Connect
- We need to add a few flags to the API server configuration
- These two are mandatory:
`--oidc-issuer-url` → URL of the OpenID provider
`--oidc-client-id` → app requesting the authentication
<br/>(in our case, that's the ID for the Google OAuth Playground)
- This one is optional:
`--oidc-username-claim` → which field should be used as user name
<br/>(we will use the user's email address instead of an opaque ID)
- See the [API server documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server) for more details about all available flags
---
## Updating the API server configuration
- The instructions below will work for clusters deployed with kubeadm
(or where the control plane is deployed in static pods)
- If your cluster is deployed differently, you will need to adapt them
.exercise[
- Edit `/etc/kubernetes/manifests/kube-apiserver.yaml`
- Add the following lines to the list of command-line flags:
```yaml
- --oidc-issuer-url=https://accounts.google.com
- --oidc-client-id=407408718192.apps.googleusercontent.com
- --oidc-username-claim=email
```
]
---
## Restarting the API server
- The kubelet monitors the files in `/etc/kubernetes/manifests`
- When we save the pod manifest, kubelet will restart the corresponding pod
(using the updated command line flags)
.exercise[
- After making the changes described on the previous slide, save the file
- Issue a simple command (like `kubectl version`) until the API server is back up
(it might take between a few seconds and one minute for the API server to restart)
- Restart the `kubectl logs` command to view the logs of the API server
]
---
## Using our JSON Web Token
- Now that the API server is set up to recognize our token, try again!
.exercise[
- Try an API command with our token:
```bash
kubectl --user=myjwt get nodes
kubectl --user=myjwt get pods
```
]
We should see a message like:
```
Error from server (Forbidden): nodes is forbidden: User "jean.doe@gmail.com"
cannot list resource "nodes" in API group "" at the cluster scope
```
→ We were successfully *authenticated*, but not *authorized*.
---
## Authorizing our user
- As an extra step, let's grant read access to our user
- We will use the pre-defined ClusterRole `view`
.exercise[
- Create a ClusterRoleBinding allowing us to view resources:
```bash
kubectl create clusterrolebinding i-can-view \
--user=`jean.doe@gmail.com` --clusterrole=view
```
(make sure to put *your* Google email address there)
- Confirm that we can now list pods with our token:
```bash
kubectl --user=myjwt get pods
```
]
---
## From demo to production
.warning[This was a very simplified demo! In a real deployment...]
- We wouldn't use the Google OAuth Playground
- We *probably* wouldn't even use Google at all
(it doesn't seem to provide a way to include groups!)
- Some popular alternatives:
- [Dex](https://github.com/dexidp/dex),
[Keycloak](https://www.keycloak.org/)
(self-hosted)
- [Okta](https://developer.okta.com/docs/how-to/creating-token-with-groups-claim/#step-five-decode-the-jwt-to-verify)
(SaaS)
- We would use a helper (like the [kubelogin](https://github.com/int128/kubelogin) plugin) to automatically obtain tokens
---
class: extra-details
## Service Account tokens
- The tokens used by Service Accounts are JWT tokens as well
- They are signed and verified using a special service account key pair
.exercise[
- Extract the token of a service account in the current namespace:
```bash
kubectl get secrets -o jsonpath={..token} | base64 -d
```
- Copy-paste the token to a verification service like https://jwt.io
- Notice that it says "Invalid Signature"
]
---
class: extra-details
## Verifying Service Account tokens
- JSON Web Tokens embed the URL of the "issuer" (=OpenID provider)
- The issuer provides its public key through a well-known discovery endpoint
(similar to https://accounts.google.com/.well-known/openid-configuration)
- There is no such endpoint for the Service Account key pair
- But we can provide the public key ourselves for verification
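For an issuer that does expose one, the discovery document tells us where to fetch the signing keys; for example:
```bash
curl -s https://accounts.google.com/.well-known/openid-configuration \
     | jq .jwks_uri
```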
---
class: extra-details
## Verifying a Service Account token
- On clusters provisioned with kubeadm, the Service Account key pair is:
`/etc/kubernetes/pki/sa.key` (used by the controller manager to generate tokens)
`/etc/kubernetes/pki/sa.pub` (used by the API server to validate the same tokens)
.exercise[
- Display the public key used to sign Service Account tokens:
```bash
sudo cat /etc/kubernetes/pki/sa.pub
```
- Copy-paste the key in the "verify signature" area on https://jwt.io
- It should now say "Signature Verified"
]

View File

@@ -32,7 +32,7 @@
- must be able to anticipate all the events that might happen
- design will be better only to the extent of what we anticipated
- design will be better only to the extend of what we anticipated
- hard to anticipate if we don't have production experience
@@ -86,7 +86,7 @@ class: extra-details
## What can we store via the Kubernetes API?
- The API server stores most Kubernetes resources in etcd
- The API server stores most Kubernetes resources into etcd
- Etcd is designed for reliability, not for performance
@@ -187,8 +187,6 @@ class: extra-details
[Intro talk](https://www.youtube.com/watch?v=8k_ayO1VRXE)
|
[Deep dive talk](https://www.youtube.com/watch?v=fu7ecA2rXmc)
|
[Simple example](https://medium.com/faun/writing-your-first-kubernetes-operator-8f3df4453234)
- Zalando Kubernetes Operator Pythonic Framework (KOPF)

View File

@@ -121,7 +121,7 @@ Examples:
## One operator in action
- We will install [Elastic Cloud on Kubernetes](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html), an ElasticSearch operator
- We will install the UPMC Enterprises ElasticSearch operator
- This operator requires PersistentVolumes
@@ -206,92 +206,51 @@ Now, the StorageClass should have `(default)` next to its name.
## Install the ElasticSearch operator
- The operator provides:
- The operator needs:
- a few CustomResourceDefinitions
- a Namespace for its other resources
- a ValidatingWebhookConfiguration for type checking
- a StatefulSet for its controller and webhook code
- a Deployment for its controller
- a ServiceAccount, ClusterRole, ClusterRoleBinding for permissions
- a Namespace
- All these resources are grouped in a convenient YAML file
- We have grouped all the definitions for these resources in a YAML file
.exercise[
- Install the operator:
```bash
kubectl apply -f ~/container.training/k8s/eck-operator.yaml
kubectl apply -f ~/container.training/k8s/elasticsearch-operator.yaml
```
]
---
## Check our new custom resources
## Wait for the operator to be ready
- Let's see which CRDs were created
- Some operators require creating their CRDs separately
- This operator will create its CRD itself
(i.e. the CRD is not listed in the YAML that we applied earlier)
.exercise[
- List all CRDs:
- Wait until the `elasticsearchclusters` CRD shows up:
```bash
kubectl get crds
```
]
This operator supports ElasticSearch, but also Kibana and APM. Cool!
---
## Create the `eck-demo` namespace
- For clarity, we will create everything in a new namespace, `eck-demo`
- This namespace is hard-coded in the YAML files that we are going to use
- We need to create that namespace
.exercise[
- Create the `eck-demo` namespace:
```bash
kubectl create namespace eck-demo
```
- Switch to that namespace:
```bash
kns eck-demo
```
]
---
class: extra-details
## Can we use a different namespace?
Yes, but then we need to update all the YAML manifests that we
are going to apply in the next slides.
The `eck-demo` namespace is hard-coded in these YAML manifests.
Why?
Because when defining a ClusterRoleBinding that references a
ServiceAccount, we have to indicate in which namespace the
ServiceAccount is located.
---
## Create an ElasticSearch resource
- We can now create a resource with `kind: Elasticsearch`
- We can now create a resource with `kind: ElasticsearchCluster`
- The YAML for that resource will specify all the desired parameters:
- how many nodes we want
- how many nodes do we want of each type (client, master, data)
- image to use
- add-ons (kibana, cerebro, ...)
- whether to use TLS or not
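As a rough sketch, such a resource could look like this (field names follow the ECK documentation of that era; they may differ in other versions of the operator):
```yaml
apiVersion: elasticsearch.k8s.elastic.co/v1beta1
kind: Elasticsearch
metadata:
  name: demo
spec:
  version: 7.4.0
  nodeSets:
  - name: default
    count: 1
```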
@@ -301,7 +260,7 @@ ServiceAccount is located.
- Create our ElasticSearch cluster:
```bash
kubectl apply -f ~/container.training/k8s/eck-elasticsearch.yaml
kubectl apply -f ~/container.training/k8s/elasticsearch-cluster.yaml
```
]
@@ -310,88 +269,49 @@ ServiceAccount is located.
## Operator in action
- Over the next minutes, the operator will create our ES cluster
- Over the next minutes, the operator will create:
- It will report our cluster status through the CRD
- StatefulSets (one for master nodes, one for data nodes)
- Deployments (for client nodes; and for add-ons like cerebro and kibana)
- Services (for all these pods)
.exercise[
- Check the logs of the operator:
- Wait for all the StatefulSets to be fully up and running:
```bash
stern --namespace=elastic-system operator
kubectl get statefulsets -w
```
<!--
```wait elastic-operator-0```
```tmux split-pane -v```
--->
- Watch the status of the cluster through the CRD:
```bash
kubectl get es -w
```
<!--
```longwait green```
```key ^C```
```key ^D```
```key ^C```
-->
]
---
## Connecting to our cluster
- It's not easy to use the ElasticSearch API from the shell
- But let's check at least if ElasticSearch is up!
- Since connecting directly to the ElasticSearch API is a bit raw,
<br/>we'll connect to the cerebro frontend instead
.exercise[
- Get the ClusterIP of our ES instance:
- Edit the cerebro service to change its type from ClusterIP to NodePort:
```bash
kubectl get services
kubectl patch svc cerebro-es -p "spec: { type: NodePort }"
```
- Issue a request with `curl`:
- Retrieve the NodePort that was allocated:
```bash
curl http://`CLUSTERIP`:9200
kubectl get svc cerebro-es
```
- Connect to that port with a browser
]
We get an authentication error. Our cluster is protected!
---
## Obtaining the credentials
- The operator creates a user named `elastic`
- It generates a random password and stores it in a Secret
.exercise[
- Extract the password:
```bash
kubectl get secret demo-es-elastic-user \
-o go-template="{{ .data.elastic | base64decode }} "
```
- Use it to connect to the API:
```bash
curl -u elastic:`PASSWORD` http://`CLUSTERIP`:9200
```
]
We should see a JSON payload with the `"You Know, for Search"` tagline.
---
## Sending data to the cluster
## (Bonus) Setup filebeat
- Let's send some data to our brand new ElasticSearch cluster!
@@ -401,170 +321,22 @@ We should see a JSON payload with the `"You Know, for Search"` tagline.
- Deploy filebeat:
```bash
kubectl apply -f ~/container.training/k8s/eck-filebeat.yaml
```
- Wait until some pods are up:
```bash
watch kubectl get pods -l k8s-app=filebeat
```
<!--
```wait Running```
```key ^C```
-->
- Check that a filebeat index was created:
```bash
curl -u elastic:`PASSWORD` http://`CLUSTERIP`:9200/_cat/indices
kubectl apply -f ~/container.training/k8s/filebeat.yaml
```
]
---
## Deploying an instance of Kibana
- Kibana can visualize the logs injected by filebeat
- The ECK operator can also manage Kibana
- Let's give it a try!
.exercise[
- Deploy a Kibana instance:
```bash
kubectl apply -f ~/container.training/k8s/eck-kibana.yaml
```
- Wait for it to be ready:
```bash
kubectl get kibana -w
```
<!--
```longwait green```
```key ^C```
-->
]
We should see at least one index being created in cerebro.
---
## Connecting to Kibana
## (Bonus) Access log data with kibana
- Kibana is automatically set up to connect to ElasticSearch
- Let's expose kibana (by making kibana-es a NodePort too)
(this is arranged by the YAML that we're using)
- Then access kibana
- However, it will ask for authentication
- It's using the same user/password as ElasticSearch
.exercise[
- Get the NodePort allocated to Kibana:
```bash
kubectl get services
```
- Connect to it with a web browser
- Use the same user/password as before
]
---
## Setting up Kibana
After the Kibana UI loads, we need to click around a bit
.exercise[
- Pick "explore on my own"
- Click on "Use Elasticsearch data / Connect to your Elasticsearch index"
- Enter `filebeat-*` for the index pattern and click "Next step"
- Select `@timestamp` as time filter field name
- Click on "discover" (the small icon looking like a compass on the left bar)
- Play around!
]
---
## Scaling up the cluster
- At this point, we have only one node
- We are going to scale up
- But first, we'll deploy Cerebro, a UI for ElasticSearch
- This will let us see the state of the cluster, how indexes are sharded, etc.
---
## Deploying Cerebro
- Cerebro is stateless, so it's fairly easy to deploy
(one Deployment + one Service)
- However, it needs the address and credentials for ElasticSearch
- We prepared yet another manifest for that!
.exercise[
- Deploy Cerebro:
```bash
kubectl apply -f ~/container.training/k8s/eck-cerebro.yaml
```
- Lookup the NodePort number and connect to it:
```bash
kubectl get services
```
]
---
## Scaling up the cluster
- We can see on Cerebro that the cluster is "yellow"
(because our index is not replicated)
- Let's change that!
.exercise[
- Edit the ElasticSearch cluster manifest:
```bash
kubectl edit es demo
```
- Find the field `count: 1` and change it to 3
- Save and quit
<!--
```wait Please edit```
```keys /count:```
```key ^J```
```keys $r3:x```
```key ^J```
-->
]
- We'll need to configure kibana indexes
---
@@ -604,14 +376,14 @@ After the Kibana UI loads, we need to click around a bit
- Look at the ElasticSearch resource definition
(`~/container.training/k8s/eck-elasticsearch.yaml`)
(`~/container.training/k8s/elasticsearch-cluster.yaml`)
- What should happen if we flip the TLS flag? Twice?
- What should happen if we flip the `use-tls` flag? Twice?
- What should happen if we add another group of nodes?
- What should happen if we remove / re-add the kibana or cerebro sections?
- What should happen if we change the number of nodes?
- What if we want different images or parameters for the different nodes?
*Operators can be very powerful.
<br/>
But we need to know exactly the scenarios that they can handle.*
*Operators can be very powerful, iff we know exactly the scenarios that they can handle.*

View File

@@ -11,36 +11,16 @@
- Deploy everything else:
```bash
kubectl create deployment hasher --image=dockercoins/hasher:v0.1
kubectl create deployment rng --image=dockercoins/rng:v0.1
kubectl create deployment webui --image=dockercoins/webui:v0.1
kubectl create deployment worker --image=dockercoins/worker:v0.1
set -u
for SERVICE in hasher rng webui worker; do
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
done
```
]
---
class: extra-details
## Deploying other images
- If we wanted to deploy images from another registry ...
- ... Or with a different tag ...
- ... We could use the following snippet:
```bash
REGISTRY=dockercoins
TAG=v0.1
for SERVICE in hasher rng webui worker; do
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
done
```
---
## Is this working?
- After waiting for the deployment to complete, let's look at the logs!
@@ -108,7 +88,7 @@ kubectl wait deploy/worker --for condition=available
<!--
```wait units of work done, updating hash counter```
```key ^C```
```keys ^C```
-->
]

View File

@@ -220,8 +220,6 @@
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
```
<!-- ```wait apiVersion``` -->
]
---
@@ -242,16 +240,6 @@
- Save, quit
<!--
```keys /--enable-admission-plugins=```
```key ^J```
```key $```
```keys a,PodSecurityPolicy```
```key Escape```
```keys :wq```
```key ^J```
-->
]
---
@@ -283,8 +271,6 @@
kubectl run testpsp1 --image=nginx --restart=Never
```
<!-- ```wait forbidden: no providers available``` -->
- Try to create a Deployment:
```bash
kubectl run testpsp2 --image=nginx
@@ -512,22 +498,3 @@ class: extra-details
- bind `psp:restricted` to the group `system:authenticated`
- bind `psp:privileged` to the ServiceAccount `kube-system:default`
---
## Fixing the cluster
- Let's disable the PSP admission plugin
.exercise[
- Edit the Kubernetes API server static pod manifest
- Remove the PSP admission plugin
- This can be done with this one-liner:
```bash
sudo sed -i s/,PodSecurityPolicy// /etc/kubernetes/manifests/kube-apiserver.yaml
```
]

Some files were not shown because too many files have changed in this diff