Mirror of https://github.com/jpetazzo/container.training.git (synced 2026-02-17 11:09:55 +00:00)

Compare commits: 2020-02-vm ... exercises (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 6df7529885 | |
.gitignore (vendored) — 2 changes
@@ -3,12 +3,10 @@
*~
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
node_modules

### macOS ###

@@ -9,21 +9,21 @@ services:

etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.3
image: k8s.gcr.io/etcd:3.3.10
command: etcd

kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged

kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":

kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.17.2
image: k8s.gcr.io/hyperkube:v1.14.0
command: kube-scheduler --master http://localhost:8080

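A minimal way to sanity-check this Compose-based control plane, assuming the API server keeps its insecure port 8080 reachable on localhost (as the `--master http://localhost:8080` flags above imply) and that `docker-compose` and `kubectl` are available:

```sh
# Sketch only — file location and port are assumptions based on the service definitions above.
docker-compose up -d
curl http://localhost:8080/healthz                          # should print "ok"
kubectl --server=http://localhost:8080 get componentstatuses
```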
@@ -12,6 +12,7 @@ metadata:
|
||||
name: kube-router-cfg
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: node
|
||||
k8s-app: kube-router
|
||||
data:
|
||||
cni-conf.json: |
|
||||
@@ -31,21 +32,20 @@ data:
|
||||
]
|
||||
}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-router
|
||||
tier: node
|
||||
name: kube-router
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-router
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-router
|
||||
tier: node
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
|
||||
@@ -9,20 +9,20 @@ services:
|
||||
|
||||
etcd:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/etcd:3.4.3
|
||||
image: k8s.gcr.io/etcd:3.3.10
|
||||
command: etcd
|
||||
|
||||
kube-apiserver:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
|
||||
|
||||
kube-controller-manager:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-controller-manager --master http://localhost:8080
|
||||
|
||||
kube-scheduler:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.14.0
|
||||
command: kube-scheduler --master http://localhost:8080
|
||||
|
||||
@@ -1,21 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: whatever
annotations:
traefik.ingress.kubernetes.io/service-weights: |
whatever: 90%
whatever-new: 10%
spec:
rules:
- host: whatever.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 80
- path: /
backend:
serviceName: whatever-new
servicePort: 80
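This (removed) manifest is the weighted-canary example: two backends behind the same path, with the `traefik.ingress.kubernetes.io/service-weights` annotation splitting traffic 90%/10%. A rough way to observe the split, assuming the host resolves to the ingress controller and each service returns something distinguishable:

```sh
# Illustrative only: A.B.C.D is the placeholder address used in the manifest above.
for i in $(seq 100); do
  curl -s http://whatever.A.B.C.D.nip.io/ >> /tmp/responses.txt
done
sort /tmp/responses.txt | uniq -c    # counts should be roughly 90 / 10
```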
@@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
version: v1alpha1
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof

@@ -1,35 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
properties:
spec:
required:
- taste
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
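Once this CRD is registered, the `Coffee` resources defined in the next hunk behave like any built-in object. A hedged sketch of how they might be created and listed (file names are assumptions, not taken from the diff):

```sh
kubectl apply -f coffee-crd.yaml     # the CustomResourceDefinition above
kubectl apply -f coffees.yaml        # the arabica/robusta/liberica/excelsa resources below
kubectl get coffees                  # the short name also works: kubectl get cof
# The additionalPrinterColumns should add TASTE and AGE columns to the output.
```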
@@ -1,29 +0,0 @@
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: arabica
|
||||
spec:
|
||||
taste: strong
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: robusta
|
||||
spec:
|
||||
taste: stronger
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: liberica
|
||||
spec:
|
||||
taste: smoky
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: excelsa
|
||||
spec:
|
||||
taste: fruity
|
||||
|
||||
@@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: consul
|
||||
labels:
|
||||
app: consul
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
@@ -27,6 +29,8 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: consul
|
||||
labels:
|
||||
app: consul
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
@@ -68,7 +72,7 @@ spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: consul
|
||||
image: "consul:1.6"
|
||||
image: "consul:1.4.4"
|
||||
args:
|
||||
- "agent"
|
||||
- "-bootstrap-expect=3"
|
||||
|
||||
@@ -1,160 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
name: hasher
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: hasher
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/hasher:v0.1
|
||||
name: hasher
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
name: hasher
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: hasher
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
name: redis
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: redis
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
containers:
|
||||
- image: redis
|
||||
name: redis
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
name: redis
|
||||
spec:
|
||||
ports:
|
||||
- port: 6379
|
||||
protocol: TCP
|
||||
targetPort: 6379
|
||||
selector:
|
||||
app: redis
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
name: rng
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rng
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/rng:v0.1
|
||||
name: rng
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
name: rng
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: rng
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
name: webui
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/webui:v0.1
|
||||
name: webui
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
name: webui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: webui
|
||||
type: NodePort
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: worker
|
||||
name: worker
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: worker
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: worker
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/worker:v0.1
|
||||
name: worker
|
||||
@@ -1,69 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: cerebro
|
||||
name: cerebro
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: cerebro
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: cerebro
|
||||
spec:
|
||||
volumes:
|
||||
- name: conf
|
||||
configMap:
|
||||
name: cerebro
|
||||
containers:
|
||||
- image: lmenezes/cerebro
|
||||
name: cerebro
|
||||
volumeMounts:
|
||||
- name: conf
|
||||
mountPath: /conf
|
||||
args:
|
||||
- -Dconfig.file=/conf/application.conf
|
||||
env:
|
||||
- name: ELASTICSEARCH_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: demo-es-elastic-user
|
||||
key: elastic
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: cerebro
|
||||
name: cerebro
|
||||
spec:
|
||||
ports:
|
||||
- port: 9000
|
||||
protocol: TCP
|
||||
targetPort: 9000
|
||||
selector:
|
||||
app: cerebro
|
||||
type: NodePort
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cerebro
|
||||
data:
|
||||
application.conf: |
|
||||
secret = "ki:s:[[@=Ag?QI`W2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
|
||||
|
||||
hosts = [
|
||||
{
|
||||
host = "http://demo-es-http.eck-demo.svc.cluster.local:9200"
|
||||
name = "demo"
|
||||
auth = {
|
||||
username = "elastic"
|
||||
password = ${?ELASTICSEARCH_PASSWORD}
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: elasticsearch.k8s.elastic.co/v1
|
||||
kind: Elasticsearch
|
||||
metadata:
|
||||
name: demo
|
||||
namespace: eck-demo
|
||||
spec:
|
||||
http:
|
||||
tls:
|
||||
selfSignedCertificate:
|
||||
disabled: true
|
||||
nodeSets:
|
||||
- name: default
|
||||
count: 1
|
||||
config:
|
||||
node.data: true
|
||||
node.ingest: true
|
||||
node.master: true
|
||||
node.store.allow_mmap: false
|
||||
version: 7.5.1
|
||||
@@ -1,168 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: filebeat-config
|
||||
namespace: eck-demo
|
||||
labels:
|
||||
k8s-app: filebeat
|
||||
data:
|
||||
filebeat.yml: |-
|
||||
filebeat.inputs:
|
||||
- type: container
|
||||
paths:
|
||||
- /var/log/containers/*.log
|
||||
processors:
|
||||
- add_kubernetes_metadata:
|
||||
host: ${NODE_NAME}
|
||||
matchers:
|
||||
- logs_path:
|
||||
logs_path: "/var/log/containers/"
|
||||
|
||||
# To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
|
||||
#filebeat.autodiscover:
|
||||
# providers:
|
||||
# - type: kubernetes
|
||||
# node: ${NODE_NAME}
|
||||
# hints.enabled: true
|
||||
# hints.default_config:
|
||||
# type: container
|
||||
# paths:
|
||||
# - /var/log/containers/*${data.kubernetes.container.id}.log
|
||||
|
||||
processors:
|
||||
- add_cloud_metadata:
|
||||
- add_host_metadata:
|
||||
|
||||
cloud.id: ${ELASTIC_CLOUD_ID}
|
||||
cloud.auth: ${ELASTIC_CLOUD_AUTH}
|
||||
|
||||
output.elasticsearch:
|
||||
hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
|
||||
username: ${ELASTICSEARCH_USERNAME}
|
||||
password: ${ELASTICSEARCH_PASSWORD}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: filebeat
|
||||
namespace: eck-demo
|
||||
labels:
|
||||
k8s-app: filebeat
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: filebeat
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: filebeat
|
||||
spec:
|
||||
serviceAccountName: filebeat
|
||||
terminationGracePeriodSeconds: 30
|
||||
hostNetwork: true
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
containers:
|
||||
- name: filebeat
|
||||
image: docker.elastic.co/beats/filebeat:7.5.1
|
||||
args: [
|
||||
"-c", "/etc/filebeat.yml",
|
||||
"-e",
|
||||
]
|
||||
env:
|
||||
- name: ELASTICSEARCH_HOST
|
||||
value: demo-es-http
|
||||
- name: ELASTICSEARCH_PORT
|
||||
value: "9200"
|
||||
- name: ELASTICSEARCH_USERNAME
|
||||
value: elastic
|
||||
- name: ELASTICSEARCH_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: demo-es-elastic-user
|
||||
key: elastic
|
||||
- name: ELASTIC_CLOUD_ID
|
||||
value:
|
||||
- name: ELASTIC_CLOUD_AUTH
|
||||
value:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
# If using Red Hat OpenShift uncomment this:
|
||||
#privileged: true
|
||||
resources:
|
||||
limits:
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /etc/filebeat.yml
|
||||
readOnly: true
|
||||
subPath: filebeat.yml
|
||||
- name: data
|
||||
mountPath: /usr/share/filebeat/data
|
||||
- name: varlibdockercontainers
|
||||
mountPath: /var/lib/docker/containers
|
||||
readOnly: true
|
||||
- name: varlog
|
||||
mountPath: /var/log
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
defaultMode: 0600
|
||||
name: filebeat-config
|
||||
- name: varlibdockercontainers
|
||||
hostPath:
|
||||
path: /var/lib/docker/containers
|
||||
- name: varlog
|
||||
hostPath:
|
||||
path: /var/log
|
||||
# data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
|
||||
- name: data
|
||||
hostPath:
|
||||
path: /var/lib/filebeat-data
|
||||
type: DirectoryOrCreate
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: filebeat
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: filebeat
|
||||
namespace: eck-demo
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: filebeat
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: filebeat
|
||||
labels:
|
||||
k8s-app: filebeat
|
||||
rules:
|
||||
- apiGroups: [""] # "" indicates the core API group
|
||||
resources:
|
||||
- namespaces
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: filebeat
|
||||
namespace: eck-demo
|
||||
labels:
|
||||
k8s-app: filebeat
|
||||
---
|
||||
@@ -1,17 +0,0 @@
|
||||
apiVersion: kibana.k8s.elastic.co/v1
|
||||
kind: Kibana
|
||||
metadata:
|
||||
name: demo
|
||||
spec:
|
||||
version: 7.5.1
|
||||
count: 1
|
||||
elasticsearchRef:
|
||||
name: demo
|
||||
namespace: eck-demo
|
||||
http:
|
||||
service:
|
||||
spec:
|
||||
type: NodePort
|
||||
tls:
|
||||
selfSignedCertificate:
|
||||
disabled: true
|
||||
(One file's diff is suppressed because it is too large.)

k8s/efk.yaml — 17 changes
@@ -3,7 +3,6 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: fluentd
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
@@ -33,17 +32,13 @@ subjects:
|
||||
name: fluentd
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: fluentd
|
||||
namespace: default
|
||||
labels:
|
||||
app: fluentd
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: fluentd
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
@@ -56,7 +51,7 @@ spec:
|
||||
effect: NoSchedule
|
||||
containers:
|
||||
- name: fluentd
|
||||
image: fluent/fluentd-kubernetes-daemonset:v1.4-debian-elasticsearch-1
|
||||
image: fluent/fluentd-kubernetes-daemonset:v1.3-debian-elasticsearch-1
|
||||
env:
|
||||
- name: FLUENT_ELASTICSEARCH_HOST
|
||||
value: "elasticsearch"
|
||||
@@ -91,13 +86,12 @@ spec:
|
||||
hostPath:
|
||||
path: /var/lib/docker/containers
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: elasticsearch
|
||||
name: elasticsearch
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
@@ -125,7 +119,6 @@ metadata:
|
||||
labels:
|
||||
app: elasticsearch
|
||||
name: elasticsearch
|
||||
namespace: default
|
||||
spec:
|
||||
ports:
|
||||
- port: 9200
|
||||
@@ -135,13 +128,12 @@ spec:
|
||||
app: elasticsearch
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: kibana
|
||||
name: kibana
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
@@ -165,7 +157,6 @@ metadata:
|
||||
labels:
|
||||
app: kibana
|
||||
name: kibana
|
||||
namespace: default
|
||||
spec:
|
||||
ports:
|
||||
- port: 5601
|
||||
|
||||
@@ -9,7 +9,7 @@ spec:
|
||||
name: haproxy
|
||||
containers:
|
||||
- name: haproxy
|
||||
image: haproxy:1
|
||||
image: haproxy
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /usr/local/etc/haproxy/
|
||||
|
||||
@@ -1,13 +1,14 @@
apiVersion: networking.k8s.io/v1beta1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: whatever
name: cheddar
spec:
rules:
- host: whatever.A.B.C.D.nip.io
- host: cheddar.A.B.C.D.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234
serviceName: cheddar
servicePort: 80

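Whichever side of this hunk is applied, the ingress routes on the host name, so a request must carry that host. A hedged test, with A.B.C.D standing in for the ingress controller's address as in the manifest:

```sh
# Either let nip.io resolve the host...
curl http://cheddar.A.B.C.D.nip.io/
# ...or hit the node directly and set the Host header by hand.
curl -H "Host: cheddar.A.B.C.D.nip.io" http://A.B.C.D/
```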
@@ -12,38 +12,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
# Configuration to deploy release version of the Dashboard UI compatible with
|
||||
# Kubernetes 1.8.
|
||||
#
|
||||
# Example usage: kubectl create -f <this_file>
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Secret ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
@@ -51,129 +25,82 @@ metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service Account ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Role & Role Binding ------------------- #
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: kube-system
|
||||
rules:
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster", "dashboard-metrics-scraper"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics from heapster.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-minimal
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Deployment ------------------- #
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
apiVersion: apps/v1beta2
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
@@ -186,125 +113,60 @@ spec:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0-rc2
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
- --enable-skip-login
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard
|
||||
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"beta.kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service ------------------- #
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
spec:
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
k8s-app: kubernetes-dashboard
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.2
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"beta.kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
|
||||
---
|
||||
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
@@ -323,12 +185,10 @@ spec:
|
||||
- args:
|
||||
- sh
|
||||
- -c
|
||||
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
|
||||
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kube-system:443,verify=0
|
||||
image: alpine
|
||||
name: dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
@@ -343,13 +203,13 @@ spec:
|
||||
selector:
|
||||
app: dashboard
|
||||
type: NodePort
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: insecure-dashboard
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
@@ -357,4 +217,4 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
|
||||
@@ -1,5 +1,5 @@
apiVersion: v1
kind: Pod
Kind: Pod
metadata:
name: hello
namespace: default

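One side of this hunk has `Kind: Pod` where the other has `kind: Pod`; Kubernetes field names are lowercase and case-sensitive, so only the lowercase spelling validates. A quick way to catch this kind of typo without touching the cluster (the command is a sketch and the file name is an assumption):

```sh
kubectl create -f hello.yaml --dry-run --validate=true
```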
@@ -12,6 +12,11 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Configuration to deploy release version of the Dashboard UI compatible with
|
||||
# Kubernetes 1.8.
|
||||
#
|
||||
# Example usage: kubectl create -f <this_file>
|
||||
|
||||
# ------------------- Dashboard Secret ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
@@ -90,7 +95,7 @@ subjects:
|
||||
# ------------------- Dashboard Deployment ------------------- #
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
apiVersion: apps/v1beta2
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
@@ -109,7 +114,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
|
||||
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
|
||||
@@ -45,7 +45,7 @@ subjects:
|
||||
name: local-path-provisioner-service-account
|
||||
namespace: local-path-storage
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: local-path-provisioner
|
||||
|
||||
@@ -58,7 +58,7 @@ metadata:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: metrics-server
|
||||
@@ -82,7 +82,7 @@ spec:
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: metrics-server
|
||||
image: k8s.gcr.io/metrics-server-amd64:v0.3.3
|
||||
image: k8s.gcr.io/metrics-server-amd64:v0.3.1
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: tmp-dir
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-without-volume
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
@@ -1,13 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-volume
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
@@ -1,21 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
- name: git
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
restartPolicy: OnFailure
|
||||
|
||||
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-with-init
name: nginx-with-volume
spec:
volumes:
- name: www
@@ -11,10 +11,11 @@ spec:
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html/
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/
restartPolicy: OnFailure

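In the init-container variant, the `git` container clones the sample repository into the shared `www` volume before nginx starts serving it. A hedged way to watch that happen (the pod name depends on which side of the hunk is applied; the file name is an assumption):

```sh
kubectl apply -f nginx-with-init.yaml
kubectl get pod nginx-with-init -w       # stays in Init:0/1 until the clone completes
kubectl logs nginx-with-init -c git      # output of the init container
```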
@@ -1,54 +1,51 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
kind: Role
|
||||
metadata:
|
||||
name: persistentconsul
|
||||
name: consul
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ pods ]
|
||||
verbs: [ get, list ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: persistentconsul
|
||||
name: consul
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: persistentconsul
|
||||
kind: Role
|
||||
name: consul
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: persistentconsul
|
||||
namespace: default
|
||||
name: consul
|
||||
namespace: orange
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: persistentconsul
|
||||
name: consul
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: persistentconsul
|
||||
name: consul
|
||||
spec:
|
||||
ports:
|
||||
- port: 8500
|
||||
name: http
|
||||
selector:
|
||||
app: persistentconsul
|
||||
app: consul
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: persistentconsul
|
||||
name: consul
|
||||
spec:
|
||||
serviceName: persistentconsul
|
||||
serviceName: consul
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: persistentconsul
|
||||
app: consul
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
@@ -61,9 +58,9 @@ spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: persistentconsul
|
||||
app: consul
|
||||
spec:
|
||||
serviceAccountName: persistentconsul
|
||||
serviceAccountName: consul
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
@@ -72,19 +69,19 @@ spec:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- persistentconsul
|
||||
- consul
|
||||
topologyKey: kubernetes.io/hostname
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: consul
|
||||
image: "consul:1.6"
|
||||
image: "consul:1.4.4"
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /consul/data
|
||||
args:
|
||||
- "agent"
|
||||
- "-bootstrap-expect=3"
|
||||
- "-retry-join=provider=k8s label_selector=\"app=persistentconsul\""
|
||||
- "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
|
||||
- "-client=0.0.0.0"
|
||||
- "-data-dir=/consul/data"
|
||||
- "-server"
|
||||
|
||||
k8s/portworx.yaml — 1160 changes (diff suppressed because it is too large)
@@ -12,17 +12,10 @@ spec:
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
#schedulerName: stork
|
||||
initContainers:
|
||||
- name: rmdir
|
||||
image: alpine
|
||||
volumeMounts:
|
||||
- mountPath: /vol
|
||||
name: postgres
|
||||
command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
|
||||
schedulerName: stork
|
||||
containers:
|
||||
- name: postgres
|
||||
image: postgres:11
|
||||
image: postgres:10.5
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/postgresql/data
|
||||
name: postgres
|
||||
|
||||
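A short note on the hunk above: the `rmdir` init container exists because freshly formatted ext volumes contain a `lost+found` directory, and PostgreSQL's initdb refuses to initialize a non-empty data directory; removing it first lets the database start on a brand-new volume.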
@@ -6,16 +6,13 @@ metadata:
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
apiVersion: extensions/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
@@ -29,7 +26,7 @@ spec:
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik:1.7
|
||||
- image: traefik
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
|
||||
@@ -7,8 +7,8 @@ workshop.
|
||||
|
||||
|
||||
## 1. Prerequisites
|
||||
Virtualbox, Vagrant and Ansible
|
||||
|
||||
Virtualbox, Vagrant and Ansible
|
||||
|
||||
- Virtualbox: https://www.virtualbox.org/wiki/Downloads
|
||||
|
||||
@@ -25,7 +25,7 @@ Virtualbox, Vagrant and Ansible
|
||||
|
||||
$ git clone --recursive https://github.com/ansible/ansible.git
|
||||
$ cd ansible
|
||||
$ git checkout stable-{{ getStableVersionFromAnsibleProject }}
|
||||
$ git checkout stable-2.0.0.1
|
||||
$ git submodule update
|
||||
|
||||
- source the setup script to make Ansible available on this terminal session:
|
||||
@@ -38,7 +38,6 @@ Virtualbox, Vagrant and Ansible
|
||||
|
||||
|
||||
## 2. Preparing the environment
|
||||
Change into directory that has your Vagrantfile
|
||||
|
||||
Run the following commands:
|
||||
|
||||
@@ -67,14 +66,6 @@ will reflect inside the instance.
|
||||
|
||||
- Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed
|
||||
|
||||
- If you get an error like "no Vagrant file found" or you have a file but "cannot open base box" when running `vagrant up`,
|
||||
chances are good you not in the correct directory.
|
||||
Make sure you are in sub directory named "prepare-local". It has all the config files required by ansible, vagrant and virtualbox
|
||||
|
||||
- If you are using Python 3.7, running the ansible-playbook provisioning, see an error like "SyntaxError: invalid syntax" and it mentions
|
||||
the word "async", you need to upgrade your Ansible version to 2.6 or higher to resolve the keyword conflict.
|
||||
https://github.com/ansible/ansible/issues/42105
|
||||
|
||||
- If you get strange Ansible errors about dependencies, try to check your pip
|
||||
version with `pip --version`. The current version is 8.1.1. If your pip is
|
||||
older than this, upgrade it with `sudo pip install --upgrade pip`, restart
|
||||
|
||||
@@ -10,21 +10,15 @@ These tools can help you to create VMs on:

- [Docker](https://docs.docker.com/engine/installation/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this

Depending on the infrastructure that you want to use, you also need to install
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).

And if you want to generate printable cards:

- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)

You can install them with pip (perhaps with `pip install --user`, or even use `virtualenv` if that's your thing).

These require Python 3. If you are on a Mac, see below for specific instructions on setting up
Python 3 to be the default Python on a Mac. In particular, if you installed `mosh`, Homebrew
may have changed your default Python to Python 2.
- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
- [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)

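A hedged way to install the card-generation dependencies listed above with pip (pdfkit/wkhtmltopdf are only needed for PDF output, as noted further down):

```sh
pip3 install --user pyyaml jinja2
pip3 install --user pdfkit    # optional; PDF generation also needs wkhtmltopdf installed
```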
## General Workflow
|
||||
|
||||
@@ -93,37 +87,26 @@ You're all set!
|
||||
```
|
||||
workshopctl - the orchestration workshop swiss army knife
|
||||
Commands:
|
||||
build Build the Docker image to run this program in a container
|
||||
cards Generate ready-to-print cards for a group of VMs
|
||||
deploy Install Docker on a bunch of running VMs
|
||||
disableaddrchecks Disable source/destination IP address checks
|
||||
disabledocker Stop Docker Engine and don't restart it automatically
|
||||
helmprom Install Helm and Prometheus
|
||||
help Show available commands
|
||||
ids (FIXME) List the instance IDs belonging to a given tag or token
|
||||
kubebins Install Kubernetes and CNI binaries but don't start anything
|
||||
kubereset Wipe out Kubernetes configuration on all nodes
|
||||
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
|
||||
kubetest Check that all nodes are reporting as Ready
|
||||
listall List VMs running on all configured infrastructures
|
||||
list List available groups for a given infrastructure
|
||||
netfix Disable GRO and run a pinger job on the VMs
|
||||
opensg Open the default security group to ALL ingress traffic
|
||||
ping Ping VMs in a given tag, to check that they have network access
|
||||
pssh Run an arbitrary command on all nodes
|
||||
pull_images Pre-pull a bunch of Docker images
|
||||
quotas Check our infrastructure quotas (max instances)
|
||||
remap_nodeports Remap NodePort range to 10000-10999
|
||||
retag (FIXME) Apply a new tag to a group of VMs
|
||||
ssh Open an SSH session to the first node of a tag
|
||||
start Start a group of VMs
|
||||
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
|
||||
tags List groups of VMs known locally
|
||||
test Run tests (pre-flight checks) on a group of VMs
|
||||
weavetest Check that weave seems properly setup
|
||||
webssh Install a WEB SSH server on the machines (port 1080)
|
||||
wrap Run this program in a container
|
||||
www Run a web server to access card HTML and PDF
|
||||
ami Show the AMI that will be used for deployment
|
||||
amis List Ubuntu AMIs in the current region
|
||||
build Build the Docker image to run this program in a container
|
||||
cards Generate ready-to-print cards for a group of VMs
|
||||
deploy Install Docker on a bunch of running VMs
|
||||
ec2quotas Check our EC2 quotas (max instances)
|
||||
help Show available commands
|
||||
ids List the instance IDs belonging to a given tag or token
|
||||
ips List the IP addresses of the VMs for a given tag or token
|
||||
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
|
||||
kubetest Check that all notes are reporting as Ready
|
||||
list List available groups in the current region
|
||||
opensg Open the default security group to ALL ingress traffic
|
||||
pull_images Pre-pull a bunch of Docker images
|
||||
retag Apply a new tag to a group of VMs
|
||||
start Start a group of VMs
|
||||
status List instance status for a given group
|
||||
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
|
||||
test Run tests (pre-flight checks) on a group of VMs
|
||||
wrap Run this program in a container
|
||||
```
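For orientation, a typical end-to-end session with these commands might look like the sketch below (based on the helper scripts elsewhere in this diff; the infra and settings names are placeholders):

```sh
TAG=$(./workshopctl maketag)
./workshopctl start --settings settings/yourconfig.yaml --infra infra/aws-eu-central-1 --tag $TAG
./workshopctl deploy $TAG
./workshopctl kube $TAG
./workshopctl kubetest $TAG
./workshopctl cards $TAG
./workshopctl stop $TAG
```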
|
||||
|
||||
### Summary of What `./workshopctl` Does For You
|
||||
@@ -262,32 +245,3 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m
|
||||
|
||||
- Don't write to bash history in system() in postprep
|
||||
- compose, etc version inconsistent (int vs str)
|
||||
|
||||
## Making sure Python3 is the default (Mac only)
|
||||
|
||||
Check the `/usr/local/bin/python` symlink. It should be pointing to
|
||||
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
|
||||
instructions.
|
||||
|
||||
1) Verify that Python 3 is installed.
|
||||
|
||||
```
|
||||
ls -la /usr/local/Cellar/Python
|
||||
```
|
||||
|
||||
You should see one or more versions of Python 3. If you don't,
|
||||
install it with `brew install python`.
|
||||
|
||||
2) Verify that `python` points to Python3.
|
||||
|
||||
```
|
||||
ls -la /usr/local/bin/python
|
||||
```
|
||||
|
||||
If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.
|
||||
|
||||
```
|
||||
rm /usr/local/bin/python
|
||||
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
|
||||
# where xxxx is the most recent Python 3 version you saw above
|
||||
```
|
||||
|
||||
@@ -1,10 +0,0 @@
#!/bin/sh
set -e
TAG=$(./workshopctl maketag)
./workshopctl start --settings settings/jerome.yaml --infra infra/aws-eu-central-1 --tag $TAG
./workshopctl deploy $TAG
./workshopctl kube $TAG
./workshopctl helmprom $TAG
while ! ./workshopctl kubetest $TAG; do sleep 1; done
./workshopctl tmux $TAG
echo ./workshopctl stop $TAG
@@ -33,14 +33,9 @@ _cmd_cards() {
|
||||
../../lib/ips-txt-to-html.py settings.yaml
|
||||
)
|
||||
|
||||
ln -sf ../tags/$TAG/ips.html www/$TAG.html
|
||||
ln -sf ../tags/$TAG/ips.pdf www/$TAG.pdf
|
||||
|
||||
info "Cards created. You can view them with:"
|
||||
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
|
||||
info "open tags/$TAG/ips.html (on macOS)"
|
||||
info "Or you can start a web server with:"
|
||||
info "$0 www"
|
||||
}
|
||||
|
||||
_cmd deploy "Install Docker on a bunch of running VMs"
|
||||
@@ -113,12 +108,9 @@ _cmd_disabledocker() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo systemctl disable docker.service
|
||||
sudo systemctl disable docker.socket
|
||||
sudo systemctl stop docker
|
||||
sudo killall containerd
|
||||
"
|
||||
pssh "sudo systemctl disable docker.service"
|
||||
pssh "sudo systemctl disable docker.socket"
|
||||
pssh "sudo systemctl stop docker"
|
||||
}
|
||||
|
||||
_cmd kubebins "Install Kubernetes and CNI binaries but don't start anything"
|
||||
@@ -130,20 +122,23 @@ _cmd_kubebins() {
|
||||
set -e
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz \
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
##VERSION##
|
||||
curl -L https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx \
|
||||
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
|
||||
curl -L https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
|
||||
fi
|
||||
if ! [ -x kubelet ]; then
|
||||
for BINARY in kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy;
|
||||
do
|
||||
sudo ln -s hyperkube \$BINARY
|
||||
done
|
||||
fi
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
if ! [ -x bridge ]; then
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
|
||||
| sudo tar -zx
|
||||
fi
|
||||
"
|
||||
@@ -157,10 +152,10 @@ _cmd_kube() {
|
||||
# Optional version, e.g. 1.13.5
|
||||
KUBEVERSION=$2
|
||||
if [ "$KUBEVERSION" ]; then
|
||||
EXTRA_APTGET="=$KUBEVERSION-00"
|
||||
EXTRA_KUBELET="=$KUBEVERSION-00"
|
||||
EXTRA_KUBEADM="--kubernetes-version=v$KUBEVERSION"
|
||||
else
|
||||
EXTRA_APTGET=""
|
||||
EXTRA_KUBELET=""
|
||||
EXTRA_KUBEADM=""
|
||||
fi
|
||||
|
||||
@@ -172,7 +167,7 @@ _cmd_kube() {
|
||||
sudo tee /etc/apt/sources.list.d/kubernetes.list"
|
||||
pssh --timeout 200 "
|
||||
sudo apt-get update -q &&
|
||||
sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
|
||||
sudo apt-get install -qy kubelet$EXTRA_KUBELET kubeadm kubectl &&
|
||||
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
|
||||
|
||||
# Initialize kube master
|
||||
@@ -242,14 +237,13 @@ EOF"
|
||||
# Install helm
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/helm ]; then
|
||||
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 | sudo bash &&
|
||||
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash &&
|
||||
helm completion bash | sudo tee /etc/bash_completion.d/helm
|
||||
fi"
|
||||
|
||||
# Install ship
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/ship ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx ship
|
||||
fi"
|
||||
@@ -257,7 +251,7 @@ EOF"
|
||||
# Install the AWS IAM authenticator
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
|
||||
##VERSION##
|
||||
##VERSION##
|
||||
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
fi"
|
||||
@@ -324,15 +318,6 @@ _cmd_listall() {
done
}

_cmd maketag "Generate a quasi-unique tag for a group of instances"
_cmd_maketag() {
if [ -z $USER ]; then
export USER=anonymous
fi
MS=$(($(date +%N)/1000000))
date +%Y-%m-%d-%H-%M-$MS-$USER
}

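A tag produced by `maketag` therefore embeds the date, time, milliseconds, and the UNIX user name; illustrative output only:

```sh
$ ./workshopctl maketag
2020-02-09-17-42-873-alice
```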
_cmd ping "Ping VMs in a given tag, to check that they have network access"
|
||||
_cmd_ping() {
|
||||
TAG=$1
|
||||
@@ -372,16 +357,6 @@ _cmd_opensg() {
|
||||
infra_opensg
|
||||
}
|
||||
|
||||
_cmd portworx "Prepare the nodes for Portworx deployment"
|
||||
_cmd_portworx() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo truncate --size 10G /portworx.blk &&
|
||||
sudo losetup /dev/loop4 /portworx.blk"
|
||||
}
|
||||
|
||||
_cmd disableaddrchecks "Disable source/destination IP address checks"
|
||||
_cmd_disableaddrchecks() {
|
||||
TAG=$1
|
||||
@@ -406,20 +381,6 @@ _cmd_pull_images() {
pull_tag
}

_cmd remap_nodeports "Remap NodePort range to 10000-10999"
_cmd_remap_nodeports() {
TAG=$1
need_tag

FIND_LINE=" - --service-cluster-ip-range=10.96.0.0\/12"
ADD_LINE=" - --service-node-port-range=10000-10999"
MANIFEST_FILE=/etc/kubernetes/manifests/kube-apiserver.yaml
pssh "
if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
fi"
}

_cmd quotas "Check our infrastructure quotas (max instances)"
|
||||
_cmd_quotas() {
|
||||
need_infra $1
|
||||
@@ -475,7 +436,7 @@ _cmd_start() {
|
||||
need_infra $INFRA
|
||||
|
||||
if [ -z "$TAG" ]; then
|
||||
TAG=$(_cmd_maketag)
|
||||
TAG=$(make_tag)
|
||||
fi
|
||||
mkdir -p tags/$TAG
|
||||
ln -s ../../$INFRA tags/$TAG/infra.sh
|
||||
@@ -537,24 +498,20 @@ _cmd_test() {
|
||||
test_tag
|
||||
}
|
||||
|
||||
_cmd tmux "Log into the first node and start a tmux server"
|
||||
_cmd_tmux() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
IP=$(head -1 tags/$TAG/ips.txt)
|
||||
info "Opening ssh+tmux with $IP"
|
||||
rm -f /tmp/tmux-$UID/default
|
||||
ssh -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
|
||||
}
|
||||
|
||||
_cmd helmprom "Install Helm and Prometheus"
|
||||
_cmd_helmprom() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
sudo -u docker -H helm repo add stable https://kubernetes-charts.storage.googleapis.com/
|
||||
sudo -u docker -H helm install prometheus stable/prometheus \
|
||||
kubectl -n kube-system get serviceaccount helm ||
|
||||
kubectl -n kube-system create serviceaccount helm
|
||||
sudo -u docker -H helm init --service-account helm
|
||||
kubectl get clusterrolebinding helm-can-do-everything ||
|
||||
kubectl create clusterrolebinding helm-can-do-everything \
|
||||
--clusterrole=cluster-admin \
|
||||
--serviceaccount=kube-system:helm
|
||||
sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
|
||||
--namespace kube-system \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
@@ -611,18 +568,6 @@ EOF"
|
||||
sudo systemctl start webssh.service"
|
||||
}
|
||||
|
||||
_cmd www "Run a web server to access card HTML and PDF"
|
||||
_cmd_www() {
|
||||
cd www
|
||||
IPADDR=$(curl -sL canihazip.com/s)
|
||||
info "The following files are available:"
|
||||
for F in *; do
|
||||
echo "http://$IPADDR:8000/$F"
|
||||
done
|
||||
info "Press Ctrl-C to stop server."
|
||||
python3 -m http.server
|
||||
}
|
||||
|
||||
greet() {
|
||||
IAMUSER=$(aws iam get-user --query 'User.UserName')
|
||||
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
|
||||
@@ -741,3 +686,10 @@ sync_keys() {
|
||||
info "Using existing key $AWS_KEY_NAME."
|
||||
fi
|
||||
}
|
||||
|
||||
make_tag() {
|
||||
if [ -z $USER ]; then
|
||||
export USER=anonymous
|
||||
fi
|
||||
date +%Y-%m-%d-%H-%M-$USER
|
||||
}
|
||||
|
||||
@@ -4,12 +4,17 @@ import sys
|
||||
import yaml
|
||||
import jinja2
|
||||
|
||||
def prettify(l):
|
||||
l = [ip.strip() for ip in l]
|
||||
ret = [ "node{}: <code>{}</code>".format(i+1, s) for (i, s) in zip(range(len(l)), l) ]
|
||||
return ret
|
||||
|
||||
# Read settings from user-provided settings file
|
||||
context = yaml.safe_load(open(sys.argv[1]))
|
||||
SETTINGS = yaml.load(open(sys.argv[1]))
|
||||
|
||||
clustersize = SETTINGS["clustersize"]
|
||||
|
||||
ips = list(open("ips.txt"))
|
||||
clustersize = context["clustersize"]
|
||||
|
||||
print("---------------------------------------------")
|
||||
print(" Number of IPs: {}".format(len(ips)))
|
||||
@@ -25,9 +30,7 @@ while ips:
|
||||
ips = ips[clustersize:]
|
||||
clusters.append(cluster)
|
||||
|
||||
context["clusters"] = clusters
|
||||
|
||||
template_file_name = context["cards_template"]
|
||||
template_file_name = SETTINGS["cards_template"]
|
||||
template_file_path = os.path.join(
|
||||
os.path.dirname(__file__),
|
||||
"..",
|
||||
@@ -36,19 +39,18 @@ template_file_path = os.path.join(
|
||||
)
|
||||
template = jinja2.Template(open(template_file_path).read())
|
||||
with open("ips.html", "w") as f:
|
||||
f.write(template.render(**context))
|
||||
f.write(template.render(clusters=clusters, **SETTINGS))
|
||||
print("Generated ips.html")
|
||||
|
||||
|
||||
try:
|
||||
import pdfkit
|
||||
with open("ips.html") as f:
|
||||
pdfkit.from_file(f, "ips.pdf", options={
|
||||
"page-size": context["paper_size"],
|
||||
"margin-top": context["paper_margin"],
|
||||
"margin-bottom": context["paper_margin"],
|
||||
"margin-left": context["paper_margin"],
|
||||
"margin-right": context["paper_margin"],
|
||||
"page-size": SETTINGS["paper_size"],
|
||||
"margin-top": SETTINGS["paper_margin"],
|
||||
"margin-bottom": SETTINGS["paper_margin"],
|
||||
"margin-left": SETTINGS["paper_margin"],
|
||||
"margin-right": SETTINGS["paper_margin"],
|
||||
})
|
||||
print("Generated ips.pdf")
|
||||
except ImportError:
|
||||
|
||||
@@ -73,29 +73,8 @@ set expandtab
|
||||
set number
|
||||
set shiftwidth=2
|
||||
set softtabstop=2
|
||||
set nowrap
|
||||
SQRL""")
|
||||
|
||||
# Custom .tmux.conf
|
||||
system(
|
||||
"""sudo -u docker tee /home/docker/.tmux.conf <<SQRL
|
||||
bind h select-pane -L
|
||||
bind j select-pane -D
|
||||
bind k select-pane -U
|
||||
bind l select-pane -R
|
||||
|
||||
# Allow using mouse to switch panes
|
||||
set -g mouse on
|
||||
|
||||
# Make scrolling with wheels work
|
||||
|
||||
bind -n WheelUpPane if-shell -F -t = "#{mouse_any_flag}" "send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'select-pane -t=; copy-mode -e; send-keys -M'"
|
||||
bind -n WheelDownPane select-pane -t= \; send-keys -M
|
||||
|
||||
SQRL"""
|
||||
)
|
||||
|
||||
|
||||
# add docker user to sudoers and allow password authentication
|
||||
system("""sudo tee /etc/sudoers.d/docker <<SQRL
|
||||
docker ALL=(ALL) NOPASSWD:ALL
|
||||
@@ -106,7 +85,6 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
|
||||
system("sudo service ssh restart")
|
||||
system("sudo apt-get -q update")
|
||||
system("sudo apt-get -qy install git jq")
|
||||
system("sudo apt-get -qy install emacs-nox joe")
|
||||
|
||||
#######################
|
||||
### DOCKER INSTALLS ###
|
||||
|
||||
@@ -26,5 +26,3 @@ machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
image:
|
||||
|
||||
@@ -26,6 +26,3 @@ machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
clusternumber: 100
|
||||
image:
|
||||
|
||||
@@ -26,6 +26,3 @@ machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
clusternumber: 200
|
||||
image:
|
||||
|
||||
@@ -26,5 +26,3 @@ machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
image:
|
||||
|
||||
@@ -26,3 +26,4 @@ machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
|
||||
@@ -1,24 +1,9 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
retry () {
|
||||
N=$1
|
||||
I=0
|
||||
shift
|
||||
|
||||
while ! "$@"; do
|
||||
I=$(($I+1))
|
||||
if [ $I -gt $N ]; then
|
||||
echo "FAILED, ABORTING"
|
||||
exit 1
|
||||
fi
|
||||
echo "FAILED, RETRYING ($I/$N)"
|
||||
done
|
||||
}
|
||||
|
||||
export AWS_INSTANCE_TYPE=t3a.small
|
||||
|
||||
INFRA=infra/aws-eu-west-3
|
||||
INFRA=infra/aws-us-west-2
|
||||
|
||||
STUDENTS=2
|
||||
|
||||
@@ -32,9 +17,9 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $STUDENTS
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl disabledocker $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl disabledocker $TAG
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kubenet
|
||||
@@ -45,9 +30,9 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kuberouter
|
||||
@@ -58,9 +43,9 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
#INFRA=infra/aws-us-west-1
|
||||
@@ -75,6 +60,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kube $TAG 1.15.9
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kube $TAG 1.13.5
|
||||
./workshopctl cards $TAG
|
||||
|
||||
|
||||
@@ -1,23 +1,12 @@
|
||||
{#
|
||||
The variables below can be customized here directly, or in your
|
||||
settings.yaml file. Any variable in settings.yaml will be exposed
|
||||
in here as well.
|
||||
#}
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
|
||||
{%- set url = url
|
||||
| default("http://FIXME.container.training/") -%}
|
||||
{%- set pagesize = pagesize
|
||||
| default(9) -%}
|
||||
{%- set lang = lang
|
||||
| default("en") -%}
|
||||
{%- set event = event
|
||||
| default("training session") -%}
|
||||
{%- set backside = backside
|
||||
| default(False) -%}
|
||||
{%- set image = image
|
||||
| default("kube") -%}
|
||||
{%- set clusternumber = clusternumber
|
||||
| default(None) -%}
|
||||
{%- set url = "http://FIXME.container.training/" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- set lang = "en" -%}
|
||||
{%- set event = "training session" -%}
|
||||
{%- set backside = False -%}
|
||||
{%- set image = "kube" -%}
|
||||
{%- set clusternumber = 100 -%}
|
||||
|
||||
{%- set image_src = {
|
||||
"docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
|
||||
@@ -172,9 +161,7 @@ img.kube {
|
||||
<div>
|
||||
<p>{{ intro }}</p>
|
||||
<p>
|
||||
{% if image_src %}
|
||||
<img src="{{ image_src }}" />
|
||||
{% endif %}
|
||||
<table>
|
||||
{% if clusternumber != None %}
|
||||
<tr><td>cluster:</td></tr>
|
||||
@@ -200,10 +187,8 @@ img.kube {
|
||||
</p>
|
||||
|
||||
<p>
|
||||
{% if url %}
|
||||
{{ slides_are_at }}
|
||||
<center>{{ url }}</center>
|
||||
{% endif %}
|
||||
</p>
|
||||
</div>
|
||||
{% if loop.index%pagesize==0 or loop.last %}
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
This directory will contain symlinks to HTML and PDF files for the cards
|
||||
with the IP address, login, and password for the training environments.
|
||||
|
||||
The file "index.html" is empty on purpose: it prevents listing the files.
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM alpine:3.11
|
||||
RUN apk add --no-cache entr py3-pip git zip
|
||||
FROM alpine:3.9
|
||||
RUN apk add --no-cache entr py-pip git
|
||||
COPY requirements.txt .
|
||||
RUN pip3 install -r requirements.txt
|
||||
RUN pip install -r requirements.txt
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
# Uncomment and/or edit one of the following lines if necessary.
|
||||
#/ /kube-halfday.yml.html 200
|
||||
/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-fullday.yml.html 200
|
||||
#/ /kube-twodays.yml.html 200
|
||||
|
||||
# And this makes it possible to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
|
||||
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
|
||||
|
||||
slides/assignments/TODO.txt (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
# Our sample application
|
||||
|
||||
No assignment
|
||||
|
||||
# Kubernetes concepts
|
||||
|
||||
Do we want some kind of multiple-choice quiz?
|
||||
|
||||
# First contact with kubectl
|
||||
|
||||
Start some pre-defined image and check its logs
|
||||
(Do we want to make a custom "mystery image" that shows a message
|
||||
and then sleeps forever?)
|
||||
|
||||
Start another one (to make sure they understand that they need
|
||||
to specify a unique name each time)
|
||||
|
||||
Provide as many ways as you can to figure out on which node
|
||||
these pods are running (even if you only have one node).
|
||||
|
||||
# Exposing containers
|
||||
|
||||
Start a container running the official tomcat image.
|
||||
Expose it.
|
||||
Connect to it.
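
One possible solution sketch, assuming kubectl and a NodePort service are acceptable
(resource names are made up):

    kubectl create deployment tomcat --image=tomcat
    kubectl expose deployment tomcat --port=8080 --type=NodePort
    kubectl get service tomcat            # note the allocated node port
    curl http://<node-address>:<node-port>/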
|
||||
|
||||
# Shipping apps
|
||||
|
||||
(We need a few images for a demo app other than DockerCoins?)
|
||||
|
||||
Start the components of the app.
|
||||
Expose what needs to be exposed.
|
||||
Connect to the app and check that it works.
|
||||
|
||||
slides/assignments/setup.md (new file, 105 lines)
@@ -0,0 +1,105 @@
|
||||
## Assignment: get Kubernetes
|
||||
|
||||
- In order to do the other assignments, we need a Kubernetes cluster
|
||||
|
||||
- Here are some *free* options:
|
||||
|
||||
- Docker Desktop
|
||||
|
||||
- Minikube
|
||||
|
||||
- Online sandbox like Katacoda
|
||||
|
||||
- You can also get a managed cluster (but this costs some money)
|
||||
|
||||
---
|
||||
|
||||
## Recommendation 1: Docker Desktop
|
||||
|
||||
- If you are already using Docker Desktop, use it for Kubernetes
|
||||
|
||||
- If you are running macOS, [install Docker Desktop](https://docs.docker.com/docker-for-mac/install/)
|
||||
|
||||
- you will need a post-2010 Mac
|
||||
|
||||
- you will need macOS Sierra 10.12 or later
|
||||
|
||||
- If you are running Windows 10, [install Docker Desktop](https://docs.docker.com/docker-for-windows/install/)
|
||||
|
||||
- you will need Windows 10 64-bit Pro, Enterprise, or Education
|
||||
|
||||
- virtualization needs to be enabled in your BIOS
|
||||
|
||||
- Then [enable Kubernetes](https://blog.docker.com/2018/07/kubernetes-is-now-available-in-docker-desktop-stable-channel/) if it's not already on
|
||||
|
||||
---
|
||||
|
||||
## Recommendation 2: Minikube
|
||||
|
||||
- In some scenarios, you can't use Docker Desktop:
|
||||
|
||||
- if you run Linux
|
||||
|
||||
- if you are running an unsupported version of Windows
|
||||
|
||||
- You might also want to install Minikube for other reasons
|
||||
|
||||
(there are more tutorials and instructions out there for Minikube)
|
||||
|
||||
- Minikube installation is a bit more complex
|
||||
|
||||
(depending on which hypervisor and OS you are using)
|
||||
|
||||
---
|
||||
|
||||
## Minikube installation details
|
||||
|
||||
- Minikube typically runs in a local virtual machine
|
||||
|
||||
- It supports multiple hypervisors:
|
||||
|
||||
- VirtualBox (Linux, Mac, Windows)
|
||||
|
||||
- HyperV (Windows)
|
||||
|
||||
- HyperKit, VMware (Mac)
|
||||
|
||||
- KVM (Linux)
|
||||
|
||||
- Check the [documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) for details relevant to your setup
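
- For example, on a machine with VirtualBox installed, starting a local cluster could look like this (the `--vm-driver` flag name matches Minikube releases from this period and may have changed since):

```bash
# Start a single-node cluster inside a VirtualBox VM
minikube start --vm-driver=virtualbox

# Minikube configures kubectl automatically; check that the node is up
kubectl get nodes
```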
|
||||
|
||||
---
|
||||
|
||||
## Recommendation 3: learning platform
|
||||
|
||||
- Sometimes, you can't even install Minikube
|
||||
|
||||
(computer locked by IT policies; insufficient resources...)
|
||||
|
||||
- In that case, you can use a platform like:
|
||||
|
||||
- Katacoda
|
||||
|
||||
- Play-with-Kubernetes
|
||||
|
||||
---
|
||||
|
||||
## Recommendation 4: hosted cluster
|
||||
|
||||
- You can also get your own hosted cluster
|
||||
|
||||
- This will cost a little bit of money
|
||||
|
||||
(unless you have free hosting credits)
|
||||
|
||||
- Setup will vary depending on the provider, platform, etc.
|
||||
|
||||
---
|
||||
|
||||
class: assignment
|
||||
|
||||
- Make sure that you have a Kubernetes cluster
|
||||
|
||||
- You should be able to run `kubectl get nodes` and see a list of nodes
|
||||
|
||||
- These nodes should be in `Ready` state
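
- For instance, on a single-node Docker Desktop cluster, the output should look roughly like this (node name, age, and version will differ):

```bash
kubectl get nodes
# NAME             STATUS   ROLES    AGE   VERSION
# docker-desktop   Ready    master   2d    v1.14.x
```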
|
||||
@@ -26,10 +26,9 @@ IPADDR = None
|
||||
class State(object):
|
||||
|
||||
def __init__(self):
|
||||
self.clipboard = ""
|
||||
self.interactive = True
|
||||
self.verify_status = True
|
||||
self.simulate_type = False
|
||||
self.verify_status = False
|
||||
self.simulate_type = True
|
||||
self.switch_desktop = False
|
||||
self.sync_slides = False
|
||||
self.open_links = False
|
||||
@@ -39,7 +38,6 @@ class State(object):
|
||||
|
||||
def load(self):
|
||||
data = yaml.load(open("state.yaml"))
|
||||
self.clipboard = str(data["clipboard"])
|
||||
self.interactive = bool(data["interactive"])
|
||||
self.verify_status = bool(data["verify_status"])
|
||||
self.simulate_type = bool(data["simulate_type"])
|
||||
@@ -53,7 +51,6 @@ class State(object):
|
||||
def save(self):
|
||||
with open("state.yaml", "w") as f:
|
||||
yaml.dump(dict(
|
||||
clipboard=self.clipboard,
|
||||
interactive=self.interactive,
|
||||
verify_status=self.verify_status,
|
||||
simulate_type=self.simulate_type,
|
||||
@@ -69,8 +66,6 @@ class State(object):
|
||||
state = State()
|
||||
|
||||
|
||||
outfile = open("autopilot.log", "w")
|
||||
|
||||
def hrule():
|
||||
return "="*int(subprocess.check_output(["tput", "cols"]))
|
||||
|
||||
@@ -90,11 +85,9 @@ class Snippet(object):
|
||||
# On single-line snippets, the data follows the method immediately
|
||||
if '\n' in content:
|
||||
self.method, self.data = content.split('\n', 1)
|
||||
self.data = self.data.strip()
|
||||
elif ' ' in content:
|
||||
self.method, self.data = content.split(' ', 1)
|
||||
else:
|
||||
self.method, self.data = content, None
|
||||
self.method, self.data = content.split(' ', 1)
|
||||
self.data = self.data.strip()
|
||||
self.next = None
|
||||
|
||||
def __str__(self):
|
||||
@@ -193,7 +186,7 @@ def wait_for_prompt():
|
||||
if last_line == "$":
|
||||
# This is a perfect opportunity to grab the node's IP address
|
||||
global IPADDR
|
||||
IPADDR = re.findall("\[(.*)\]", output, re.MULTILINE)[-1]
|
||||
IPADDR = re.findall("^\[(.*)\]", output, re.MULTILINE)[-1]
|
||||
return
|
||||
# When we are in an alpine container, the prompt will be "/ #"
|
||||
if last_line == "/ #":
|
||||
@@ -242,8 +235,6 @@ tmux
|
||||
|
||||
rm -f /tmp/tmux-{uid}/default && ssh -t -L /tmp/tmux-{uid}/default:/tmp/tmux-1001/default docker@{ipaddr} tmux new-session -As 0
|
||||
|
||||
(Or use workshopctl tmux)
|
||||
|
||||
3. If you cannot control a remote tmux:
|
||||
|
||||
tmux new-session ssh docker@{ipaddr}
|
||||
@@ -268,11 +259,26 @@ for slide in re.split("\n---?\n", content):
|
||||
slide_classes = slide_classes[0].split(",")
|
||||
slide_classes = [c.strip() for c in slide_classes]
|
||||
if excluded_classes & set(slide_classes):
|
||||
logging.debug("Skipping excluded slide.")
|
||||
logging.info("Skipping excluded slide.")
|
||||
continue
|
||||
slides.append(Slide(slide))
|
||||
|
||||
|
||||
def send_keys(data):
|
||||
if state.simulate_type and data[0] != '^':
|
||||
for key in data:
|
||||
if key == ";":
|
||||
key = "\\;"
|
||||
if key == "\n":
|
||||
if interruptible_sleep(1): return
|
||||
subprocess.check_call(["tmux", "send-keys", key])
|
||||
if interruptible_sleep(0.15*random.random()): return
|
||||
if key == "\n":
|
||||
if interruptible_sleep(1): return
|
||||
else:
|
||||
subprocess.check_call(["tmux", "send-keys", data])
|
||||
|
||||
|
||||
def capture_pane():
|
||||
return subprocess.check_output(["tmux", "capture-pane", "-p"]).decode('utf-8')
|
||||
|
||||
@@ -282,7 +288,7 @@ setup_tmux_and_ssh()
|
||||
|
||||
try:
|
||||
state.load()
|
||||
logging.debug("Successfully loaded state from file.")
|
||||
logging.info("Successfully loaded state from file.")
|
||||
# Let's override the starting state, so that when an error occurs,
|
||||
# we can restart the auto-tester and then single-step or debug.
|
||||
# (Instead of running again through the same issue immediately.)
|
||||
@@ -291,7 +297,6 @@ except Exception as e:
|
||||
logging.exception("Could not load state from file.")
|
||||
logging.warning("Using default values.")
|
||||
|
||||
|
||||
def move_forward():
|
||||
state.snippet += 1
|
||||
if state.snippet > len(slides[state.slide].snippets):
|
||||
@@ -315,147 +320,10 @@ def check_bounds():
|
||||
state.slide = len(slides)-1
|
||||
|
||||
|
||||
##########################################################
|
||||
# All functions starting with action_ correspond to the
|
||||
# code to be executed when seeing ```foo``` blocks in the
|
||||
# input. ```foo``` would call action_foo(state, snippet).
|
||||
##########################################################
|
||||
|
||||
|
||||
def send_keys(keys):
|
||||
subprocess.check_call(["tmux", "send-keys", keys])
|
||||
|
||||
# Send a single key.
|
||||
# Useful for special keys, e.g. tmux interprets these strings:
|
||||
# ^C (and all other sequences starting with a caret)
|
||||
# Space
|
||||
# ... and many others (check tmux manpage for details).
|
||||
def action_key(state, snippet):
|
||||
send_keys(snippet.data)
|
||||
|
||||
|
||||
# Send multiple keys.
|
||||
# If keystroke simulation is off, all keys are sent at once.
|
||||
# If keystroke simulation is on, keys are sent one by one, with a delay between them.
|
||||
def action_keys(state, snippet, keys=None):
|
||||
if keys is None:
|
||||
keys = snippet.data
|
||||
if not state.simulate_type:
|
||||
send_keys(keys)
|
||||
else:
|
||||
for key in keys:
|
||||
if key == ";":
|
||||
key = "\\;"
|
||||
if key == "\n":
|
||||
if interruptible_sleep(1): return
|
||||
send_keys(key)
|
||||
if interruptible_sleep(0.15*random.random()): return
|
||||
if key == "\n":
|
||||
if interruptible_sleep(1): return
|
||||
|
||||
|
||||
def action_hide(state, snippet):
|
||||
if state.run_hidden:
|
||||
action_bash(state, snippet)
|
||||
|
||||
|
||||
def action_bash(state, snippet):
|
||||
data = snippet.data
|
||||
# Make sure that we're ready
|
||||
wait_for_prompt()
|
||||
# Strip leading spaces
|
||||
data = re.sub("\n +", "\n", data)
|
||||
# Remove backticks (they are used to highlight sections)
|
||||
data = data.replace('`', '')
|
||||
# Add "RETURN" at the end of the command :)
|
||||
data += "\n"
|
||||
# Send command
|
||||
action_keys(state, snippet, data)
|
||||
# Force a short sleep to avoid race condition
|
||||
time.sleep(0.5)
|
||||
if snippet.next and snippet.next.method == "wait":
|
||||
wait_for_string(snippet.next.data)
|
||||
elif snippet.next and snippet.next.method == "longwait":
|
||||
wait_for_string(snippet.next.data, 10*TIMEOUT)
|
||||
else:
|
||||
wait_for_prompt()
|
||||
# Verify return code
|
||||
check_exit_status()
|
||||
|
||||
|
||||
def action_copy(state, snippet):
|
||||
screen = capture_pane()
|
||||
matches = re.findall(snippet.data, screen, flags=re.DOTALL)
|
||||
if len(matches) == 0:
|
||||
raise Exception("Could not find regex {} in output.".format(snippet.data))
|
||||
# Arbitrarily get the most recent match
|
||||
match = matches[-1]
|
||||
# Remove line breaks (like a screen copy paste would do)
|
||||
match = match.replace('\n', '')
|
||||
logging.debug("Copied {} to clipboard.".format(match))
|
||||
state.clipboard = match
|
||||
|
||||
|
||||
def action_paste(state, snippet):
|
||||
logging.debug("Pasting {} from clipboard.".format(state.clipboard))
|
||||
action_keys(state, snippet, state.clipboard)
|
||||
|
||||
|
||||
def action_check(state, snippet):
|
||||
wait_for_prompt()
|
||||
check_exit_status()
|
||||
|
||||
|
||||
def action_open(state, snippet):
|
||||
# Cheap way to get node1's IP address
|
||||
screen = capture_pane()
|
||||
url = snippet.data.replace("/node1", "/{}".format(IPADDR))
|
||||
# This should probably be adapted to run on different OS
|
||||
if state.open_links:
|
||||
subprocess.check_output(["xdg-open", url])
|
||||
focus_browser()
|
||||
if state.interactive:
|
||||
print("Press any key to continue to next step...")
|
||||
click.getchar()
|
||||
|
||||
|
||||
def action_tmux(state, snippet):
|
||||
subprocess.check_call(["tmux"] + snippet.data.split())
|
||||
|
||||
|
||||
def action_unknown(state, snippet):
|
||||
logging.warning("Unknown method {}: {!r}".format(snippet.method, snippet.data))
|
||||
|
||||
|
||||
def run_snippet(state, snippet):
|
||||
logging.info("Running with method {}: {}".format(snippet.method, snippet.data))
|
||||
try:
|
||||
action = globals()["action_"+snippet.method]
|
||||
except KeyError:
|
||||
action = action_unknown
|
||||
try:
|
||||
action(state, snippet)
|
||||
result = "OK"
|
||||
except:
|
||||
result = "ERR"
|
||||
logging.exception("While running method {} with {!r}".format(snippet.method, snippet.data))
|
||||
# Try to recover
|
||||
try:
|
||||
wait_for_prompt()
|
||||
except:
|
||||
subprocess.check_call(["tmux", "new-window"])
|
||||
wait_for_prompt()
|
||||
outfile.write("{} SLIDE={} METHOD={} DATA={!r}\n".format(result, state.slide, snippet.method, snippet.data))
|
||||
outfile.flush()
|
||||
|
||||
|
||||
while True:
|
||||
state.save()
|
||||
slide = slides[state.slide]
|
||||
if state.snippet and state.snippet <= len(slide.snippets):
|
||||
snippet = slide.snippets[state.snippet-1]
|
||||
else:
|
||||
snippet = None
|
||||
snippet = slide.snippets[state.snippet-1] if state.snippet else None
|
||||
click.clear()
|
||||
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
|
||||
"[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
|
||||
@@ -517,10 +385,7 @@ while True:
|
||||
# continue until next timeout
|
||||
state.interactive = False
|
||||
elif command in ("y", "\r", " "):
|
||||
if snippet:
|
||||
run_snippet(state, snippet)
|
||||
move_forward()
|
||||
else:
|
||||
if not snippet:
|
||||
# Advance to next snippet
|
||||
# Advance until a slide that has snippets
|
||||
while not slides[state.slide].snippets:
|
||||
@@ -530,5 +395,59 @@ while True:
|
||||
break
|
||||
# And then advance to the snippet
|
||||
move_forward()
|
||||
continue
|
||||
method, data = snippet.method, snippet.data
|
||||
logging.info("Running with method {}: {}".format(method, data))
|
||||
if method == "keys":
|
||||
send_keys(data)
|
||||
elif method == "bash" or (method == "hide" and state.run_hidden):
|
||||
# Make sure that we're ready
|
||||
wait_for_prompt()
|
||||
# Strip leading spaces
|
||||
data = re.sub("\n +", "\n", data)
|
||||
# Remove backticks (they are used to highlight sections)
|
||||
data = data.replace('`', '')
|
||||
# Add "RETURN" at the end of the command :)
|
||||
data += "\n"
|
||||
# Send command
|
||||
send_keys(data)
|
||||
# Force a short sleep to avoid race condition
|
||||
time.sleep(0.5)
|
||||
if snippet.next and snippet.next.method == "wait":
|
||||
wait_for_string(snippet.next.data)
|
||||
elif snippet.next and snippet.next.method == "longwait":
|
||||
wait_for_string(snippet.next.data, 10*TIMEOUT)
|
||||
else:
|
||||
wait_for_prompt()
|
||||
# Verify return code
|
||||
check_exit_status()
|
||||
elif method == "copypaste":
|
||||
screen = capture_pane()
|
||||
matches = re.findall(data, screen, flags=re.DOTALL)
|
||||
if len(matches) == 0:
|
||||
raise Exception("Could not find regex {} in output.".format(data))
|
||||
# Arbitrarily get the most recent match
|
||||
match = matches[-1]
|
||||
# Remove line breaks (like a screen copy paste would do)
|
||||
match = match.replace('\n', '')
|
||||
send_keys(match + '\n')
|
||||
# FIXME: we should factor out the "bash" method
|
||||
wait_for_prompt()
|
||||
check_exit_status()
|
||||
elif method == "open":
|
||||
# Cheap way to get node1's IP address
|
||||
screen = capture_pane()
|
||||
url = data.replace("/node1", "/{}".format(IPADDR))
|
||||
# This should probably be adapted to run on different OS
|
||||
if state.open_links:
|
||||
subprocess.check_output(["xdg-open", url])
|
||||
focus_browser()
|
||||
if state.interactive:
|
||||
print("Press any key to continue to next step...")
|
||||
click.getchar()
|
||||
else:
|
||||
logging.warning("Unknown method {}: {!r}".format(method, data))
|
||||
move_forward()
|
||||
|
||||
else:
|
||||
logging.warning("Unknown command {}.".format(command))
|
||||
|
||||
@@ -14,7 +14,6 @@ once)
|
||||
./appendcheck.py $YAML.html
|
||||
done
|
||||
fi
|
||||
zip -qr slides.zip . && echo "Created slides.zip archive."
|
||||
;;
|
||||
|
||||
forever)
|
||||
|
||||
@@ -104,6 +104,22 @@ like Windows, macOS, Solaris, FreeBSD ...
|
||||
|
||||
---
|
||||
|
||||
## rkt
|
||||
|
||||
* Compares to `runc`.
|
||||
|
||||
* No daemon or API.
|
||||
|
||||
* Strong emphasis on security (through privilege separation).
|
||||
|
||||
* Networking has to be set up separately (e.g. through CNI plugins).
|
||||
|
||||
* Partial image management (pull, but no push).
|
||||
|
||||
(Image build is handled by separate tools.)
|
||||
|
||||
---
|
||||
|
||||
## CRI-O
|
||||
|
||||
* Designed to be used with Kubernetes as a simple, basic runtime.
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
# Init systems and PID 1
|
||||
|
||||
In this chapter, we will consider:
|
||||
|
||||
- the role of PID 1 in the world of Docker,
|
||||
|
||||
- how to avoid some common pitfalls due to the misuse of init systems.
|
||||
|
||||
---
|
||||
|
||||
## What's an init system?
|
||||
|
||||
- On UNIX, the "init system" (or "init" in short) is PID 1.
|
||||
|
||||
- It is the first process started by the kernel when the system starts.
|
||||
|
||||
- It has multiple responsibilities:
|
||||
|
||||
- start every other process on the machine,
|
||||
|
||||
- reap orphaned zombie processes.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Orphaned zombie processes ?!?
|
||||
|
||||
- When a process exits (or "dies"), it becomes a "zombie".
|
||||
|
||||
(Zombie processes show up in `ps` or `top` with the status code `Z`.)
|
||||
|
||||
- Its parent process must *reap* the zombie process.
|
||||
|
||||
(This is done by calling `waitpid()` to retrieve the process' exit status.)
|
||||
|
||||
- When a process exits, if it has child processes, these processes are "orphaned."
|
||||
|
||||
- They are then re-parented to PID 1, init.
|
||||
|
||||
- Init therefore needs to take care of these orphaned processes when they exit.
|
||||
|
||||
---
|
||||
|
||||
## Don't use init systems in containers
|
||||
|
||||
- It's often tempting to use an init system or a process manager.
|
||||
|
||||
(Examples: *systemd*, *supervisord*...)
|
||||
|
||||
- Our containers are then called "system containers".
|
||||
|
||||
(By contrast with "application containers".)
|
||||
|
||||
- "System containers" are similar to lightweight virtual machines.
|
||||
|
||||
- They have multiple downsides:
|
||||
|
||||
- when starting multiple processes, their logs get mixed on stdout,
|
||||
|
||||
- if the application process dies, the container engine doesn't see it.
|
||||
|
||||
- Overall, they make it harder to operate and troubleshoot containerized apps.
|
||||
|
||||
---
|
||||
|
||||
## Exceptions and workarounds
|
||||
|
||||
- Sometimes, it's convenient to run a real init system like *systemd*.
|
||||
|
||||
(Example: a CI system whose goal is precisely to test an init script or unit file.)
|
||||
|
||||
- If we need to run multiple processes: can we use multiple containers?
|
||||
|
||||
(Example: [this Compose file](https://github.com/jpetazzo/container.training/blob/master/compose/simple-k8s-control-plane/docker-compose.yaml) runs multiple processes together.)
|
||||
|
||||
- When deploying with Kubernetes:
|
||||
|
||||
- a container belongs to a pod,
|
||||
|
||||
- a pod can have multiple containers.
|
||||
|
||||
---
|
||||
|
||||
## What about these zombie processes?
|
||||
|
||||
- Our application runs as PID 1 in the container.
|
||||
|
||||
- Our application may or may not be designed to reap zombie processes.
|
||||
|
||||
- If our application uses subprocesses and doesn't reap them ...
|
||||
|
||||
... this can lead to PID exhaustion!
|
||||
|
||||
(Or, more realistically, to a confusing herd of zombie processes.)
|
||||
|
||||
- How can we solve this?
|
||||
|
||||
---
|
||||
|
||||
## Tini to the rescue
|
||||
|
||||
- Docker can automatically provide a minimal `init` process.
|
||||
|
||||
- This is enabled with `docker run --init ...`
|
||||
|
||||
- It uses a small init system ([tini](https://github.com/krallin/tini)) as PID 1:
|
||||
|
||||
- it reaps zombies,
|
||||
|
||||
- it forwards signals,
|
||||
|
||||
- it exits when the child exits.
|
||||
|
||||
- It is totally transparent to our application.
|
||||
|
||||
- We should use it if our application creates subprocesses but doesn't reap them.
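
- A quick way to see what `--init` changes (a sketch; exact PIDs and output layout vary across Docker versions):

```bash
# Without --init, our command is PID 1:
docker run --rm alpine ps
#   PID   USER     TIME  COMMAND
#     1   root      0:00 ps

# With --init, a tini-based init runs as PID 1 and our command is a child:
docker run --rm --init alpine ps
#   PID   USER     TIME  COMMAND
#     1   root      0:00 /sbin/docker-init -- ps
#     7   root      0:00 ps
```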
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What about Kubernetes?
|
||||
|
||||
- Kubernetes does not expose that `--init` option.
|
||||
|
||||
- However, we can achieve the same result with [Process Namespace Sharing](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/).
|
||||
|
||||
- When Process Namespace Sharing is enabled, PID 1 will be `pause`.
|
||||
|
||||
- That `pause` process takes care of reaping zombies.
|
||||
|
||||
- Process Namespace Sharing is available since Kubernetes 1.16.
|
||||
|
||||
- If you're using an older version of Kubernetes ...
|
||||
|
||||
... you might have to add `tini` explicitly to your Docker image.
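
- Here is a minimal sketch of a pod using Process Namespace Sharing (the pod name and image are arbitrary):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: shared-pid-demo
spec:
  shareProcessNamespace: true
  containers:
  - name: main
    image: alpine
    command: ["sleep", "3600"]
EOF

# PID 1 seen from inside the container should now be the pause process
kubectl exec shared-pid-demo -- ps
```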
|
||||
@@ -102,44 +102,29 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Docker Desktop
|
||||
## Docker Desktop for Mac and Docker Desktop for Windows
|
||||
|
||||
* Special Docker edition available for Mac and Windows
|
||||
* Special Docker Editions that integrate well with their respective host OS
|
||||
|
||||
* Integrates well with the host OS:
|
||||
* Provide user-friendly GUI to edit Docker configuration and settings
|
||||
|
||||
* installed like normal user applications on the host
|
||||
* Leverage the host OS virtualization subsystem (e.g. the [Hypervisor API](https://developer.apple.com/documentation/hypervisor) on macOS)
|
||||
|
||||
* provides user-friendly GUI to edit Docker configuration and settings
|
||||
* Installed like normal user applications on the host
|
||||
|
||||
* Only support running one Docker VM at a time ...
|
||||
* Under the hood, they both run a tiny VM (transparent to our daily use)
|
||||
|
||||
* Access network resources like normal applications
|
||||
<br/>(and therefore, play better with enterprise VPNs and firewalls)
|
||||
|
||||
* Support filesystem sharing through volumes (we'll talk about this later)
|
||||
|
||||
* They only support running one Docker VM at a time ...
|
||||
<br/>
|
||||
... but we can use `docker-machine`, the Docker Toolbox, VirtualBox, etc. to get a cluster.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Docker Desktop internals
|
||||
|
||||
* Leverages the host OS virtualization subsystem
|
||||
|
||||
(e.g. the [Hypervisor API](https://developer.apple.com/documentation/hypervisor) on macOS)
|
||||
|
||||
* Under the hood, runs a tiny VM
|
||||
|
||||
(transparent to our daily use)
|
||||
|
||||
* Accesses network resources like normal applications
|
||||
|
||||
(and therefore, plays better with enterprise VPNs and firewalls)
|
||||
|
||||
* Supports filesystem sharing through volumes
|
||||
|
||||
(we'll talk about this later)
|
||||
|
||||
---
|
||||
|
||||
## Running Docker on macOS and Windows
|
||||
|
||||
When you execute `docker version` from the terminal:
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
# Container Super-structure
|
||||
|
||||
- Multiple orchestration platforms support some kind of container super-structure.
|
||||
|
||||
(i.e., a construct or abstraction bigger than a single container.)
|
||||
|
||||
- For instance, on Kubernetes, this super-structure is called a *pod*.
|
||||
|
||||
- A pod is a group of containers (it could be a single container, too).
|
||||
|
||||
- These containers run together, on the same host.
|
||||
|
||||
(A pod cannot straddle multiple hosts.)
|
||||
|
||||
- All the containers in a pod have the same IP address.
|
||||
|
||||
- How does that map to the Docker world?
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Anatomy of a Pod
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Pods in Docker
|
||||
|
||||
- The containers inside a pod share the same network namespace.
|
||||
|
||||
(Just like when using `docker run --net=container:<container_id>` with the CLI.)
|
||||
|
||||
- As a result, they can communicate together over `localhost`.
|
||||
|
||||
- In addition to "our" containers, the pod has a special container, the *sandbox*.
|
||||
|
||||
- That container uses a special image: `k8s.gcr.io/pause`.
|
||||
|
||||
(This is visible when listing containers running on a Kubernetes node.)
|
||||
|
||||
- Containers within a pod have independent filesystems.
|
||||
|
||||
- They can share directories by using a mechanism called *volumes.*
|
||||
|
||||
(Which is similar to the concept of volumes in Docker.)
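
- Here is a rough approximation of that setup with plain Docker commands (container names and the pause image tag are assumptions):

```bash
# The sandbox container only holds the network namespace
docker run -d --name sandbox k8s.gcr.io/pause:3.1

# Application containers join that namespace instead of getting their own
docker run -d --name app --net=container:sandbox alpine sleep 3600

# Both containers now see the same network interfaces and IP address
docker exec app ip addr
```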
|
||||
@@ -100,25 +100,3 @@ class: extra-details
|
||||
* In "Build rules" block near page bottom, put `/www` in "Build Context" column (or whichever directory the Dockerfile is in).
|
||||
* Click "Save and Build" to build the repository immediately (without waiting for a git push).
|
||||
* Subsequent builds will happen automatically, thanks to GitHub hooks.
|
||||
|
||||
---
|
||||
|
||||
## Building on the fly
|
||||
|
||||
- Some services can build images on the fly from a repository
|
||||
|
||||
- Example: [ctr.run](https://ctr.run/)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Use ctr.run to automatically build a container image and run it:
|
||||
```bash
|
||||
docker run ctr.run/github.com/undefinedlabs/hello-world
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
There might be a long pause before the first layer is pulled,
|
||||
because the API behind `docker pull` doesn't allow streaming build logs, and there is no feedback during the build.
|
||||
|
||||
It is possible to view the build logs by setting up an account on [ctr.run](https://ctr.run/).
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
# Links and resources
|
||||
|
||||
- [Docker Community Slack](https://community.docker.com/registrations/groups/4316)
|
||||
- [Docker Community Forums](https://forums.docker.com/)
|
||||
- [Docker Hub](https://hub.docker.com)
|
||||
- [Docker Blog](https://blog.docker.com/)
|
||||
- [Docker documentation](https://docs.docker.com/)
|
||||
- [Docker on StackOverflow](https://stackoverflow.com/questions/tagged/docker)
|
||||
- [Docker on Twitter](https://twitter.com/docker)
|
||||
- [Play With Docker Hands-On Labs](https://training.play-with-docker.com/)
|
||||
|
||||
.footnote[These slides (and future updates) are on → https://container.training/]
|
||||
slides/containers/links.md (new symbolic link, 1 line)
@@ -0,0 +1 @@
|
||||
../swarm/links.md
|
||||
@@ -1 +0,0 @@
|
||||
<mxfile host="www.draw.io" modified="2019-12-06T15:04:22.728Z" agent="Mozilla/5.0 (X11; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0" etag="zsQLtxL9GRXJF3jcROIq" version="12.3.7" type="device" pages="1"><diagram id="hOpsmMj0j3CSse8MyRSQ" name="Page-1">3VhLU9swEP41nmkPzcR2EpIjCaHtUEo6HCi9dBRb2BoUy8hyHvz6rmzJD9mBQBJgmoMjrVcr6dtvVytb7mSx/spRHF4yH1PL6fpryz2zHMcZ9Rz4k5JNLrFtd5RLAk58JSsF1+QRK2FXSVPi46SmKBijgsR1oceiCHuiJkOcs1Vd7Y7R+qwxCnBDcO0h2pTeEF+EuXTonJTyb5gEoZ7ZHqj9LZBWVjtJQuSzVUXkTi13whkTeWuxnmAq0dO45OPOt7wtFsZxJHYZ8PfXAwsvwsvLP5duOpn2bx4ufnyx1WqXiKZqx5YzoGBw7JMlNAPZDFkiQOkTGF8iDk9K5vC8T+eYYnhz3ul0Putxc66HaQkoVIwpNMRGQ8xZGvlYrrILr1chEfg6Rp58uwJWyfnFgkLPhiaiJIigzXPMlbUl5gKvtwJjF3ADUTFbYME3oKIGDJWDFEXdruqvSn/3ekoWVn2tPYsUx4LCdOkGaChPvMQrDafMmL8fbiHj5JFFAmmBhIwAz08VoILFR4GztyOaheLB0XQaaMYoTXCeNAQiEeb7YXsA0AoubeogVlBz3RbUjgaa2wAtCki0/nBA2S38elukei0Z1AAJR/6pPIug51GUJMSr4wJ755vf1c6tBLTT192ztQI47210b01EMQzalVHQKwfJTjHGh/NNLQ3TOVtNS4FykFR52j2wO5ZyDz9PIIF4gMVz0dl0d8Wd/RZvahnHFAmyrC+3zcVqhhkjkaiwqWvk/oHBknybalT1cDUN9Q1DtmEox6FhCGiBNhW1WCok2xfcM7Kr7dYOfWjkFks6F5i+nuHNGiHm0miI00TSZR0ziOiPl0SdlpP8bXOD3TzJd0sOCfBFaHHEIvxBE0a2znMiUcmUd00g7xXwPSNOHbOG2zXgTUNFJjl2wA/eIODtYQttG7eCn1isGL+3JIQDtJDxnD9B8n02yeU7XgkaxiO0wEmWLLKbEydRsON1AvKHaL8zeMBBSFPN2ndBfD+jM8cJeUTzzJSks/IO2O2Prf6ZnM4dUwTXnjHy7oMswU0YZTyb2r3LftIOE8BSJm2PyrBoSW7q2qqmtAo6VgPmicyyNRV2O1Bl92rM0XXwvkfm0AigugF2d5dgYVD0MKRslqQN3wNTYpxlTIGfP3LmhQ+vUkGJTLKZ3Ef8/gpGEZHlwE5XJsgk/zThHOmscp3mWTVoyYPDox1VB6hjP3r2t/XnKBP0F5d7hiF7aITBlux/sFgY/E+x4JhV+LvHwsn+saBLLV1P3VZrK7lxe1QWXtX6bIY5gW3Ig+pFJdUOd7KcNu8VfeaHoZNXBp9jlvlm+f7q4INu+T02Vy8/a7vTfw==</diagram></mxfile>
|
||||
@@ -1,14 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python2
|
||||
# coding: utf-8
|
||||
|
||||
FLAGS=dict(
|
||||
cz=u"🇨🇿",
|
||||
de=u"🇩🇪",
|
||||
fr=u"🇫🇷",
|
||||
uk=u"🇬🇧",
|
||||
us=u"🇺🇸",
|
||||
)
|
||||
|
||||
TEMPLATE="""<html>
|
||||
<head>
|
||||
<title>{{ title }}</title>
|
||||
@@ -43,7 +34,7 @@ TEMPLATE="""<html>
|
||||
|
||||
{% for item in coming_soon %}
|
||||
<tr>
|
||||
<td>{{ item.flag }} {{ item.title }}</td>
|
||||
<td>{{ item.title }}</td>
|
||||
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />{% endif %}</td>
|
||||
<td>{% if item.attend %}<a class="attend" href="{{ item.attend }}" />
|
||||
{% else %}
|
||||
@@ -132,13 +123,13 @@ TEMPLATE="""<html>
|
||||
</table>
|
||||
</div>
|
||||
</body>
|
||||
</html>"""
|
||||
</html>""".decode("utf-8")
|
||||
|
||||
import datetime
|
||||
import jinja2
|
||||
import yaml
|
||||
|
||||
items = yaml.safe_load(open("index.yaml"))
|
||||
items = yaml.load(open("index.yaml"))
|
||||
|
||||
# Items with a date correspond to scheduled sessions.
|
||||
# Items without a date correspond to self-paced content.
|
||||
@@ -169,7 +160,6 @@ for item in items:
|
||||
item["prettydate"] = date_begin.strftime("%B %d{}, %Y").format(suffix)
|
||||
item["begin"] = date_begin
|
||||
item["end"] = date_end
|
||||
item["flag"] = FLAGS.get(item.get("country"),"")
|
||||
|
||||
today = datetime.date.today()
|
||||
coming_soon = [i for i in items if i.get("date") and i["end"] >= today]
|
||||
@@ -187,10 +177,10 @@ with open("index.html", "w") as f:
|
||||
past_workshops=past_workshops,
|
||||
self_paced=self_paced,
|
||||
recorded_workshops=recorded_workshops
|
||||
))
|
||||
).encode("utf-8"))
|
||||
|
||||
with open("past.html", "w") as f:
|
||||
f.write(template.render(
|
||||
title="Container Training",
|
||||
all_past_workshops=past_workshops
|
||||
))
|
||||
).encode("utf-8"))
|
||||
|
||||
@@ -1,66 +1,3 @@
|
||||
- date: 2020-03-06
|
||||
country: uk
|
||||
city: London
|
||||
event: QCON
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes Intensive Course
|
||||
attend: https://qconlondon.com/london2020/workshop/kubernetes-intro
|
||||
#slides: https://qconuk2019.container.training/
|
||||
|
||||
- date: 2020-03-05
|
||||
country: uk
|
||||
city: London
|
||||
event: QCON
|
||||
speaker: jpetazzo
|
||||
title: Docker Intensive Course
|
||||
attend: https://qconlondon.com/london2020/workshop/docker-intensive-course
|
||||
#slides: https://qconuk2019.container.training/
|
||||
|
||||
- date: 2020-02-03
|
||||
country: fr
|
||||
city: Paris
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Conteneurs et Docker (in French)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/
|
||||
|
||||
- date: 2020-02-04
|
||||
country: fr
|
||||
city: Paris
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Orchestration et Kubernetes (in French)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/
|
||||
|
||||
- date: 2020-02-05
|
||||
country: fr
|
||||
city: Paris
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes et Méthodologies DevOps (in French)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/
|
||||
|
||||
- date: 2020-02-06
|
||||
country: fr
|
||||
city: Paris
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes Avancé (in French)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/
|
||||
|
||||
- date: 2020-02-07
|
||||
country: fr
|
||||
city: Paris
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (in French)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/
|
||||
|
||||
- date: [2019-11-04, 2019-11-05]
|
||||
country: de
|
||||
city: Berlin
|
||||
@@ -68,7 +5,6 @@
|
||||
speaker: jpetazzo
|
||||
title: Deploying and scaling applications with Kubernetes
|
||||
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/79109
|
||||
slides: https://velocity-2019-11.container.training/
|
||||
|
||||
- date: 2019-11-13
|
||||
country: fr
|
||||
@@ -79,38 +15,6 @@
|
||||
lang: fr
|
||||
attend: http://2019.devops-dday.com/Workshop.html
|
||||
|
||||
- date: 2019-10-30
|
||||
country: us
|
||||
city: Portland, OR
|
||||
event: LISA
|
||||
speaker: jpetazzo
|
||||
title: Deep Dive into Kubernetes Internals for Builders and Operators
|
||||
attend: https://www.usenix.org/conference/lisa19/presentation/petazzoni-tutorial
|
||||
|
||||
- date: [2019-10-22, 2019-10-24]
|
||||
country: us
|
||||
city: Charlotte, NC
|
||||
event: Ardan Labs
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes Training
|
||||
attend: https://www.eventbrite.com/e/containers-docker-and-kubernetes-training-for-devs-and-ops-charlotte-nc-november-2019-tickets-73296659281
|
||||
|
||||
- date: 2019-10-22
|
||||
country: us
|
||||
city: Charlotte, NC
|
||||
event: Ardan Labs
|
||||
speaker: jpetazzo
|
||||
title: Docker & Containers Training
|
||||
attend: https://www.eventbrite.com/e/containers-docker-and-kubernetes-training-for-devs-and-ops-charlotte-nc-november-2019-tickets-73296659281
|
||||
|
||||
- date: 2019-10-22
|
||||
country: de
|
||||
city: Berlin
|
||||
event: GOTO
|
||||
speaker: bretfisher
|
||||
title: Kubernetes or Swarm? Build Both, Deploy Apps, Learn The Differences
|
||||
attend: https://gotober.com/2019/workshops/194
|
||||
|
||||
- date: [2019-09-24, 2019-09-25]
|
||||
country: fr
|
||||
city: Paris
|
||||
@@ -119,34 +23,6 @@
|
||||
title: Déployer ses applications avec Kubernetes (in French)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
|
||||
slides: https://kube-2019-09.container.training/
|
||||
|
||||
- date: 2019-08-27
|
||||
country: tr
|
||||
city: Izmir
|
||||
event: HacknBreak
|
||||
speaker: gurayyildirim
|
||||
title: Deploying and scaling applications with Kubernetes (in Turkish)
|
||||
lang: tr
|
||||
attend: https://hacknbreak.com
|
||||
|
||||
- date: 2019-08-26
|
||||
country: tr
|
||||
city: Izmir
|
||||
event: HacknBreak
|
||||
speaker: gurayyildirim
|
||||
title: Container Orchestration with Docker and Swarm (in Turkish)
|
||||
lang: tr
|
||||
attend: https://hacknbreak.com
|
||||
|
||||
- date: 2019-08-25
|
||||
country: tr
|
||||
city: Izmir
|
||||
event: HackBreak
|
||||
speaker: gurayyildirim
|
||||
title: Introduction to Docker and Containers (in Turkish)
|
||||
lang: tr
|
||||
attend: https://hacknbreak.com
|
||||
|
||||
- date: 2019-07-16
|
||||
country: us
|
||||
|
||||
@@ -9,8 +9,6 @@ gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
@@ -20,48 +18,46 @@ chapters:
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
-
|
||||
#- containers/Docker_Overview.md
|
||||
- - containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
#- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
#- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Start_And_Attach.md
|
||||
- - containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- - containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Container_Networking_Basics.md
|
||||
#- containers/Network_Drivers.md
|
||||
#- containers/Container_Network_Model.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Multi_Stage_Builds.md
|
||||
#- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Advanced_Dockerfiles.md
|
||||
#- containers/Init_Systems.md
|
||||
#- containers/Application_Configuration.md
|
||||
#- containers/Logging.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Resource_Limits.md
|
||||
- - containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
#- containers/Connecting_Containers_With_Links.md
|
||||
- containers/Ambassadors.md
|
||||
- - containers/Local_Development_Workflow.md
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- - containers/Docker_Machine.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- - containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Container_Engines.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
- - containers/Container_Engines.md
|
||||
#- containers/Ecosystem.md
|
||||
#- containers/Orchestration_Overview.md
|
||||
-
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
|
||||
@@ -9,8 +9,6 @@ gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
@@ -52,7 +50,6 @@ chapters:
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Resource_Limits.md
|
||||
@@ -60,7 +57,6 @@ chapters:
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
- - containers/Container_Engines.md
|
||||
- containers/Pods_Anatomy.md
|
||||
- containers/Ecosystem.md
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
|
||||
@@ -118,9 +118,9 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
<!--
|
||||
```wait Connected to localhost```
|
||||
```keys INFO server```
|
||||
```key ^J```
|
||||
```keys ^J```
|
||||
```keys QUIT```
|
||||
```key ^J```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
- Terminate the port forwarder:
|
||||
|
||||
@@ -20,7 +20,7 @@ The control plane can run:
|
||||
|
||||
- in containers, on the same nodes that run other application workloads
|
||||
|
||||
(example: [Minikube](https://github.com/kubernetes/minikube); 1 node runs everything, [kind](https://kind.sigs.k8s.io/))
|
||||
(example: Minikube; 1 node runs everything)
|
||||
|
||||
- on a dedicated node
|
||||
|
||||
@@ -28,7 +28,7 @@ The control plane can run:
|
||||
|
||||
- on a dedicated set of nodes
|
||||
|
||||
(example: [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way); [kops](https://github.com/kubernetes/kops))
|
||||
(example: Kubernetes The Hard Way; kops)
|
||||
|
||||
- outside of the cluster
|
||||
|
||||
|
||||
@@ -547,7 +547,7 @@ It's important to note a couple of details in these flags...
|
||||
|
||||
- Exit the container with `exit` or `^D`
|
||||
|
||||
<!-- ```key ^D``` -->
|
||||
<!-- ```keys ^D``` -->
|
||||
|
||||
]
|
||||
|
||||
@@ -667,12 +667,17 @@ class: extra-details
|
||||
|
||||
- For auditing purposes, sometimes we want to know who can perform an action
|
||||
|
||||
- There are a few tools to help us with that
|
||||
- There is a proof-of-concept tool by Aqua Security which does exactly that:
|
||||
|
||||
- [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
|
||||
https://github.com/aquasecurity/kubectl-who-can
|
||||
|
||||
- [Review Access (aka Rakkess)](https://github.com/corneliusweig/rakkess)
|
||||
- This is one way to install it:
|
||||
```bash
|
||||
docker run --rm -v /usr/local/bin:/go/bin golang \
|
||||
go get -v github.com/aquasecurity/kubectl-who-can
|
||||
```
|
||||
|
||||
- Both are available as standalone programs, or as plugins for `kubectl`
|
||||
|
||||
(`kubectl` plugins can be installed and managed with `krew`)
|
||||
- This is one way to use it:
|
||||
```bash
|
||||
kubectl-who-can create pods
|
||||
```
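
If the plugin route is preferred, installation and usage could look like this (the krew plugin names are assumptions):
```bash
kubectl krew install who-can
kubectl krew install access-matrix   # krew name for Rakkess

kubectl who-can create pods
kubectl access-matrix -n default
```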
|
||||
|
||||
@@ -109,7 +109,7 @@ spec:
|
||||
|
||||
<!--
|
||||
```longwait latest: digest: sha256:```
|
||||
```key ^C```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
@@ -174,7 +174,7 @@ spec:
|
||||
|
||||
<!--
|
||||
```longwait registry:5000/rng-kaniko:latest:```
|
||||
```key ^C```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
@@ -15,3 +15,26 @@
|
||||
- `dockercoins/webui:v0.1`
|
||||
|
||||
- `dockercoins/worker:v0.1`
|
||||
|
||||
---
|
||||
|
||||
## Setting `$REGISTRY` and `$TAG`
|
||||
|
||||
- In the upcoming exercises and labs, we use a couple of environment variables:
|
||||
|
||||
- `$REGISTRY` as a prefix to all image names
|
||||
|
||||
- `$TAG` as the image version tag
|
||||
|
||||
- For example, the worker image is `$REGISTRY/worker:$TAG`
|
||||
|
||||
- If you copy-paste the commands in these exercises:
|
||||
|
||||
**make sure that you set `$REGISTRY` and `$TAG` first!**
|
||||
|
||||
- For example:
|
||||
```
|
||||
export REGISTRY=dockercoins TAG=v0.1
|
||||
```
|
||||
|
||||
(this will expand `$REGISTRY/worker:$TAG` to `dockercoins/worker:v0.1`)
|
||||
|
||||
@@ -360,7 +360,3 @@ docker run --rm --net host -v $PWD:/vol \
|
||||
- [kube-backup](https://github.com/pieterlange/kube-backup)
|
||||
|
||||
simple scripts to save resource YAML to a git repository
|
||||
|
||||
- [bivac](https://github.com/camptocamp/bivac)
|
||||
|
||||
Backup Interface for Volumes Attached to Containers
|
||||
|
||||
@@ -154,7 +154,7 @@ class: extra-details
|
||||
|
||||
- "Running Kubernetes without nodes"
|
||||
|
||||
- Systems like [Virtual Kubelet](https://virtual-kubelet.io/) or [Kiyot](https://static.elotl.co/docs/latest/kiyot/kiyot.html) can run pods using on-demand resources
|
||||
- Systems like [Virtual Kubelet](https://virtual-kubelet.io/) or Kiyot can run pods using on-demand resources
|
||||
|
||||
- Virtual Kubelet can leverage e.g. ACI or Fargate to run pods
|
||||
|
||||
|
||||
@@ -10,8 +10,6 @@
|
||||
|
||||
- Components can be upgraded one at a time without problems
|
||||
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
---
|
||||
|
||||
## Checking what we're running
|
||||
@@ -81,7 +79,7 @@
|
||||
|
||||
## What version are we running anyway?
|
||||
|
||||
- When I say, "I'm running Kubernetes 1.15", is that the version of:
|
||||
- When I say, "I'm running Kubernetes 1.11", is that the version of:
|
||||
|
||||
- kubectl
|
||||
|
||||
@@ -139,73 +137,6 @@
|
||||
|
||||
---
|
||||
|
||||
## Important questions
|
||||
|
||||
- Should we upgrade the control plane before or after the kubelets?
|
||||
|
||||
- Within the control plane, should we upgrade the API server first or last?
|
||||
|
||||
- How often should we upgrade?
|
||||
|
||||
- How long are versions maintained?
|
||||
|
||||
- All the answers are in [the documentation about version skew policy](https://kubernetes.io/docs/setup/release/version-skew-policy/)!
|
||||
|
||||
- Let's review the key elements together ...
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes uses semantic versioning
|
||||
|
||||
- Kubernetes versions look like MAJOR.MINOR.PATCH; e.g. in 1.17.2:
|
||||
|
||||
- MAJOR = 1
|
||||
- MINOR = 17
|
||||
- PATCH = 2
|
||||
|
||||
- It's always possible to mix and match different PATCH releases
|
||||
|
||||
(e.g. 1.16.1 and 1.16.6 are compatible)
|
||||
|
||||
- It is recommended to run the latest PATCH release
|
||||
|
||||
(but it's mandatory only when there is a security advisory)
|
||||
|
||||
---
|
||||
|
||||
## Version skew
|
||||
|
||||
- API server must be more recent than its clients (kubelet and control plane)
|
||||
|
||||
- ... Which means it must always be upgraded first
|
||||
|
||||
- All components support a difference of one¹ MINOR version
|
||||
|
||||
- This allows live upgrades (since we can mix e.g. 1.15 and 1.16)
|
||||
|
||||
- It also means that going from 1.14 to 1.16 requires going through 1.15
|
||||
|
||||
.footnote[¹Except kubelet, which can be up to two MINOR behind API server,
|
||||
and kubectl, which can be one MINOR ahead or behind API server.]
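
To see where a given cluster stands, comparing the versions in play is usually enough (a sketch; `--short` may not exist in every kubectl release):

```bash
# Client and API server versions
kubectl version --short

# Kubelet version reported by each node (VERSION column)
kubectl get nodes -o wide
```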
|
||||
|
||||
---
|
||||
|
||||
## Release cycle
|
||||
|
||||
- There is a new PATCH release whenever necessary
|
||||
|
||||
(every few weeks, or "ASAP" when there is a security vulnerability)
|
||||
|
||||
- There is a new MINOR release every 3 months (approximately)
|
||||
|
||||
- At any given time, three MINOR releases are maintained
|
||||
|
||||
- ... Which means that MINOR releases are maintained for approximately 9 months
|
||||
|
||||
- We should expect to upgrade at least every 3 months (on average)
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We are going to update a few cluster components
|
||||
@@ -218,6 +149,47 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
|
||||
---
|
||||
|
||||
## Updating kubelet
|
||||
|
||||
- These nodes have been installed using the official Kubernetes packages
|
||||
|
||||
- We can therefore use `apt` or `apt-get`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into node `test3`
|
||||
|
||||
- View available versions for package `kubelet`:
|
||||
```bash
|
||||
apt show kubelet -a | grep ^Version
|
||||
```
|
||||
|
||||
- Upgrade kubelet:
|
||||
```bash
|
||||
apt install kubelet=1.14.2-00
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into node `test1`
|
||||
|
||||
- Check node versions:
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
```
|
||||
|
||||
- Create a deployment and scale it to make sure that the node still works
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating the API server
|
||||
|
||||
- This cluster has been deployed with kubeadm
|
||||
@@ -254,7 +226,7 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
|
||||
```
|
||||
|
||||
- Look for the `image:` line, and update it to e.g. `v1.16.0`
|
||||
- Look for the `image:` line, and update it to e.g. `v1.14.0`
|
||||
|
||||
]
|
||||
|
||||
@@ -275,27 +247,9 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
|
||||
---
|
||||
|
||||
## Was that a good idea?
|
||||
|
||||
--
|
||||
|
||||
**No!**
|
||||
|
||||
--
|
||||
|
||||
- Remember the guideline we gave earlier:
|
||||
|
||||
*To update a component, use whatever was used to install it.*
|
||||
|
||||
- This control plane was deployed with kubeadm
|
||||
|
||||
- We should use kubeadm to upgrade it!
|
||||
|
||||
---
|
||||
|
||||
## Updating the whole control plane
|
||||
|
||||
- Let's make it right, and use kubeadm to upgrade the entire control plane
|
||||
- As an example, we'll use kubeadm to upgrade the entire control plane
|
||||
|
||||
(note: this is possible only because the cluster was installed with kubeadm)
|
||||
|
||||
@@ -306,167 +260,35 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
sudo kubeadm upgrade plan
|
||||
```
|
||||
|
||||
]
|
||||
(Note: kubeadm is confused by our manual upgrade of the API server.
|
||||
<br/>It thinks the cluster is running 1.14.0!)
|
||||
|
||||
Note 1: kubeadm thinks that our cluster is running 1.16.0.
|
||||
<br/>It is confused by our manual upgrade of the API server!
|
||||
|
||||
Note 2: kubeadm itself is still version 1.15.9.
|
||||
<br/>It doesn't know how to upgrade to 1.16.X.
|
||||
|
||||
---
|
||||
|
||||
## Upgrading kubeadm
|
||||
|
||||
- First things first: we need to upgrade kubeadm
|
||||
|
||||
.exercise[
|
||||
|
||||
- Upgrade kubeadm:
|
||||
```
|
||||
sudo apt install kubeadm
|
||||
```
|
||||
|
||||
- Check what kubeadm tells us:
|
||||
```
|
||||
sudo kubeadm upgrade plan
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Problem: kubeadm doesn't know how to handle
|
||||
upgrades from version 1.15.
|
||||
|
||||
This is because we installed version 1.17 (or even later).
|
||||
|
||||
We need to install kubeadm version 1.16.X.
|
||||
|
||||
---
|
||||
|
||||
## Downgrading kubeadm
|
||||
|
||||
- We need to go back to version 1.16.X (e.g. 1.16.6)
|
||||
|
||||
.exercise[
|
||||
|
||||
- View available versions for package `kubeadm`:
|
||||
```bash
|
||||
apt show kubeadm -a | grep ^Version | grep 1.16
|
||||
```
|
||||
|
||||
- Downgrade kubeadm:
|
||||
```
|
||||
sudo apt install kubeadm=1.16.6-00
|
||||
```
|
||||
|
||||
- Check what kubeadm tells us:
|
||||
```
|
||||
sudo kubeadm upgrade plan
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
kubeadm should now agree to upgrade to 1.16.6.
|
||||
|
||||
---
|
||||
|
||||
## Upgrading the cluster with kubeadm
|
||||
|
||||
- Ideally, we should revert our `image:` change
|
||||
|
||||
(so that kubeadm executes the right migration steps)
|
||||
|
||||
- Or we can try the upgrade anyway
|
||||
|
||||
.exercise[
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
- Perform the upgrade:
|
||||
```bash
|
||||
sudo kubeadm upgrade apply v1.16.6
|
||||
sudo kubeadm upgrade apply v1.14.2
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating kubelet
|
||||
## Updating kubelets
|
||||
|
||||
- These nodes have been installed using the official Kubernetes packages
|
||||
- After updating the control plane, we need to update each kubelet
|
||||
|
||||
- We can therefore use `apt` or `apt-get`
|
||||
- This requires running a special command on each node, to download the config
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into node `test3`
|
||||
|
||||
- View available versions for package `kubelet`:
|
||||
```bash
|
||||
apt show kubelet -a | grep ^Version
|
||||
```
|
||||
|
||||
- Upgrade kubelet:
|
||||
```bash
|
||||
sudo apt install kubelet=1.16.6-00
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into node `test1`
|
||||
|
||||
- Check node versions:
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
```
|
||||
|
||||
- Create a deployment and scale it to make sure that the node still works
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Was that a good idea?
|
||||
|
||||
--
|
||||
|
||||
**Almost!**
|
||||
|
||||
--
|
||||
|
||||
- Yes, kubelet was installed with distribution packages
|
||||
|
||||
- However, kubeadm took care of configuring kubelet
|
||||
|
||||
(when doing `kubeadm join ...`)
|
||||
|
||||
- We were supposed to run a special command *before* upgrading kubelet!
|
||||
|
||||
- That command should be executed on each node
|
||||
|
||||
- It will download the kubelet configuration generated by kubeadm
|
||||
|
||||
---
|
||||
|
||||
## Upgrading kubelet the right way
|
||||
|
||||
- We need to upgrade kubeadm, upgrade kubelet config, then upgrade kubelet
|
||||
|
||||
(after upgrading the control plane)
|
||||
(this config is generated by kubeadm)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Download the configuration on each node, and upgrade kubelet:
|
||||
```bash
|
||||
for N in 1 2 3; do
|
||||
ssh test$N "
|
||||
sudo apt install kubeadm=1.16.6-00 &&
|
||||
sudo kubeadm upgrade node &&
|
||||
sudo apt install kubelet=1.16.6-00"
|
||||
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
|
||||
ssh test$N sudo apt install kubelet=1.14.2-00
|
||||
done
|
||||
```
|
||||
]
|
||||
@@ -475,7 +297,7 @@ kubeadm should now agree to upgrade to 1.16.6.
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- All our nodes should now be updated to version 1.16.6
|
||||
- All our nodes should now be updated to version 1.14.2
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -485,19 +307,3 @@ kubeadm should now agree to upgrade to 1.16.6.
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Skipping versions
|
||||
|
||||
- This example worked because we went from 1.15 to 1.16
|
||||
|
||||
- If you are upgrading from e.g. 1.14, you will have to go through 1.15 first
|
||||
|
||||
- This means upgrading kubeadm to 1.15.X, then using it to upgrade the cluster
|
||||
|
||||
- Then upgrading kubeadm to 1.16.X, and so on (see the sketch below)
|
||||
|
||||
- **Make sure to read the release notes before upgrading!**
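
As a rough sketch of that stepwise path (the package and release versions below are placeholders, not exact values to use):

```bash
# Hypothetical example: going from 1.14 to 1.16 one MINOR version at a time
sudo apt install kubeadm=1.15.9-00
sudo kubeadm upgrade plan
sudo kubeadm upgrade apply v1.15.9
sudo apt install kubeadm=1.16.6-00
sudo kubeadm upgrade apply v1.16.6
```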
|
||||
|
||||
@@ -10,29 +10,6 @@
|
||||
|
||||
---
|
||||
|
||||
## What can we do with Kubernetes?
|
||||
|
||||
- Let's imagine that we have a 3-tier e-commerce app:
|
||||
|
||||
- web frontend
|
||||
|
||||
- API backend
|
||||
|
||||
- database (that we will keep out of Kubernetes for now)
|
||||
|
||||
- We have built images for our frontend and backend components
|
||||
|
||||
(e.g. with Dockerfiles and `docker build`)
|
||||
|
||||
- We are running them successfully with a local environment
|
||||
|
||||
(e.g. with Docker Compose)
|
||||
|
||||
- Let's see how we would deploy our app on Kubernetes!
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Basic things we can ask Kubernetes to do
|
||||
|
||||
--
|
||||
@@ -67,37 +44,21 @@
|
||||
|
||||
## Other things that Kubernetes can do for us
|
||||
|
||||
- Autoscaling
|
||||
- Basic autoscaling
|
||||
|
||||
(straightforward on CPU; more complex on other metrics)
|
||||
- Blue/green deployment, canary deployment
|
||||
|
||||
- Resource management and scheduling
|
||||
- Long running services, but also batch (one-off) jobs
|
||||
|
||||
(reserve CPU/RAM for containers; placement constraints)
|
||||
- Overcommit our cluster and *evict* low-priority jobs
|
||||
|
||||
- Advanced rollout patterns
|
||||
- Run services with *stateful* data (databases etc.)
|
||||
|
||||
(blue/green deployment, canary deployment)
|
||||
- Fine-grained access control defining *what* can be done by *whom* on *which* resources
|
||||
|
||||
---
|
||||
- Integrating third party services (*service catalog*)
|
||||
|
||||
## More things that Kubernetes can do for us
|
||||
|
||||
- Batch jobs
|
||||
|
||||
(one-off; parallel; also cron-style periodic execution)
|
||||
|
||||
- Fine-grained access control
|
||||
|
||||
(defining *what* can be done by *whom* on *which* resources)
|
||||
|
||||
- Stateful services
|
||||
|
||||
(databases, message queues, etc.)
|
||||
|
||||
- Automating complex tasks with *operators*
|
||||
|
||||
(e.g. database replication, failover, etc.)
|
||||
- Automating complex tasks (*operators*)
|
||||
|
||||
---
|
||||
|
||||
@@ -222,30 +183,6 @@ class: extra-details
|
||||
|
||||
class: extra-details
|
||||
|
||||
## How many nodes should a cluster have?
|
||||
|
||||
- There is no particular constraint
|
||||
|
||||
(no need to have an odd number of nodes for quorum)
|
||||
|
||||
- A cluster can have zero nodes
|
||||
|
||||
(but then it won't be able to start any pods)
|
||||
|
||||
- For testing and development, having a single node is fine
|
||||
|
||||
- For production, make sure that you have extra capacity
|
||||
|
||||
(so that your workload still fits if you lose a node or a group of nodes)
|
||||
|
||||
- Kubernetes is tested with [up to 5000 nodes](https://kubernetes.io/docs/setup/best-practices/cluster-large/)
|
||||
|
||||
(however, running a cluster of that size requires a lot of tuning)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
No!
|
||||
@@ -254,29 +191,11 @@ No!
|
||||
|
||||
- By default, Kubernetes uses the Docker Engine to run containers
|
||||
|
||||
- We can leverage other pluggable runtimes through the *Container Runtime Interface*
|
||||
- We could also use `rkt` ("Rocket") from CoreOS
|
||||
|
||||
- <del>We could also use `rkt` ("Rocket") from CoreOS</del> (deprecated)
|
||||
- Or leverage other pluggable runtimes through the *Container Runtime Interface*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Some runtimes available through CRI
|
||||
|
||||
- [containerd](https://github.com/containerd/containerd/blob/master/README.md)
|
||||
|
||||
- maintained by Docker, IBM, and community
|
||||
- used by Docker Engine, microk8s, k3s, GKE; also standalone
|
||||
- comes with its own CLI, `ctr`
|
||||
|
||||
- [CRI-O](https://github.com/cri-o/cri-o/blob/master/README.md):
|
||||
|
||||
- maintained by Red Hat, SUSE, and community
|
||||
- used by OpenShift and Kubic
|
||||
- designed specifically as a minimal runtime for Kubernetes
|
||||
|
||||
- [And more](https://kubernetes.io/docs/setup/production-environment/container-runtimes/)
|
||||
(like CRI-O, or containerd)
|
||||
|
||||
---
|
||||
|
||||
@@ -346,48 +265,6 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
## Scaling
|
||||
|
||||
- How would we scale the pod shown on the previous slide?
|
||||
|
||||
- **Do** create additional pods
|
||||
|
||||
- each pod can be on a different node
|
||||
|
||||
- each pod will have its own IP address
|
||||
|
||||
- **Do not** add more NGINX containers in the pod
|
||||
|
||||
- all the NGINX containers would be on the same node
|
||||
|
||||
- they would all have the same IP address
|
||||
<br/>(resulting in `Address already in use` errors)
|
||||
|
||||
---
|
||||
|
||||
## Together or separate
|
||||
|
||||
- Should we put e.g. a web application server and a cache together?
|
||||
<br/>
|
||||
("cache" being something like e.g. Memcached or Redis)
|
||||
|
||||
- Putting them **in the same pod** means:
|
||||
|
||||
- they have to be scaled together
|
||||
|
||||
- they can communicate very efficiently over `localhost`
|
||||
|
||||
- Putting them **in different pods** means:
|
||||
|
||||
- they can be scaled separately
|
||||
|
||||
- they must communicate over remote IP addresses
|
||||
<br/>(incurring more latency, lower performance)
|
||||
|
||||
- Both scenarios can make sense, depending on our goals
|
||||
|
||||
---
|
||||
|
||||
## Credits
|
||||
|
||||
- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
|
||||
|
||||
@@ -193,12 +193,7 @@
|
||||
|
||||
- Best practice: set a memory limit, and pass it to the runtime
|
||||
|
||||
- Note: recent versions of the JVM can do this automatically
|
||||
|
||||
(see [JDK-8146115](https://bugs.java.com/bugdatabase/view_bug.do?bug_id=JDK-8146115))
|
||||
and
|
||||
[this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/)
|
||||
for detailed examples)
|
||||
(see [this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/) for a detailed example)
|
||||
|
||||
---
|
||||
|
||||
|
||||
114
slides/k8s/create-chart.md
Normal file
@@ -0,0 +1,114 @@
|
||||
## Creating a chart
|
||||
|
||||
- We are going to show a way to create a *very simplified* chart
|
||||
|
||||
- In a real chart, *lots of things* would be templatized
|
||||
|
||||
(Resource names, service types, number of replicas...)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a sample chart:
|
||||
```bash
|
||||
helm create dockercoins
|
||||
```
|
||||
|
||||
- Move away the sample templates and create an empty template directory:
|
||||
```bash
|
||||
mv dockercoins/templates dockercoins/default-templates
|
||||
mkdir dockercoins/templates
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Exporting the YAML for our application
|
||||
|
||||
- The following section assumes that DockerCoins is currently running
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create one YAML file for each resource that we need:
|
||||
.small[
|
||||
```bash
|
||||
|
||||
while read kind name; do
|
||||
kubectl get -o yaml $kind $name > dockercoins/templates/$name-$kind.yaml
|
||||
done <<EOF
|
||||
deployment worker
|
||||
deployment hasher
|
||||
daemonset rng
|
||||
deployment webui
|
||||
deployment redis
|
||||
service hasher
|
||||
service rng
|
||||
service webui
|
||||
service redis
|
||||
EOF
|
||||
```
|
||||
]
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing our helm chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's install our helm chart! (`dockercoins` is the path to the chart)
|
||||
```
|
||||
helm install dockercoins
|
||||
```
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
- Since the application is already deployed, this will fail:<br>
|
||||
`Error: release loitering-otter failed: services "hasher" already exists`
|
||||
|
||||
- To avoid naming conflicts, we will deploy the application in another *namespace*
|
||||
|
||||
---
|
||||
|
||||
## Switching to another namespace
|
||||
|
||||
- We can create a new namespace and switch to it
|
||||
|
||||
(Helm will automatically use the namespace specified in our context)
|
||||
|
||||
- We can also tell Helm which namespace to use
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tell Helm to use a specific namespace:
|
||||
```bash
|
||||
helm install dockercoins --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=magenta
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
367
slides/k8s/create-more-charts.md
Normal file
@@ -0,0 +1,367 @@
|
||||
# Creating Helm charts
|
||||
|
||||
- We are going to create a generic Helm chart
|
||||
|
||||
- We will use that Helm chart to deploy DockerCoins
|
||||
|
||||
- Each component of DockerCoins will have its own *release*
|
||||
|
||||
- In other words, we will "install" that Helm chart multiple times
|
||||
|
||||
(one time per component of DockerCoins)
|
||||
|
||||
---
|
||||
|
||||
## Creating a generic chart
|
||||
|
||||
- Rather than starting from scratch, we will use `helm create`
|
||||
|
||||
- This will give us a basic chart that we will customize
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a basic chart:
|
||||
```bash
|
||||
cd ~
|
||||
helm create helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
This creates a basic chart in the directory `helmcoins`.
|
||||
|
||||
---
|
||||
|
||||
## What's in the basic chart?
|
||||
|
||||
- The basic chart will create a Deployment and a Service
|
||||
|
||||
- Optionally, it will also include an Ingress
|
||||
|
||||
- If we don't pass any values, it will deploy the `nginx` image
|
||||
|
||||
- We can override many things in that chart
|
||||
|
||||
- Let's try to deploy DockerCoins components with that chart!
|
||||
|
||||
---
|
||||
|
||||
## Writing `values.yaml` for our components
|
||||
|
||||
- We need to write one `values.yaml` file for each component
|
||||
|
||||
(hasher, redis, rng, webui, worker)
|
||||
|
||||
- We will start with the `values.yaml` of the chart, and remove what we don't need
|
||||
|
||||
- We will create 5 files:
|
||||
|
||||
hasher.yaml, redis.yaml, rng.yaml, webui.yaml, worker.yaml
|
||||
|
||||
---
|
||||
|
||||
## Getting started
|
||||
|
||||
- For component X, we want to use the image dockercoins/X:v0.1
|
||||
|
||||
(for instance, for rng, we want to use the image dockercoins/rng:v0.1)
|
||||
|
||||
- Exception: for redis, we want to use the official image redis:latest
|
||||
|
||||
.exercise[
|
||||
|
||||
- Write minimal YAML files for the 5 components, specifying only the image
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
*Hint: our YAML files should look like this.*
|
||||
|
||||
```yaml
|
||||
### rng.yaml
|
||||
image:
|
||||
repository: dockercoins/`rng`
|
||||
tag: v0.1
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Deploying DockerCoins components
|
||||
|
||||
- For convenience, let's work in a separate namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace helmcoins
|
||||
```
|
||||
|
||||
- Switch to that namespace:
|
||||
```bash
|
||||
kns helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the chart
|
||||
|
||||
- To install a chart, we can use the following command:
|
||||
```bash
|
||||
helm install [--name `X`] <chart>
|
||||
```
|
||||
|
||||
- We can also use the following command, which is idempotent:
|
||||
```bash
|
||||
helm upgrade --install `X` chart
|
||||
```
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the 5 components of DockerCoins:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade --install $COMPONENT helmcoins/ --values=$COMPONENT.yaml
|
||||
done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- Let's see if DockerCoins is working!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the logs of the worker:
|
||||
```bash
|
||||
stern worker
|
||||
```
|
||||
|
||||
- Look at the resources that were created:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
There are *many* issues to fix!
|
||||
|
||||
---
|
||||
|
||||
## Service names
|
||||
|
||||
- Our services should be named `rng`, `hasher`, etc., but they are named differently
|
||||
|
||||
- Look at the YAML template used for the services
|
||||
|
||||
- Does it look like we can override the name of the services?
|
||||
|
||||
--
|
||||
|
||||
- *Yes*, we can use `.Values.nameOverride`
|
||||
|
||||
- This means setting `nameOverride` in the values YAML file
|
||||
|
||||
---
|
||||
|
||||
## Setting service names
|
||||
|
||||
- Let's add `nameOverride: X` in each values YAML file!
|
||||
|
||||
(where X is hasher, redis, rng, etc.)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the 5 YAML files to add `nameOverride: X`
|
||||
|
||||
- Deploy the updated Chart:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade --install $COMPONENT helmcoins/ --values=$COMPONENT.yaml
|
||||
done
|
||||
```
|
||||
(Yes, this is exactly the same command as before!)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the service names:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
Great! (We have a useless service for `worker`, but let's ignore it for now.)
|
||||
|
||||
- Check the state of the pods:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
Not so great... Some pods are *not ready.*
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting pods
|
||||
|
||||
- The easiest way to troubleshoot pods is to look at *events*
|
||||
|
||||
- We can look at all the events on the cluster (with `kubectl get events`)
|
||||
|
||||
- Or we can use `kubectl describe` on the objects that have problems
|
||||
|
||||
(`kubectl describe` will retrieve the events related to the object)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the events for the redis pods:
|
||||
```bash
|
||||
kubectl describe pod -l app.kubernetes.io/name=redis
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
What's going on?
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks
|
||||
|
||||
- The default chart defines healthchecks doing HTTP requests on port 80
|
||||
|
||||
- That won't work for redis and worker
|
||||
|
||||
(redis is not HTTP, and not on port 80; worker doesn't even listen)
|
||||
|
||||
--
|
||||
|
||||
- We could comment out the healthchecks
|
||||
|
||||
- We could also make them conditional
|
||||
|
||||
- This sounds more interesting, let's do that!
|
||||
|
||||
---
|
||||
|
||||
## Conditionals
|
||||
|
||||
- We need to enclose the healthcheck block with:
|
||||
|
||||
`{{ if CONDITION }}` at the beginning
|
||||
|
||||
`{{ end }}` at the end
|
||||
|
||||
- For the condition, we will use `.Values.healthcheck`
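
Here is a minimal sketch of what the wrapped section could look like in `deployment.yaml` (the probe details shown follow the chart generated by `helm create`; yours may differ):

```yaml
          # Only render the probes when healthcheck is set to true in the values file
          {{ if .Values.healthcheck }}
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          {{ end }}
```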
|
||||
|
||||
---
|
||||
|
||||
## Updating the deployment template
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- Before the healthchecks section (it starts with `livenessProbe:`), add:
|
||||
|
||||
`{{ if .Values.healthcheck }}`
|
||||
|
||||
- After the healthchecks section (just before `resources:`), add:
|
||||
|
||||
`{{ end }}`
|
||||
|
||||
- Edit `hasher.yaml`, `rng.yaml`, `webui.yaml` to add:
|
||||
|
||||
`healthcheck: true`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Update the deployed charts
|
||||
|
||||
- We can now apply the new templates (and the new values)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Use the same command as earlier to upgrade all five components
|
||||
|
||||
- Use `kubectl describe` to confirm that `redis` starts correctly
|
||||
|
||||
- Use `kubectl describe` to confirm that `hasher` still has healthchecks
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Is it working now?
|
||||
|
||||
- If we look at the worker logs, it appears that the worker is still stuck
|
||||
|
||||
- What could be happening?
|
||||
|
||||
--
|
||||
|
||||
- The redis service is not on port 80!
|
||||
|
||||
- We need to update the port number in redis.yaml
|
||||
|
||||
- We also need to update the port number in deployment.yaml
|
||||
|
||||
(it is hard-coded to 80 there)
|
||||
|
||||
---
|
||||
|
||||
## Setting the redis port
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `redis.yaml` to add:
|
||||
```yaml
|
||||
service:
|
||||
port: 6379
|
||||
```
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- The line with `containerPort` should be:
|
||||
```yaml
|
||||
containerPort: {{ .Values.service.port }}
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Apply changes
|
||||
|
||||
- Re-run the for loop to execute `helm upgrade` one more time
|
||||
|
||||
- Check the worker logs
|
||||
|
||||
- This time, it should be working!
|
||||
|
||||
---
|
||||
|
||||
## Extra steps
|
||||
|
||||
- We don't need to create a service for the worker
|
||||
|
||||
- We can put the whole service block in a conditional
|
||||
|
||||
(this will require additional changes in other files referencing the service)
|
||||
|
||||
- We can set the webui to be a NodePort service
|
||||
|
||||
- We can change the number of workers with `replicaCount`
|
||||
|
||||
- And much more!
|
||||
@@ -52,7 +52,7 @@
|
||||
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
- Unfortunately, as of Kubernetes 1.17, the CLI cannot create daemon sets
|
||||
- Unfortunately, as of Kubernetes 1.15, the CLI cannot create daemon sets
|
||||
|
||||
--
|
||||
|
||||
@@ -110,22 +110,20 @@
|
||||
```bash vim rng.yml```
|
||||
```wait kind: Deployment```
|
||||
```keys /Deployment```
|
||||
```key ^J```
|
||||
```keys ^J```
|
||||
```keys cwDaemonSet```
|
||||
```key ^[``` ]
|
||||
```keys ^[``` ]
|
||||
```keys :wq```
|
||||
```key ^J```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
- Save, quit
|
||||
|
||||
- Try to create our new resource:
|
||||
```bash
|
||||
```
|
||||
kubectl apply -f rng.yml
|
||||
```
|
||||
|
||||
<!-- ```wait error:``` -->
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
@@ -427,7 +425,7 @@ class: extra-details
|
||||
|
||||
- We need to change the selector of the `rng` service!
|
||||
|
||||
- Let's add another label to that selector (e.g. `active=yes`)
|
||||
- Let's add another label to that selector (e.g. `enabled=yes`)
|
||||
|
||||
---
|
||||
|
||||
@@ -445,11 +443,11 @@ class: extra-details
|
||||
|
||||
## The plan
|
||||
|
||||
1. Add the label `active=yes` to all our `rng` pods
|
||||
1. Add the label `enabled=yes` to all our `rng` pods
|
||||
|
||||
2. Update the selector for the `rng` service to also include `active=yes`
|
||||
2. Update the selector for the `rng` service to also include `enabled=yes`
|
||||
|
||||
3. Toggle traffic to a pod by manually adding/removing the `active` label
|
||||
3. Toggle traffic to a pod by manually adding/removing the `enabled` label
|
||||
|
||||
4. Profit!
|
||||
|
||||
@@ -464,7 +462,7 @@ be any interruption.*
|
||||
|
||||
## Adding labels to pods
|
||||
|
||||
- We want to add the label `active=yes` to all pods that have `app=rng`
|
||||
- We want to add the label `enabled=yes` to all pods that have `app=rng`
|
||||
|
||||
- We could edit each pod one by one with `kubectl edit` ...
|
||||
|
||||
@@ -474,9 +472,9 @@ be any interruption.*
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add `active=yes` to all pods that have `app=rng`:
|
||||
- Add `enabled=yes` to all pods that have `app=rng`:
|
||||
```bash
|
||||
kubectl label pods -l app=rng active=yes
|
||||
kubectl label pods -l app=rng enabled=yes
|
||||
```
|
||||
|
||||
]
|
||||
@@ -495,7 +493,7 @@ be any interruption.*
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update the service to add `active: yes` to its selector:
|
||||
- Update the service to add `enabled: yes` to its selector:
|
||||
```bash
|
||||
kubectl edit service rng
|
||||
```
|
||||
@@ -503,11 +501,11 @@ be any interruption.*
|
||||
<!--
|
||||
```wait Please edit the object below```
|
||||
```keys /app: rng```
|
||||
```key ^J```
|
||||
```keys noactive: yes```
|
||||
```key ^[``` ]
|
||||
```keys ^J```
|
||||
```keys noenabled: yes```
|
||||
```keys ^[``` ]
|
||||
```keys :wq```
|
||||
```key ^J```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
]
|
||||
@@ -530,7 +528,7 @@ be any interruption.*
|
||||
|
||||
- If we want the string `"42"` or the string `"yes"`, we have to quote them
|
||||
|
||||
- So we have to use `active: "yes"`
|
||||
- So we have to use `enabled: "yes"`
|
||||
|
||||
.footnote[For a good laugh: if we had used "ja", "oui", "si" ... as the value, it would have worked!]
|
||||
|
||||
@@ -540,18 +538,19 @@ be any interruption.*
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update the YAML manifest of the service
|
||||
|
||||
- Add `active: "yes"` to its selector
|
||||
- Update the service to add `enabled: "yes"` to its selector:
|
||||
```bash
|
||||
kubectl edit service rng
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Please edit the object below```
|
||||
```keys /yes```
|
||||
```key ^J```
|
||||
```keys cw"yes"```
|
||||
```key ^[``` ]
|
||||
```keys /app: rng```
|
||||
```keys ^J```
|
||||
```keys noenabled: "yes"```
|
||||
```keys ^[``` ]
|
||||
```keys :wq```
|
||||
```key ^J```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
]
|
||||
@@ -566,7 +565,7 @@ If we did everything correctly, the web UI shouldn't show any change.
|
||||
|
||||
- We want to disable the pod that was created by the deployment
|
||||
|
||||
- All we have to do is remove the `active` label from that pod
|
||||
- All we have to do is remove the `enabled` label from that pod
|
||||
|
||||
- To identify that pod, we can use its name
|
||||
|
||||
@@ -590,25 +589,16 @@ If we did everything correctly, the web UI shouldn't show any change.
|
||||
```bash
|
||||
POD=$(kubectl get pod -l app=rng,pod-template-hash -o name)
|
||||
kubectl logs --tail 1 --follow $POD
|
||||
|
||||
```
|
||||
(We should see a steady stream of HTTP logs)
|
||||
|
||||
<!--
|
||||
```wait HTTP/1.1```
|
||||
```tmux split-pane -v```
|
||||
-->
|
||||
|
||||
- In another window, remove the label from the pod:
|
||||
```bash
|
||||
kubectl label pod -l app=rng,pod-template-hash active-
|
||||
kubectl label pod -l app=rng,pod-template-hash enabled-
|
||||
```
|
||||
(The stream of HTTP logs should stop immediately)
|
||||
|
||||
<!--
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
There might be a slight change in the web UI (since we removed a bit
|
||||
@@ -623,7 +613,7 @@ class: extra-details
|
||||
|
||||
- If we scale up our cluster by adding new nodes, the daemon set will create more pods
|
||||
|
||||
- These pods won't have the `active=yes` label
|
||||
- These pods won't have the `enabled=yes` label
|
||||
|
||||
- If we want these pods to have that label, we need to edit the daemon set spec
|
||||
|
||||
|
||||
@@ -105,22 +105,6 @@ The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
---
|
||||
|
||||
## Other dashboards
|
||||
|
||||
- [Kube Web View](https://codeberg.org/hjacobs/kube-web-view)
|
||||
|
||||
- read-only dashboard
|
||||
|
||||
- optimized for "troubleshooting and incident response"
|
||||
|
||||
- see [vision and goals](https://kube-web-view.readthedocs.io/en/latest/vision.html#vision) for details
|
||||
|
||||
- [Kube Ops View](https://github.com/hjacobs/kube-ops-view)
|
||||
|
||||
- "provides a common operational picture for multiple Kubernetes clusters"
|
||||
|
||||
---
|
||||
|
||||
# Security implications of `kubectl apply`
|
||||
|
||||
- When we do `kubectl apply -f <URL>`, we create arbitrary resources
|
||||
@@ -172,3 +156,4 @@ The dashboard will then ask you which authentication you want to use.
|
||||
- It introduces new failure modes
|
||||
|
||||
(for instance, if you try to apply YAML from a link that's no longer valid)
|
||||
|
||||
|
||||
@@ -481,13 +481,13 @@ docker run alpine echo hello world
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the file `~/.kube/config` with `kubectl`:
|
||||
- Create the file `kubeconfig.kubelet` with `kubectl`:
|
||||
```bash
|
||||
kubectl config \
|
||||
kubectl --kubeconfig kubeconfig.kubelet config \
|
||||
set-cluster localhost --server http://localhost:8080
|
||||
kubectl config \
|
||||
kubectl --kubeconfig kubeconfig.kubelet config \
|
||||
set-context localhost --cluster localhost
|
||||
kubectl config \
|
||||
kubectl --kubeconfig kubeconfig.kubelet config \
|
||||
use-context localhost
|
||||
```
|
||||
|
||||
@@ -495,7 +495,19 @@ docker run alpine echo hello world
|
||||
|
||||
---
|
||||
|
||||
## Our `~/.kube/config` file
|
||||
## All Kubernetes clients can use `kubeconfig`
|
||||
|
||||
- The `kubeconfig.kubelet` file has the same format as e.g. `~/.kubeconfig`
|
||||
|
||||
- All Kubernetes clients can use a similar file
|
||||
|
||||
- The `kubectl config` commands can be used to manipulate these files
|
||||
|
||||
- This highlights that kubelet is a "normal" client of the API server
|
||||
|
||||
---
|
||||
|
||||
## Our `kubeconfig.kubelet` file
|
||||
|
||||
The file that we generated looks like the one below.
|
||||
|
||||
@@ -521,9 +533,9 @@ clusters:
|
||||
|
||||
.exercise[
|
||||
|
||||
- Start kubelet with that kubeconfig file:
|
||||
- Start kubelet with that `kubeconfig.kubelet` file:
|
||||
```bash
|
||||
kubelet --kubeconfig ~/.kube/config
|
||||
kubelet --kubeconfig kubeconfig.kubelet
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
@@ -1,211 +0,0 @@
|
||||
# Authoring YAML
|
||||
|
||||
- There are various ways to generate YAML with Kubernetes, e.g.:
|
||||
|
||||
- `kubectl run`
|
||||
|
||||
- `kubectl create deployment` (and a few other `kubectl create` variants)
|
||||
|
||||
- `kubectl expose`
|
||||
|
||||
- When and why do we need to write our own YAML?
|
||||
|
||||
- How do we write YAML from scratch?
|
||||
|
||||
---
|
||||
|
||||
## The limits of generated YAML
|
||||
|
||||
- Many advanced (and even not-so-advanced) features require writing YAML:
|
||||
|
||||
- pods with multiple containers
|
||||
|
||||
- resource limits
|
||||
|
||||
- healthchecks
|
||||
|
||||
- DaemonSets, StatefulSets
|
||||
|
||||
- and more!
|
||||
|
||||
- How do we access these features?
|
||||
|
||||
---
|
||||
|
||||
## We don't have to start from scratch
|
||||
|
||||
- Create a resource (e.g. Deployment)
|
||||
|
||||
- Dump its YAML with `kubectl get -o yaml ...`
|
||||
|
||||
- Edit the YAML
|
||||
|
||||
- Use `kubectl apply -f ...` with the YAML file to:
|
||||
|
||||
- update the resource (if it's the same kind)
|
||||
|
||||
- create a new resource (if it's a different kind)
|
||||
|
||||
- Or: Use The Docs, Luke
|
||||
|
||||
(the documentation almost always has YAML examples)
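
As a sketch, that round trip could look like this (resource names are examples):

```bash
# Create a resource, dump its YAML, edit it, then apply it back
kubectl create deployment web --image=nginx
kubectl get deployment web -o yaml > web.yaml
# ... edit web.yaml (change the image, add resources, etc.) ...
kubectl apply -f web.yaml
```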
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML without creating resources
|
||||
|
||||
- We can use the `--dry-run` option
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate the YAML for a Deployment without creating it:
|
||||
```bash
|
||||
kubectl create deployment web --image nginx --dry-run
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- We can clean up that YAML even more if we want
|
||||
|
||||
(for instance, we can remove the `creationTimestamp` and empty dicts)
|
||||
|
||||
---
|
||||
|
||||
## Using `--dry-run` with `kubectl apply`
|
||||
|
||||
- The `--dry-run` option can also be used with `kubectl apply`
|
||||
|
||||
- However, it can be misleading (it doesn't do a "real" dry run)
|
||||
|
||||
- Let's see what happens in the following scenario:
|
||||
|
||||
- generate the YAML for a Deployment
|
||||
|
||||
- tweak the YAML to transform it into a DaemonSet
|
||||
|
||||
- apply that YAML to see what would actually be created
|
||||
|
||||
---
|
||||
|
||||
## The limits of `kubectl apply --dry-run`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate the YAML for a deployment:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx -o yaml > web.yaml
|
||||
```
|
||||
|
||||
- Change the `kind` in the YAML to make it a `DaemonSet`:
|
||||
```bash
|
||||
sed -i s/Deployment/DaemonSet/ web.yaml
|
||||
```
|
||||
|
||||
- Ask `kubectl` what would be applied:
|
||||
```bash
|
||||
kubectl apply -f web.yaml --dry-run --validate=false -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The resulting YAML doesn't represent a valid DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
## Server-side dry run
|
||||
|
||||
- Since Kubernetes 1.13, we can use [server-side dry run and diffs](https://kubernetes.io/blog/2019/01/14/apiserver-dry-run-and-kubectl-diff/)
|
||||
|
||||
- Server-side dry run will do all the work, but *not* persist to etcd
|
||||
|
||||
(all validation and mutation hooks will be executed)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try the same YAML file as earlier, with server-side dry run:
|
||||
```bash
|
||||
kubectl apply -f web.yaml --server-dry-run --validate=false -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The resulting YAML doesn't have the `replicas` field anymore.
|
||||
|
||||
Instead, it has the fields expected in a DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
## Advantages of server-side dry run
|
||||
|
||||
- The YAML is verified much more extensively
|
||||
|
||||
- The only step that is skipped is "write to etcd"
|
||||
|
||||
- YAML that passes server-side dry run *should* apply successfully
|
||||
|
||||
(unless the cluster state changes by the time the YAML is actually applied)
|
||||
|
||||
- Validating or mutating hooks that have side effects can also be an issue
|
||||
|
||||
---
|
||||
|
||||
## `kubectl diff`
|
||||
|
||||
- Kubernetes 1.13 also introduced `kubectl diff`
|
||||
|
||||
- `kubectl diff` does a server-side dry run, *and* shows differences
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try `kubectl diff` on the YAML that we tweaked earlier:
|
||||
```bash
|
||||
kubectl diff -f web.yaml
|
||||
```
|
||||
|
||||
<!-- ```wait status:``` -->
|
||||
|
||||
]
|
||||
|
||||
Note: we don't need to specify `--validate=false` here.
|
||||
|
||||
---
|
||||
|
||||
## Advantage of YAML
|
||||
|
||||
- Using YAML (instead of `kubectl run`/`create`/etc.) allows us to be *declarative*
|
||||
|
||||
- The YAML describes the desired state of our cluster and applications
|
||||
|
||||
- YAML can be stored, versioned, archived (e.g. in git repositories)
|
||||
|
||||
- To change resources, change the YAML files
|
||||
|
||||
(instead of using `kubectl edit`/`scale`/`label`/etc.)
|
||||
|
||||
- Changes can be reviewed before being applied
|
||||
|
||||
(with code reviews, pull requests ...)
|
||||
|
||||
- This workflow is sometimes called "GitOps"
|
||||
|
||||
(there are tools like Weave Flux or GitKube to facilitate it)
|
||||
|
||||
---
|
||||
|
||||
## YAML in practice
|
||||
|
||||
- Get started with `kubectl run`/`create`/`expose`/etc.
|
||||
|
||||
- Dump the YAML with `kubectl get -o yaml`
|
||||
|
||||
- Tweak that YAML and `kubectl apply` it back
|
||||
|
||||
- Store that YAML for reference (for further deployments)
|
||||
|
||||
- Feel free to clean up the YAML:
|
||||
|
||||
- remove fields you don't know
|
||||
|
||||
- check that it still works!
|
||||
|
||||
- That YAML will be useful later when using e.g. Kustomize or Helm
|
||||
@@ -8,8 +8,6 @@ We are going to cover:
|
||||
|
||||
- Admission Webhooks
|
||||
|
||||
- The Aggregation Layer
|
||||
|
||||
---
|
||||
|
||||
## Revisiting the API server
|
||||
@@ -48,90 +46,6 @@ We are going to cover:
|
||||
|
||||
---
|
||||
|
||||
## A very simple CRD
|
||||
|
||||
The YAML below describes a very simple CRD representing different kinds of coffee:
|
||||
|
||||
```yaml
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: coffees.container.training
|
||||
spec:
|
||||
group: container.training
|
||||
version: v1alpha1
|
||||
scope: Namespaced
|
||||
names:
|
||||
plural: coffees
|
||||
singular: coffee
|
||||
kind: Coffee
|
||||
shortNames:
|
||||
- cof
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Creating a CRD
|
||||
|
||||
- Let's create the Custom Resource Definition for our Coffee resource
|
||||
|
||||
.exercise[
|
||||
|
||||
- Load the CRD:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffee-1.yaml
|
||||
```
|
||||
|
||||
- Confirm that it shows up:
|
||||
```bash
|
||||
kubectl get crds
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating custom resources
|
||||
|
||||
The YAML below defines a resource using the CRD that we just created:
|
||||
|
||||
```yaml
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: arabica
|
||||
spec:
|
||||
taste: strong
|
||||
```
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a few types of coffee beans:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffees.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Viewing custom resources
|
||||
|
||||
- By default, `kubectl get` only shows name and age of custom resources
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the coffee beans that we just created:
|
||||
```bash
|
||||
kubectl get coffees
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- We can improve that, but it's outside the scope of this section!
|
||||
|
||||
---
|
||||
|
||||
## What can we do with CRDs?
|
||||
|
||||
There are many possibilities!
|
||||
@@ -151,7 +65,7 @@ There are many possibilities!
|
||||
|
||||
- Replacing built-in types with CRDs
|
||||
|
||||
(see [this lightning talk by Tim Hockin](https://www.youtube.com/watch?v=ji0FWzFwNhA))
|
||||
(see [this lightning talk by Tim Hockin](https://www.youtube.com/watch?v=ji0FWzFwNhA&index=2&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU))
|
||||
|
||||
---
|
||||
|
||||
@@ -167,7 +81,7 @@ There are many possibilities!
|
||||
|
||||
- Generally, when creating a CRD, we also want to run a *controller*
|
||||
|
||||
(otherwise nothing will happen when we create resources of that type)
|
||||
(otherwise nothing will happen when we create resources of that type)
|
||||
|
||||
- The controller will typically *watch* our custom resources
|
||||
|
||||
@@ -181,22 +95,6 @@ Examples:
|
||||
|
||||
---
|
||||
|
||||
## (Ab)using the API server
|
||||
|
||||
- If we need to store something "safely" (as in: in etcd), we can use CRDs
|
||||
|
||||
- This gives us primitives to read/write/list objects (and optionally validate them)
|
||||
|
||||
- The Kubernetes API server can run on its own
|
||||
|
||||
(without the scheduler, controller manager, and kubelets)
|
||||
|
||||
- By loading CRDs, we can have it manage totally different objects
|
||||
|
||||
(unrelated to containers, clusters, etc.)
|
||||
|
||||
---
|
||||
|
||||
## Service catalog
|
||||
|
||||
- *Service catalog* is another extension mechanism
|
||||
@@ -211,7 +109,7 @@ Examples:
|
||||
- ClusterServiceClass
|
||||
- ClusterServicePlan
|
||||
- ServiceInstance
|
||||
- ServiceBinding
|
||||
- ServiceBinding
|
||||
|
||||
- It uses the Open service broker API
|
||||
|
||||
@@ -219,13 +117,17 @@ Examples:
|
||||
|
||||
## Admission controllers
|
||||
|
||||
- Admission controllers are another way to extend the Kubernetes API
|
||||
- When a Pod is created, it is associated with a ServiceAccount
|
||||
|
||||
- Instead of creating new types, admission controllers can transform or vet API requests
|
||||
(even if we did not specify one explicitly)
|
||||
|
||||
- The diagram on the next slide shows the path of an API request
|
||||
- That ServiceAccount was added on the fly by an *admission controller*
|
||||
|
||||
(courtesy of Banzai Cloud)
|
||||
(specifically, a *mutating admission controller*)
|
||||
|
||||
- Admission controllers sit on the API request path
|
||||
|
||||
(see the cool diagram on next slide, courtesy of Banzai Cloud)
|
||||
|
||||
---
|
||||
|
||||
@@ -235,7 +137,7 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
## Types of admission controllers
|
||||
## Admission controllers
|
||||
|
||||
- *Validating* admission controllers can accept/reject the API call
|
||||
|
||||
@@ -249,27 +151,7 @@ class: pic
|
||||
|
||||
(see [documentation](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#what-does-each-admission-controller-do) for a list)
|
||||
|
||||
- We can also dynamically define and register our own
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Some built-in admission controllers
|
||||
|
||||
- ServiceAccount:
|
||||
|
||||
automatically adds a ServiceAccount to Pods that don't explicitly specify one
|
||||
|
||||
- LimitRanger:
|
||||
|
||||
applies resource constraints specified by LimitRange objects when Pods are created
|
||||
|
||||
- NamespaceAutoProvision:
|
||||
|
||||
automatically creates namespaces when an object is created in a non-existent namespace
|
||||
|
||||
*Note: #1 and #2 are enabled by default; #3 is not.*
|
||||
- But we can also define our own!
|
||||
|
||||
---
|
||||
|
||||
@@ -309,25 +191,19 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## The aggregation layer
|
||||
## (Ab)using the API server
|
||||
|
||||
- We can delegate entire parts of the Kubernetes API to external servers
|
||||
- If we need to store something "safely" (as in: in etcd), we can use CRDs
|
||||
|
||||
- This is done by creating APIService resources
|
||||
- This gives us primitives to read/write/list objects (and optionally validate them)
|
||||
|
||||
(check them with `kubectl get apiservices`!)
|
||||
- The Kubernetes API server can run on its own
|
||||
|
||||
- The APIService resource maps a type (kind) and version to an external service
|
||||
(without the scheduler, controller manager, and kubelets)
|
||||
|
||||
- All requests concerning that type are sent (proxied) to the external service
|
||||
- By loading CRDs, we can have it manage totally different objects
|
||||
|
||||
- This allows to have resources like CRDs, but that aren't stored in etcd
|
||||
|
||||
- Example: `metrics-server`
|
||||
|
||||
(storing live metrics in etcd would be extremely inefficient)
|
||||
|
||||
- Requires significantly more work than CRDs!
|
||||
(unrelated to containers, clusters, etc.)
|
||||
|
||||
---
|
||||
|
||||
@@ -342,5 +218,3 @@ class: extra-details
|
||||
- [Built-in Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/)
|
||||
|
||||
- [Dynamic Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)
|
||||
|
||||
- [Aggregation Layer](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)
|
||||
|
||||
@@ -87,7 +87,7 @@
|
||||
|
||||
- Clone the Flux repository:
|
||||
```
|
||||
git clone https://github.com/fluxcd/flux
|
||||
git clone https://github.com/weaveworks/flux
|
||||
```
|
||||
|
||||
- Edit `deploy/flux-deployment.yaml`
|
||||
|
||||
@@ -1,3 +1,41 @@
|
||||
## Questions to ask before adding healthchecks
|
||||
|
||||
- Do we want liveness, readiness, both?
|
||||
|
||||
(sometimes, we can use the same check, but with different failure thresholds)
|
||||
|
||||
- Do we have existing HTTP endpoints that we can use?
|
||||
|
||||
- Do we need to add new endpoints, or perhaps use something else?
|
||||
|
||||
- Are our healthchecks likely to use resources and/or slow down the app?
|
||||
|
||||
- Do they depend on additional services?
|
||||
|
||||
(this can be particularly tricky, see next slide)
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks and dependencies
|
||||
|
||||
- A good healthcheck should always indicate the health of the service itself
|
||||
|
||||
- It should not be affected by the state of the service's dependencies
|
||||
|
||||
- Example: a web server requiring a database connection to operate
|
||||
|
||||
(make sure that the healthcheck can report "OK" even if the database is down;
|
||||
<br/>
|
||||
because it won't help us to restart the web server if the issue is with the DB!)
|
||||
|
||||
- Example: a microservice calling other microservices
|
||||
|
||||
- Example: a worker process
|
||||
|
||||
(these will generally require minor code changes to report health)
|
||||
|
||||
---
|
||||
|
||||
## Adding healthchecks to an app
|
||||
|
||||
- Let's add healthchecks to DockerCoins!
|
||||
@@ -332,4 +370,24 @@ class: extra-details
|
||||
|
||||
(and have gcr.io/pause take care of the reaping)
|
||||
|
||||
- Discussion of this in [Video - 10 Ways to Shoot Yourself in the Foot with Kubernetes, #9 Will Surprise You](https://www.youtube.com/watch?v=QKI-JRs2RIE)
|
||||
---
|
||||
|
||||
## Healthchecks for worker
|
||||
|
||||
- Readiness isn't useful
|
||||
|
||||
(because worker isn't a backend for a service)
|
||||
|
||||
- Liveness may help us restart a broken worker, but how can we check it?
|
||||
|
||||
- Embedding an HTTP server is an option
|
||||
|
||||
(but it has a high potential for unwanted side effects and false positives)
|
||||
|
||||
- Using a "lease" file can be relatively easy:
|
||||
|
||||
- touch a file during each iteration of the main loop
|
||||
|
||||
- check the timestamp of that file from an exec probe
|
||||
|
||||
- Writing logs (and checking them from the probe) also works
|
||||
|
||||
@@ -42,11 +42,9 @@
|
||||
|
||||
- internal corruption (causing all requests to error)
|
||||
|
||||
- Anything where our incident response would be "just restart/reboot it"
|
||||
- If the liveness probe fails *N* consecutive times, the container is killed
|
||||
|
||||
.warning[**Do not** use liveness probes for problems that can't be fixed by a restart]
|
||||
|
||||
- Otherwise we just restart our pods for no reason, creating useless load
|
||||
- *N* is the `failureThreshold` (3 by default)
|
||||
|
||||
---
|
||||
|
||||
@@ -54,7 +52,7 @@
|
||||
|
||||
- Indicates if the container is ready to serve traffic
|
||||
|
||||
- If a container becomes "unready" it might be ready again soon
|
||||
- If a container becomes "unready" (let's say busy!) it might be ready again soon
|
||||
|
||||
- If the readiness probe fails:
|
||||
|
||||
@@ -68,79 +66,19 @@
|
||||
|
||||
## When to use a readiness probe
|
||||
|
||||
- To indicate failure due to an external cause
|
||||
- To indicate temporary failures
|
||||
|
||||
- database is down or unreachable
|
||||
- the application can only service *N* parallel connections
|
||||
|
||||
- mandatory auth or other backend service unavailable
|
||||
- the runtime is busy doing garbage collection or initial data load
|
||||
|
||||
- To indicate temporary failure or unavailability
|
||||
- The container is marked as "not ready" after `failureThreshold` failed attempts
|
||||
|
||||
- application can only service *N* parallel connections
|
||||
(3 by default)
|
||||
|
||||
- runtime is busy doing garbage collection or initial data load
|
||||
- It is marked again as "ready" after `successThreshold` successful attempts
|
||||
|
||||
- For processes that take a long time to start
|
||||
|
||||
(more on that later)
|
||||
|
||||
---
|
||||
|
||||
## Dependencies
|
||||
|
||||
- If a web server depends on a database to function, and the database is down:
|
||||
|
||||
- the web server's liveness probe should succeed
|
||||
|
||||
- the web server's readiness probe should fail
|
||||
|
||||
- Same thing for any hard dependency (without which the container can't work)
|
||||
|
||||
.warning[**Do not** fail liveness probes for problems that are external to the container]
|
||||
|
||||
---
|
||||
|
||||
## Timing and thresholds
|
||||
|
||||
- Probes are executed at intervals of `periodSeconds` (default: 10)
|
||||
|
||||
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
|
||||
|
||||
.warning[If a probe takes longer than that, it is considered as a FAIL]
|
||||
|
||||
- A probe is considered successful after `successThreshold` successes (default: 1)
|
||||
|
||||
- A probe is considered failing after `failureThreshold` failures (default: 3)
|
||||
|
||||
- A probe can have an `initialDelaySeconds` parameter (default: 0)
|
||||
|
||||
- Kubernetes will wait that amount of time before running the probe for the first time
|
||||
|
||||
(this is important to avoid killing services that take a long time to start)
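
For reference, a probe spelling out all of these parameters could look like the sketch below (endpoint, port, and values are examples, not taken from these slides):

```yaml
# Liveness probe with explicit timing parameters
livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
  initialDelaySeconds: 30   # wait before the first probe
  periodSeconds: 10         # probe every 10 seconds
  timeoutSeconds: 1         # a slower answer counts as a failure
  successThreshold: 1       # one success marks the probe as passing
  failureThreshold: 3       # three consecutive failures kill the container
```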
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Startup probe
|
||||
|
||||
- Kubernetes 1.16 introduces a third type of probe: `startupProbe`
|
||||
|
||||
(it is in `alpha` in Kubernetes 1.16)
|
||||
|
||||
- It can be used to indicate "container not ready *yet*"
|
||||
|
||||
- process is still starting
|
||||
|
||||
- loading external data, priming caches
|
||||
|
||||
- Before Kubernetes 1.16, we had to use the `initialDelaySeconds` parameter
|
||||
|
||||
(available for both liveness and readiness probes)
|
||||
|
||||
- `initialDelaySeconds` is a rigid delay (always wait X before running probes)
|
||||
|
||||
- `startupProbe` works better when a container start time can vary a lot
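
As a sketch (assuming the `startupProbe` feature is enabled on the cluster; endpoint and thresholds are examples), it could look like this:

```yaml
# Tolerate up to 30 x 10s = 5 minutes of startup time
startupProbe:
  httpGet:
    path: /healthz
    port: 8080
  periodSeconds: 10
  failureThreshold: 30
```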
|
||||
(1 by default)
|
||||
|
||||
---
|
||||
|
||||
@@ -174,12 +112,10 @@ class: extra-details
|
||||
|
||||
(instead of serving errors or timeouts)
|
||||
|
||||
- Unavailable backends get removed from load balancer rotation
|
||||
- Overloaded backends get removed from load balancer rotation
|
||||
|
||||
(thus improving response times across the board)
|
||||
|
||||
- If a probe is not defined, it's as if there was an "always successful" probe
|
||||
|
||||
---
|
||||
|
||||
## Example: HTTP probe
|
||||
@@ -229,56 +165,14 @@ If the Redis process becomes unresponsive, it will be killed.
|
||||
|
||||
---
|
||||
|
||||
## Questions to ask before adding healthchecks
|
||||
## Details about liveness and readiness probes
|
||||
|
||||
- Do we want liveness, readiness, both?
|
||||
- Probes are executed at intervals of `periodSeconds` (default: 10)
|
||||
|
||||
(sometimes, we can use the same check, but with different failure thresholds)
|
||||
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
|
||||
|
||||
- Do we have existing HTTP endpoints that we can use?
|
||||
- A probe is considered successful after `successThreshold` successes (default: 1)
|
||||
|
||||
- Do we need to add new endpoints, or perhaps use something else?
|
||||
- A probe is considered failing after `failureThreshold` failures (default: 3)
|
||||
|
||||
- Are our healthchecks likely to use resources and/or slow down the app?
|
||||
|
||||
- Do they depend on additional services?
|
||||
|
||||
(this can be particularly tricky, see next slide)
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks and dependencies
|
||||
|
||||
- Liveness checks should not be influenced by the state of external services
|
||||
|
||||
- All checks should reply quickly (by default, less than 1 second)
|
||||
|
||||
- Otherwise, they are considered to fail
|
||||
|
||||
- This might require checking the health of dependencies asynchronously
|
||||
|
||||
(e.g. if a database or API might be healthy but still take more than
|
||||
1 second to reply, we should check the status asynchronously and report
|
||||
a cached status)
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks for workers
|
||||
|
||||
(In that context, worker = process that doesn't accept connections)
|
||||
|
||||
- Readiness isn't useful
|
||||
|
||||
(because workers aren't backends for a service)
|
||||
|
||||
- Liveness may help us restart a broken worker, but how can we check it?
|
||||
|
||||
- Embedding an HTTP server is a (potentially expensive) option
|
||||
|
||||
- Using a "lease" file can be relatively easy:
|
||||
|
||||
- touch a file during each iteration of the main loop
|
||||
|
||||
- check the timestamp of that file from an exec probe
|
||||
|
||||
- Writing logs (and checking them from the probe) also works
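
A minimal sketch of that lease-file approach (the path and timings are examples; the worker would `touch` the file at each iteration of its main loop):

```yaml
# The probe fails if /tmp/lease hasn't been modified within the last minute
livenessProbe:
  exec:
    command:
    - sh
    - -c
    - test -n "$(find /tmp/lease -mmin -1 2>/dev/null)"
  periodSeconds: 30
```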
|
||||
- If a probe is not defined, it's as if there was an "always successful" probe
|
||||
|
||||
@@ -1,239 +0,0 @@
|
||||
# Helm chart format
|
||||
|
||||
- What exactly is a chart?
|
||||
|
||||
- What's in it?
|
||||
|
||||
- What would be involved in creating a chart?
|
||||
|
||||
(we won't create a chart, but we'll see the required steps)
|
||||
|
||||
---
|
||||
|
||||
## What is a chart
|
||||
|
||||
- A chart is a set of files
|
||||
|
||||
- Some of these files are mandatory for the chart to be viable
|
||||
|
||||
(more on that later)
|
||||
|
||||
- These files are typically packed in a tarball
|
||||
|
||||
- These tarballs are stored in "repos"
|
||||
|
||||
(which can be static HTTP servers)
|
||||
|
||||
- We can install from a repo, from a local tarball, or an unpacked tarball
|
||||
|
||||
(the latter option is preferred when developing a chart)
|
||||
|
||||
---
|
||||
|
||||
## What's in a chart
|
||||
|
||||
- A chart must have at least:
|
||||
|
||||
- a `templates` directory, with YAML manifests for Kubernetes resources
|
||||
|
||||
- a `values.yaml` file, containing (tunable) parameters for the chart
|
||||
|
||||
- a `Chart.yaml` file, containing metadata (name, version, description ...)
|
||||
|
||||
- Let's look at a simple chart, `stable/tomcat`
|
||||
|
||||
---
|
||||
|
||||
## Downloading a chart
|
||||
|
||||
- We can use `helm pull` to download a chart from a repo
|
||||
|
||||
.exercise[
|
||||
|
||||
- Download the tarball for `stable/tomcat`:
|
||||
```bash
|
||||
helm pull stable/tomcat
|
||||
```
|
||||
(This will create a file named `tomcat-X.Y.Z.tgz`.)
|
||||
|
||||
- Or, download + untar `stable/tomcat`:
|
||||
```bash
|
||||
helm pull stable/tomcat --untar
|
||||
```
|
||||
(This will create a directory named `tomcat`.)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Looking at the chart's content
|
||||
|
||||
- Let's look at the files and directories in the `tomcat` chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display the tree structure of the chart we just downloaded:
|
||||
```bash
|
||||
tree tomcat
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We see the components mentioned above: `Chart.yaml`, `templates/`, `values.yaml`.
|
||||
|
||||
---
|
||||
|
||||
## Templates
|
||||
|
||||
- The `templates/` directory contains YAML manifests for Kubernetes resources
|
||||
|
||||
(Deployments, Services, etc.)
|
||||
|
||||
- These manifests can contain template tags
|
||||
|
||||
(using the standard Go template library)
|
||||
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the template file for the tomcat Service resource:
|
||||
```bash
|
||||
cat tomcat/templates/appsrv-svc.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Analyzing the template file
|
||||
|
||||
- Tags are identified by `{{ ... }}`
|
||||
|
||||
- `{{ template "x.y" }}` expands a [named template](https://helm.sh/docs/chart_template_guide/named_templates/#declaring-and-using-templates-with-define-and-template)
|
||||
|
||||
(previously defined with `{{ define "x.y "}}...stuff...{{ end }}`)
|
||||
|
||||
- The `.` in `{{ template "x.y" . }}` is the *context* for that named template
|
||||
|
||||
(so that the named template block can access variables from the local context)
|
||||
|
||||
- `{{ .Release.xyz }}` refers to [built-in variables](https://helm.sh/docs/chart_template_guide/builtin_objects/) initialized by Helm
|
||||
|
||||
(indicating the chart name, version, whether we are installing or upgrading ...)
|
||||
|
||||
- `{{ .Values.xyz }}` refers to tunable/settable [values](https://helm.sh/docs/chart_template_guide/values_files/)
|
||||
|
||||
(more on that in a minute)
|
||||
|
||||
---
|
||||
|
||||
## Values
|
||||
|
||||
- Each chart comes with a
|
||||
[values file](https://helm.sh/docs/chart_template_guide/values_files/)
|
||||
|
||||
- It's a YAML file containing a set of default parameters for the chart
|
||||
|
||||
- The values can be accessed in templates with e.g. `{{ .Values.x.y }}`
|
||||
|
||||
(corresponding to field `y` in map `x` in the values file)
|
||||
|
||||
- The values can be set or overridden when installing or upgrading a chart:
|
||||
|
||||
- with `--set x.y=z` (can be used multiple times to set multiple values)
|
||||
|
||||
- with `--values some-yaml-file.yaml` (set a bunch of values from a file)
|
||||
|
||||
- Charts following best practices will have values following specific patterns
|
||||
|
||||
  (e.g. having a `service` map allowing us to set `service.type` etc.)
|
||||
|
||||
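As an illustration, here is what a hypothetical `values.yaml` following those conventions could look like (names are made up for the example):

```yaml
# Default values; each of them can be overridden with --set or --values
replicaCount: 1
image:
  repository: nginx
  tag: "1.17"
service:
  type: ClusterIP
  port: 80
```

Installing with `--set service.type=NodePort` would then override just that one field; every other value would keep its default.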
---
|
||||
|
||||
## Other useful tags
|
||||
|
||||
- `{{ if x }} y {{ end }}` allows us to include `y` if `x` evaluates to `true`
|
||||
|
||||
(can be used for e.g. healthchecks, annotations, or even an entire resource)
|
||||
|
||||
- `{{ range x }} y {{ end }}` iterates over `x`, evaluating `y` each time
|
||||
|
||||
(the elements of `x` are assigned to `.` in the range scope)
|
||||
|
||||
- `{{- x }}`/`{{ x -}}` will remove whitespace on the left/right
|
||||
|
||||
- The whole [Sprig](http://masterminds.github.io/sprig/) library, with additions:
|
||||
|
||||
`lower` `upper` `quote` `trim` `default` `b64enc` `b64dec` `sha256sum` `indent` `toYaml` ...
|
||||
|
||||
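Here is a small, hypothetical container spec fragment combining some of these tags (`.Values.env` and `.Values.healthcheck.enabled` are made-up values for the example):

```yaml
env:
  # Iterate over a (hypothetical) map of environment variables
  {{- range $key, $value := .Values.env }}
  - name: {{ $key | upper }}
    value: {{ $value | quote }}
  {{- end }}
# Only emit the probe if the (hypothetical) healthcheck.enabled value is true
{{- if .Values.healthcheck.enabled }}
livenessProbe:
  httpGet:
    path: /
    port: http
{{- end }}
```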
---
|
||||
|
||||
## Pipelines
|
||||
|
||||
- `{{ quote blah }}` can also be expressed as `{{ blah | quote }}`
|
||||
|
||||
- With multiple arguments, `{{ x y z }}` can be expressed as `{{ z | x y }}`
|
||||
|
||||
- Example: `{{ .Values.annotations | toYaml | indent 4 }}`
|
||||
|
||||
- transforms the map under `annotations` into a YAML string
|
||||
|
||||
- indents it with 4 spaces (to match the surrounding context)
|
||||
|
||||
- Pipelines are not specific to Helm, but a feature of Go templates
|
||||
|
||||
(check the [Go text/template documentation](https://golang.org/pkg/text/template/) for more details and examples)
|
||||
|
||||
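To make this concrete, here is what that example pipeline would do, assuming a hypothetical `annotations` map in the values file:

```yaml
# In values.yaml (assumed for the example):
#   annotations:
#     prometheus.io/scrape: "true"
#     prometheus.io/port: "8080"

# In the template:
metadata:
  annotations:
{{ .Values.annotations | toYaml | indent 4 }}

# Rendered result:
#   metadata:
#     annotations:
#       prometheus.io/scrape: "true"
#       prometheus.io/port: "8080"
```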
---
|
||||
|
||||
## README and NOTES.txt
|
||||
|
||||
- At the top-level of the chart, it's a good idea to have a README
|
||||
|
||||
- It will be viewable with e.g. `helm show readme stable/tomcat`
|
||||
|
||||
- In the `templates/` directory, we can also have a `NOTES.txt` file
|
||||
|
||||
- When the template is installed (or upgraded), `NOTES.txt` is processed too
|
||||
|
||||
(i.e. its `{{ ... }}` tags are evaluated)
|
||||
|
||||
- It gets displayed after the install or upgrade
|
||||
|
||||
- It's a great place to generate messages to tell the user:
|
||||
|
||||
- how to connect to the release they just deployed
|
||||
|
||||
  - any passwords or other things that we generated for them
|
||||
|
||||
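As an illustration, a (hypothetical) `NOTES.txt` could look like this — its tags are evaluated just like in any other template:

```
Thank you for installing {{ .Chart.Name }}!

Your release is named {{ .Release.Name }}.

To see the resources created by this release, run:

  kubectl get all --namespace {{ .Release.Namespace }}
```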
---
|
||||
|
||||
## Additional files
|
||||
|
||||
- We can place arbitrary files in the chart (outside of the `templates/` directory)
|
||||
|
||||
- They can be accessed in templates with `.Files`
|
||||
|
||||
- They can be transformed into ConfigMaps or Secrets with `AsConfig` and `AsSecrets`
|
||||
|
||||
(see [this example](https://helm.sh/docs/chart_template_guide/accessing_files/#configmap-and-secrets-utility-functions) in the Helm docs)
|
||||
|
||||
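For instance, the Helm documentation linked above shows how to turn files bundled in the chart into a ConfigMap (this sketch assumes the chart contains a `config/` directory):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-config
data:
  # Embed every file from the chart's config/ directory as a ConfigMap entry
{{ (.Files.Glob "config/*").AsConfig | indent 2 }}
```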
---
|
||||
|
||||
## Hooks and tests
|
||||
|
||||
- We can define *hooks* in our templates
|
||||
|
||||
- Hooks are resources annotated with `"helm.sh/hook": NAME-OF-HOOK`
|
||||
|
||||
- Hook names include `pre-install`, `post-install`, `test`, [and much more](https://helm.sh/docs/topics/charts_hooks/#the-available-hooks)
|
||||
|
||||
- The resources defined in hooks are loaded at a specific time
|
||||
|
||||
- Hook execution is *synchronous*
|
||||
|
||||
(if the resource is a Job or Pod, Helm will wait for its completion)
|
||||
|
||||
- This can be used for database migrations, backups, notifications, smoke tests ...
|
||||
|
||||
- Hooks named `test` are executed only when running `helm test RELEASE-NAME`
|
||||
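Here is a minimal sketch of what a `post-install` hook could look like (a hypothetical Job running a one-off command after the release is installed):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ .Release.Name }}-post-install
  annotations:
    # This annotation is what turns the resource into a hook
    "helm.sh/hook": post-install
    # Clean up the Job once it has completed successfully
    "helm.sh/hook-delete-policy": hook-succeeded
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: post-install
        image: alpine
        command: ["sh", "-c", "echo the release has been installed"]
```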
@@ -1,220 +0,0 @@
|
||||
# Creating a basic chart
|
||||
|
||||
- We are going to show a way to create a *very simplified* chart
|
||||
|
||||
- In a real chart, *lots of things* would be templatized
|
||||
|
||||
(Resource names, service types, number of replicas...)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a sample chart:
|
||||
```bash
|
||||
helm create dockercoins
|
||||
```
|
||||
|
||||
- Move away the sample templates and create an empty template directory:
|
||||
```bash
|
||||
mv dockercoins/templates dockercoins/default-templates
|
||||
mkdir dockercoins/templates
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Exporting the YAML for our application
|
||||
|
||||
- The following section assumes that DockerCoins is currently running
|
||||
|
||||
- If DockerCoins is not running, see next slide
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create one YAML file for each resource that we need:
|
||||
.small[
|
||||
```bash
|
||||
|
||||
while read kind name; do
|
||||
kubectl get -o yaml $kind $name > dockercoins/templates/$name-$kind.yaml
|
||||
done <<EOF
|
||||
deployment worker
|
||||
deployment hasher
|
||||
daemonset rng
|
||||
deployment webui
|
||||
deployment redis
|
||||
service hasher
|
||||
service rng
|
||||
service webui
|
||||
service redis
|
||||
EOF
|
||||
```
|
||||
]
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Obtaining DockerCoins YAML
|
||||
|
||||
- If DockerCoins is not running, we can also obtain the YAML from a public repository
|
||||
|
||||
.exercise[
|
||||
|
||||
- Clone the kubercoins repository:
|
||||
```bash
|
||||
git clone https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
- Copy the YAML files to the `templates/` directory:
|
||||
```bash
|
||||
cp kubercoins/*.yaml dockercoins/templates/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing our helm chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's install our helm chart!
|
||||
```
|
||||
helm install helmcoins dockercoins
|
||||
```
|
||||
(`helmcoins` is the name of the release; `dockercoins` is the local path of the chart)
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
- Since the application is already deployed, this will fail:
|
||||
```
|
||||
Error: rendered manifests contain a resource that already exists.
|
||||
Unable to continue with install: existing resource conflict:
|
||||
kind: Service, namespace: default, name: hasher
|
||||
```
|
||||
|
||||
- To avoid naming conflicts, we will deploy the application in another *namespace*
|
||||
|
||||
---
|
||||
|
||||
## Switching to another namespace
|
||||
|
||||
- We need to create a new namespace
|
||||
|
||||
(Helm 2 creates namespaces automatically; Helm 3 doesn't anymore)
|
||||
|
||||
- We need to tell Helm which namespace to use
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace helmcoins
|
||||
```
|
||||
|
||||
- Deploy our chart in that namespace:
|
||||
```bash
|
||||
helm install helmcoins dockercoins --namespace=helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Helm releases are namespaced
|
||||
|
||||
- Let's try to see the release that we just deployed
|
||||
|
||||
.exercise[
|
||||
|
||||
- List Helm releases:
|
||||
```bash
|
||||
helm list
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Our release doesn't show up!
|
||||
|
||||
We have to specify its namespace (or switch to that namespace).
|
||||
|
||||
---
|
||||
|
||||
## Specifying the namespace
|
||||
|
||||
- Try again, with the correct namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- List Helm releases in `helmcoins`:
|
||||
```bash
|
||||
helm list --namespace=helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=helmcoins
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
|
||||
---
|
||||
|
||||
## Discussion, shortcomings
|
||||
|
||||
- Helm (and Kubernetes) best practices recommend adding a number of labels and annotations
|
||||
|
||||
(e.g. `app.kubernetes.io/name`, `helm.sh/chart`, `app.kubernetes.io/instance` ...)
|
||||
|
||||
- Our basic chart doesn't have any of these
|
||||
|
||||
- Our basic chart doesn't use any template tag
|
||||
|
||||
- Does it make sense to use Helm in that case?
|
||||
|
||||
- *Yes,* because Helm will:
|
||||
|
||||
- track the resources created by the chart
|
||||
|
||||
- save successive revisions, allowing us to rollback
|
||||
|
||||
[Helm docs](https://helm.sh/docs/topics/chart_best_practices/labels/)
|
||||
and [Kubernetes docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/)
|
||||
have details about recommended annotations and labels.
|
||||
|
||||
---
|
||||
|
||||
## Cleaning up
|
||||
|
||||
- Let's remove that chart before moving on
|
||||
|
||||
.exercise[
|
||||
|
||||
- Delete the release (don't forget to specify the namespace):
|
||||
```bash
|
||||
helm delete helmcoins --namespace=helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
@@ -1,579 +0,0 @@
|
||||
# Creating better Helm charts
|
||||
|
||||
- We are going to create a chart with the helper `helm create`
|
||||
|
||||
- This will give us a chart implementing lots of Helm best practices
|
||||
|
||||
(labels, annotations, structure of the `values.yaml` file ...)
|
||||
|
||||
- We will use that chart as a generic Helm chart
|
||||
|
||||
- We will use it to deploy DockerCoins
|
||||
|
||||
- Each component of DockerCoins will have its own *release*
|
||||
|
||||
- In other words, we will "install" that Helm chart multiple times
|
||||
|
||||
(one time per component of DockerCoins)
|
||||
|
||||
---
|
||||
|
||||
## Creating a generic chart
|
||||
|
||||
- Rather than starting from scratch, we will use `helm create`
|
||||
|
||||
- This will give us a basic chart that we will customize
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a basic chart:
|
||||
```bash
|
||||
cd ~
|
||||
helm create helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
This creates a basic chart in the directory `helmcoins`.
|
||||
|
||||
---
|
||||
|
||||
## What's in the basic chart?
|
||||
|
||||
- The basic chart will create a Deployment and a Service
|
||||
|
||||
- Optionally, it will also include an Ingress
|
||||
|
||||
- If we don't pass any values, it will deploy the `nginx` image
|
||||
|
||||
- We can override many things in that chart
|
||||
|
||||
- Let's try to deploy DockerCoins components with that chart!
|
||||
|
||||
---
|
||||
|
||||
## Writing `values.yaml` for our components
|
||||
|
||||
- We need to write one `values.yaml` file for each component
|
||||
|
||||
(hasher, redis, rng, webui, worker)
|
||||
|
||||
- We will start with the `values.yaml` of the chart, and remove what we don't need
|
||||
|
||||
- We will create 5 files:
|
||||
|
||||
  `hasher.yaml`, `redis.yaml`, `rng.yaml`, `webui.yaml`, `worker.yaml`
|
||||
|
||||
- In each file, we want to have:
|
||||
```yaml
|
||||
image:
|
||||
repository: IMAGE-REPOSITORY-NAME
|
||||
tag: IMAGE-TAG
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Getting started
|
||||
|
||||
- For component X, we want to use the image dockercoins/X:v0.1
|
||||
|
||||
(for instance, for rng, we want to use the image dockercoins/rng:v0.1)
|
||||
|
||||
- Exception: for redis, we want to use the official image redis:latest
|
||||
|
||||
.exercise[
|
||||
|
||||
- Write YAML files for the 5 components, with the following model:
|
||||
```yaml
|
||||
image:
|
||||
repository: `IMAGE-REPOSITORY-NAME` (e.g. dockercoins/worker)
|
||||
tag: `IMAGE-TAG` (e.g. v0.1)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
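For instance, `rng.yaml` could look like the following (and `redis.yaml` would use `redis` and `latest` instead):

```yaml
image:
  repository: dockercoins/rng
  tag: v0.1
```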
---
|
||||
|
||||
## Deploying DockerCoins components
|
||||
|
||||
- For convenience, let's work in a separate namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace (if it doesn't already exist):
|
||||
```bash
|
||||
kubectl create namespace helmcoins
|
||||
```
|
||||
|
||||
- Switch to that namespace:
|
||||
```bash
|
||||
kns helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the chart
|
||||
|
||||
- To install a chart, we can use the following command:
|
||||
```bash
|
||||
helm install COMPONENT-NAME CHART-DIRECTORY
|
||||
```
|
||||
|
||||
- We can also use the following command, which is idempotent:
|
||||
```bash
|
||||
helm upgrade COMPONENT-NAME CHART-DIRECTORY --install
|
||||
```
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the 5 components of DockerCoins:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade $COMPONENT helmcoins --install --values=$COMPONENT.yaml
|
||||
done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- Let's see if DockerCoins is working!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the logs of the worker:
|
||||
```bash
|
||||
stern worker
|
||||
```
|
||||
|
||||
- Look at the resources that were created:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
There are *many* issues to fix!
|
||||
|
||||
---
|
||||
|
||||
## Can't pull image
|
||||
|
||||
- It looks like our images can't be found
|
||||
|
||||
.exercise[
|
||||
|
||||
- Use `kubectl describe` on any of the pods in error
|
||||
|
||||
]
|
||||
|
||||
- We're trying to pull `rng:1.16.0` instead of `rng:v0.1`!
|
||||
|
||||
- Where does that `1.16.0` tag come from?
|
||||
|
||||
---
|
||||
|
||||
## Inspecting our template
|
||||
|
||||
- Let's look at the `templates/` directory
|
||||
|
||||
(and try to find the one generating the Deployment resource)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Show the structure of the `helmcoins` chart that Helm generated:
|
||||
```bash
|
||||
tree helmcoins
|
||||
```
|
||||
|
||||
- Check the file `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- Look for the `image:` parameter
|
||||
|
||||
]
|
||||
|
||||
*The image tag references `{{ .Chart.AppVersion }}`. Where does that come from?*
|
||||
|
||||
---
|
||||
|
||||
## The `.Chart` variable
|
||||
|
||||
- `.Chart` is a map corresponding to the values in `Chart.yaml`
|
||||
|
||||
- Let's look for `AppVersion` there!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the file `helmcoins/Chart.yaml`
|
||||
|
||||
- Look for the `appVersion:` parameter
|
||||
|
||||
]
|
||||
|
||||
(Yes, the case is different between the template and the Chart file.)
|
||||
|
||||
---
|
||||
|
||||
## Using the correct tags
|
||||
|
||||
- If we change `AppVersion` to `v0.1`, it will change for *all* deployments
|
||||
|
||||
(including redis)
|
||||
|
||||
- Instead, let's change the *template* to use `{{ .Values.image.tag }}`
|
||||
|
||||
(to match what we've specified in our values YAML files)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- Replace `{{ .Chart.AppVersion }}` with `{{ .Values.image.tag }}`
|
||||
|
||||
]
|
||||
|
||||
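After that edit, the image line should look like this (a sketch; the exact quoting in the generated template may differ slightly):

```yaml
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
```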
---
|
||||
|
||||
## Upgrading to use the new template
|
||||
|
||||
- Technically, we just made a new version of the *chart*
|
||||
|
||||
- To use the new template, we need to *upgrade* the release to use that chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Upgrade all components:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade $COMPONENT helmcoins
|
||||
done
|
||||
```
|
||||
|
||||
- Check how our pods are doing:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see all pods "Running". But ... not all of them are READY.
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting readiness
|
||||
|
||||
- `hasher`, `rng`, `webui` should show up as `1/1 READY`
|
||||
|
||||
- But `redis` and `worker` should show up as `0/1 READY`
|
||||
|
||||
- Why?
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting pods
|
||||
|
||||
- The easiest way to troubleshoot pods is to look at *events*
|
||||
|
||||
- We can look at all the events on the cluster (with `kubectl get events`)
|
||||
|
||||
- Or we can use `kubectl describe` on the objects that have problems
|
||||
|
||||
(`kubectl describe` will retrieve the events related to the object)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the events for the redis pods:
|
||||
```bash
|
||||
kubectl describe pod -l app.kubernetes.io/name=redis
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
It's failing both its liveness and readiness probes!
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks
|
||||
|
||||
- The default chart defines healthchecks doing HTTP requests on port 80
|
||||
|
||||
- That won't work for redis and worker
|
||||
|
||||
(redis is not HTTP, and not on port 80; worker doesn't even listen)
|
||||
|
||||
--
|
||||
|
||||
- We could remove or comment out the healthchecks
|
||||
|
||||
- We could also make them conditional
|
||||
|
||||
- This sounds more interesting, let's do that!
|
||||
|
||||
---
|
||||
|
||||
## Conditionals
|
||||
|
||||
- We need to enclose the healthcheck block with:
|
||||
|
||||
`{{ if false }}` at the beginning (we can change the condition later)
|
||||
|
||||
`{{ end }}` at the end
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- Add `{{ if false }}` on the line before `livenessProbe`
|
||||
|
||||
- Add `{{ end }}` after the `readinessProbe` section
|
||||
|
||||
(see next slide for details)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
This is what the new YAML should look like (added lines in yellow):
|
||||
|
||||
```yaml
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
`{{ if false }}`
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
`{{ end }}`
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing the new chart
|
||||
|
||||
- We need to upgrade all the services again to use the new chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Upgrade all components:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade $COMPONENT helmcoins
|
||||
done
|
||||
```
|
||||
|
||||
- Check how our pods are doing:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Everything should now be running!
|
||||
|
||||
---
|
||||
|
||||
## What's next?
|
||||
|
||||
- Is this working now?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's check the logs of the worker:
|
||||
```bash
|
||||
stern worker
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
This error might look familiar ... The worker can't resolve `redis`.
|
||||
|
||||
Typically, that error means that the `redis` service doesn't exist.
|
||||
|
||||
---
|
||||
|
||||
## Checking services
|
||||
|
||||
- What about the services created by our chart?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the list of services:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
They are named `COMPONENT-helmcoins` instead of just `COMPONENT`.
|
||||
|
||||
We need to change that!
|
||||
|
||||
---
|
||||
|
||||
## Where do the service names come from?
|
||||
|
||||
- Look at the YAML template used for the services
|
||||
|
||||
- It should be using `{{ include "helmcoins.fullname" }}`
|
||||
|
||||
- `include` indicates a *template block* defined somewhere else
|
||||
|
||||
.exercise[
|
||||
|
||||
- Find where that `fullname` thing is defined:
|
||||
```bash
|
||||
grep define.*fullname helmcoins/templates/*
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
It should be in `_helpers.tpl`.
|
||||
|
||||
We can look at the definition, but it's fairly complex ...
|
||||
|
||||
---
|
||||
|
||||
## Changing service names
|
||||
|
||||
- Instead of that `{{ include }}` tag, let's use the name of the release
|
||||
|
||||
- The name of the release is available as `{{ .Release.Name }}`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `helmcoins/templates/service.yaml`
|
||||
|
||||
- Replace the service name with `{{ .Release.Name }}`
|
||||
|
||||
- Upgrade all the releases to use the new chart
|
||||
|
||||
- Confirm that the services now have the right names
|
||||
|
||||
]
|
||||
|
||||
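After that change, the `metadata` section of the service template should look like this (simplified sketch; the labels generated by `helm create` can stay as they are):

```yaml
metadata:
  name: {{ .Release.Name }}
```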
---
|
||||
|
||||
## Is it working now?
|
||||
|
||||
- If we look at the worker logs, it appears that the worker is still stuck
|
||||
|
||||
- What could be happening?
|
||||
|
||||
--
|
||||
|
||||
- The redis service is not on port 80!
|
||||
|
||||
- Let's see how the port number is set
|
||||
|
||||
- We need to look at both the *deployment* template and the *service* template
|
||||
|
||||
---
|
||||
|
||||
## Service template
|
||||
|
||||
- In the service template, we have the following section:
|
||||
```yaml
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
```
|
||||
|
||||
- `port` is the port on which the service is "listening"
|
||||
|
||||
(i.e. to which our code needs to connect)
|
||||
|
||||
- `targetPort` is the port on which the pods are listening
|
||||
|
||||
- The `name` is not important (it's OK if it's `http` even for non-HTTP traffic)
|
||||
|
||||
---
|
||||
|
||||
## Setting the redis port
|
||||
|
||||
- Let's add a `service.port` value to the redis release
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `redis.yaml` to add:
|
||||
```yaml
|
||||
service:
|
||||
port: 6379
|
||||
```
|
||||
|
||||
- Apply the new values file:
|
||||
```bash
|
||||
helm upgrade redis helmcoins --values=redis.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deployment template
|
||||
|
||||
- If we look at the deployment template, we see this section:
|
||||
```yaml
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
```
|
||||
|
||||
- The container port is hard-coded to 80
|
||||
|
||||
- We'll change it to use the port number specified in the values
|
||||
|
||||
---
|
||||
|
||||
## Changing the deployment template
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- The line with `containerPort` should be:
|
||||
```yaml
|
||||
containerPort: {{ .Values.service.port }}
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Apply changes
|
||||
|
||||
- Re-run the for loop to execute `helm upgrade` one more time
|
||||
|
||||
- Check the worker logs
|
||||
|
||||
- This time, it should be working!
|
||||
|
||||
---
|
||||
|
||||
## Extra steps
|
||||
|
||||
- We don't need to create a service for the worker
|
||||
|
||||
- We can put the whole service block in a conditional
|
||||
|
||||
(this will require additional changes in other files referencing the service)
|
||||
|
||||
- We can set the webui to be a NodePort service
|
||||
|
||||
- We can change the number of workers with `replicaCount`
|
||||
|
||||
- And much more!
|
||||
@@ -1,419 +0,0 @@
|
||||
# Managing stacks with Helm
|
||||
|
||||
- We created our first resources with `kubectl run`, `kubectl expose` ...
|
||||
|
||||
- We have also created resources by loading YAML files with `kubectl apply -f`
|
||||
|
||||
- For larger stacks, managing thousands of lines of YAML is unreasonable
|
||||
|
||||
- These YAML bundles need to be customized with variable parameters
|
||||
|
||||
(E.g.: number of replicas, image version to use ...)
|
||||
|
||||
- It would be nice to have an organized, versioned collection of bundles
|
||||
|
||||
- It would be nice to be able to upgrade/rollback these bundles carefully
|
||||
|
||||
- [Helm](https://helm.sh/) is an open source project offering all these things!
|
||||
|
||||
---
|
||||
|
||||
## Helm concepts
|
||||
|
||||
- `helm` is a CLI tool
|
||||
|
||||
- It is used to find, install, upgrade *charts*
|
||||
|
||||
- A chart is an archive containing templatized YAML bundles
|
||||
|
||||
- Charts are versioned
|
||||
|
||||
- Charts can be stored on private or public repositories
|
||||
|
||||
---
|
||||
|
||||
## Differences between charts and packages
|
||||
|
||||
- A package (deb, rpm...) contains binaries, libraries, etc.
|
||||
|
||||
- A chart contains YAML manifests
|
||||
|
||||
(the binaries, libraries, etc. are in the images referenced by the chart)
|
||||
|
||||
- On most distributions, a package can only be installed once
|
||||
|
||||
(installing another version replaces the installed one)
|
||||
|
||||
- A chart can be installed multiple times
|
||||
|
||||
- Each installation is called a *release*
|
||||
|
||||
- This allows us to install e.g. 10 instances of MongoDB
|
||||
|
||||
(with potentially different versions and configurations)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Wait a minute ...
|
||||
|
||||
*But, on my Debian system, I have Python 2 **and** Python 3.
|
||||
<br/>
|
||||
Also, I have multiple versions of the Postgres database engine!*
|
||||
|
||||
Yes!
|
||||
|
||||
But they have different package names:
|
||||
|
||||
- `python2.7`, `python3.8`
|
||||
|
||||
- `postgresql-10`, `postgresql-11`
|
||||
|
||||
Good to know: the Postgres package in Debian includes
|
||||
provisions to deploy multiple Postgres servers on the
|
||||
same system, but it's an exception (and it's a lot of
|
||||
work done by the package maintainer, not by the `dpkg`
|
||||
or `apt` tools).
|
||||
|
||||
---
|
||||
|
||||
## Helm 2 vs Helm 3
|
||||
|
||||
- Helm 3 was released [November 13, 2019](https://helm.sh/blog/helm-3-released/)
|
||||
|
||||
- Charts remain compatible between Helm 2 and Helm 3
|
||||
|
||||
- The CLI is very similar (with minor changes to some commands)
|
||||
|
||||
- The main difference is that Helm 2 uses `tiller`, a server-side component
|
||||
|
||||
- Helm 3 doesn't use `tiller` at all, making it simpler (yay!)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## With or without `tiller`
|
||||
|
||||
- With Helm 3:
|
||||
|
||||
- the `helm` CLI communicates directly with the Kubernetes API
|
||||
|
||||
- it creates resources (deployments, services...) with our credentials
|
||||
|
||||
- With Helm 2:
|
||||
|
||||
- the `helm` CLI communicates with `tiller`, telling `tiller` what to do
|
||||
|
||||
- `tiller` then communicates with the Kubernetes API, using its own credentials
|
||||
|
||||
- This indirect model caused significant permissions headaches
|
||||
|
||||
(`tiller` required very broad permissions to function)
|
||||
|
||||
- `tiller` was removed in Helm 3 to simplify the security aspects
|
||||
|
||||
---
|
||||
|
||||
## Installing Helm
|
||||
|
||||
- If the `helm` CLI is not installed in your environment, install it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check if `helm` is installed:
|
||||
```bash
|
||||
helm
|
||||
```
|
||||
|
||||
- If it's not installed, run the following command:
|
||||
```bash
|
||||
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 \
|
||||
| bash
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(To install Helm 2, replace `get-helm-3` with `get`.)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Only if using Helm 2 ...
|
||||
|
||||
- We need to install Tiller and give it some permissions
|
||||
|
||||
- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace
|
||||
|
||||
- They can be managed (installed, upgraded...) with the `helm` CLI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy Tiller:
|
||||
```bash
|
||||
helm init
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
At the end of the install process, you will see:
|
||||
|
||||
```
|
||||
Happy Helming!
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Only if using Helm 2 ...
|
||||
|
||||
- Tiller needs permissions to create Kubernetes resources
|
||||
|
||||
- In a more realistic deployment, you might create per-user or per-team
|
||||
service accounts, roles, and role bindings
|
||||
|
||||
.exercise[
|
||||
|
||||
- Grant `cluster-admin` role to `kube-system:default` service account:
|
||||
```bash
|
||||
kubectl create clusterrolebinding add-on-cluster-admin \
|
||||
--clusterrole=cluster-admin --serviceaccount=kube-system:default
|
||||
```
|
||||
|
||||
|
||||
]
|
||||
|
||||
(Defining the exact roles and permissions on your cluster requires
|
||||
a deeper knowledge of Kubernetes' RBAC model. The command above is
|
||||
fine for personal and development clusters.)
|
||||
|
||||
---
|
||||
|
||||
## Charts and repositories
|
||||
|
||||
- A *repository* (or repo for short) is a collection of charts
|
||||
|
||||
- It's just a bunch of files
|
||||
|
||||
(they can be hosted by a static HTTP server, or on a local directory)
|
||||
|
||||
- We can add "repos" to Helm, giving them a nickname
|
||||
|
||||
- The nickname is used when referring to charts on that repo
|
||||
|
||||
(for instance, if we try to install `hello/world`, that
|
||||
means the chart `world` on the repo `hello`; and that repo
|
||||
`hello` might be something like https://blahblah.hello.io/charts/)
|
||||
|
||||
---
|
||||
|
||||
## Managing repositories
|
||||
|
||||
- Let's check what repositories we have, and add the `stable` repo
|
||||
|
||||
(the `stable` repo contains a set of official-ish charts)
|
||||
|
||||
.exercise[
|
||||
|
||||
- List our repos:
|
||||
```bash
|
||||
helm repo list
|
||||
```
|
||||
|
||||
- Add the `stable` repo:
|
||||
```bash
|
||||
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Adding a repo can take a few seconds (it downloads the list of charts from the repo).
|
||||
|
||||
It's OK to add a repo that already exists (it will merely update it).
|
||||
|
||||
---
|
||||
|
||||
## Search available charts
|
||||
|
||||
- We can search available charts with `helm search`
|
||||
|
||||
- We need to specify where to search (only our repos, or Helm Hub)
|
||||
|
||||
- Let's search for all charts mentioning tomcat!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Search for tomcat in the repo that we added earlier:
|
||||
```bash
|
||||
helm search repo tomcat
|
||||
```
|
||||
|
||||
- Search for tomcat on the Helm Hub:
|
||||
```bash
|
||||
helm search hub tomcat
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
[Helm Hub](https://hub.helm.sh/) indexes many repos, using the [Monocular](https://github.com/helm/monocular) server.
|
||||
|
||||
---
|
||||
|
||||
## Charts and releases
|
||||
|
||||
- "Installing a chart" means creating a *release*
|
||||
|
||||
- We need to name that release
|
||||
|
||||
(or use the `--generate-name` to get Helm to generate one for us)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the tomcat chart that we found earlier:
|
||||
```bash
|
||||
helm install java4ever stable/tomcat
|
||||
```
|
||||
|
||||
- List the releases:
|
||||
```bash
|
||||
helm list
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Searching and installing with Helm 2
|
||||
|
||||
- Helm 2 doesn't have support for the Helm Hub
|
||||
|
||||
- The `helm search` command only takes a search string argument
|
||||
|
||||
(e.g. `helm search tomcat`)
|
||||
|
||||
- With Helm 2, the name is optional:
|
||||
|
||||
`helm install stable/tomcat` will automatically generate a name
|
||||
|
||||
`helm install --name java4ever stable/tomcat` will specify a name
|
||||
|
||||
---
|
||||
|
||||
## Viewing resources of a release
|
||||
|
||||
- This specific chart labels all its resources with a `release` label
|
||||
|
||||
- We can use a selector to see these resources
|
||||
|
||||
.exercise[
|
||||
|
||||
- List all the resources created by this release:
|
||||
```bash
|
||||
kubectl get all --selector=release=java4ever
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: this `release` label wasn't added automatically by Helm.
|
||||
<br/>
|
||||
It is defined in that chart. In other words, not all charts will provide this label.
|
||||
|
||||
---
|
||||
|
||||
## Configuring a release
|
||||
|
||||
- By default, `stable/tomcat` creates a service of type `LoadBalancer`
|
||||
|
||||
- We would like to change that to a `NodePort`
|
||||
|
||||
- We could use `kubectl edit service java4ever-tomcat`, but ...
|
||||
|
||||
... our changes would get overwritten next time we update that chart!
|
||||
|
||||
- Instead, we are going to *set a value*
|
||||
|
||||
- Values are parameters that the chart can use to change its behavior
|
||||
|
||||
- Values have default values
|
||||
|
||||
- Each chart is free to define its own values and their defaults
|
||||
|
||||
---
|
||||
|
||||
## Checking possible values
|
||||
|
||||
- We can inspect a chart with `helm show` or `helm inspect`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the README for tomcat:
|
||||
```bash
|
||||
helm show readme stable/tomcat
|
||||
```
|
||||
|
||||
- Look at the values and their defaults:
|
||||
```bash
|
||||
helm show values stable/tomcat
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The `values` may or may not have useful comments.
|
||||
|
||||
The `readme` may or may not have (accurate) explanations for the values.
|
||||
|
||||
(If we're unlucky, there won't be any indication about how to use the values!)
|
||||
|
||||
---
|
||||
|
||||
## Setting values
|
||||
|
||||
- Values can be set when installing a chart, or when upgrading it
|
||||
|
||||
- We are going to update `java4ever` to change the type of the service
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update `java4ever`:
|
||||
```bash
|
||||
helm upgrade java4ever stable/tomcat --set service.type=NodePort
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note that we have to specify the chart that we use (`stable/tomcat`),
|
||||
even if we just want to update some values.
|
||||
|
||||
We can set multiple values. If we want to set many values, we can use `-f`/`--values` and pass a YAML file with all the values.
|
||||
|
||||
All unspecified values will take the default values defined in the chart.
|
||||
|
||||
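For instance, instead of `--set`, we could put the override in a (hypothetical) `custom.yaml` file and pass it with `--values`:

```yaml
# custom.yaml
service:
  type: NodePort
```

Then `helm upgrade java4ever stable/tomcat --values=custom.yaml` would have the same effect as the `--set` command above.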
---
|
||||
|
||||
## Connecting to tomcat
|
||||
|
||||
- Let's check the tomcat server that we just installed
|
||||
|
||||
- Note: its readiness probe has a 60s delay
|
||||
|
||||
(so it will take 60s after the initial deployment before the service works)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the node port allocated to the service:
|
||||
```bash
|
||||
kubectl get service java4ever-tomcat
|
||||
PORT=$(kubectl get service java4ever-tomcat -o jsonpath={..nodePort})
|
||||
```
|
||||
|
||||
- Connect to it, checking the demo app on `/sample/`:
|
||||
```bash
|
||||
curl localhost:$PORT/sample/
|
||||
```
|
||||
|
||||
]
|
||||
@@ -1,234 +0,0 @@
|
||||
# Helm secrets
|
||||
|
||||
- Helm can do *rollbacks*:
|
||||
|
||||
- to previously installed charts
|
||||
|
||||
- to previous sets of values
|
||||
|
||||
- How and where does it store the data needed to do that?
|
||||
|
||||
- Let's investigate!
|
||||
|
||||
---
|
||||
|
||||
## We need a release
|
||||
|
||||
- We need to install something with Helm
|
||||
|
||||
- Let's use the `stable/tomcat` chart as an example
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install a release called `tomcat` with the chart `stable/tomcat`:
|
||||
```bash
|
||||
helm upgrade tomcat stable/tomcat --install
|
||||
```
|
||||
|
||||
- Let's upgrade that release, and change a value:
|
||||
```bash
|
||||
helm upgrade tomcat stable/tomcat --set ingress.enabled=true
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Release history
|
||||
|
||||
- Helm stores successive revisions of each release
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the history for that release:
|
||||
```bash
|
||||
helm history tomcat
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Where does that come from?
|
||||
|
||||
---
|
||||
|
||||
## Investigate
|
||||
|
||||
- Possible options:
|
||||
|
||||
- local filesystem (no, because history is visible from other machines)
|
||||
|
||||
- persistent volumes (no, Helm works even without them)
|
||||
|
||||
- ConfigMaps, Secrets?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look for ConfigMaps and Secrets:
|
||||
```bash
|
||||
kubectl get configmaps,secrets
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
We should see a number of secrets with TYPE `helm.sh/release.v1`.
|
||||
|
||||
---
|
||||
|
||||
## Unpacking a secret
|
||||
|
||||
- Let's find out what is in these Helm secrets
|
||||
|
||||
.exercise[
|
||||
|
||||
- Examine the secret corresponding to the second release of `tomcat`:
|
||||
```bash
|
||||
kubectl describe secret sh.helm.release.v1.tomcat.v2
|
||||
```
|
||||
(`v1` is the secret format; `v2` means revision 2 of the `tomcat` release)
|
||||
|
||||
]
|
||||
|
||||
There is a key named `release`.
|
||||
|
||||
---
|
||||
|
||||
## Unpacking the release data
|
||||
|
||||
- Let's see what's in this `release` thing!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Dump the secret:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
-o go-template='{{ .data.release }}'
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Secrets are encoded in base64. We need to decode that!
|
||||
|
||||
---
|
||||
|
||||
## Decoding base64
|
||||
|
||||
- We can pipe the output through `base64 -d` or use go-template's `base64decode`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Decode the secret:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
-o go-template='{{ .data.release | base64decode }}'
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
... Wait, this *still* looks like base64. What's going on?
|
||||
|
||||
--
|
||||
|
||||
Let's try one more round of decoding!
|
||||
|
||||
---
|
||||
|
||||
## Decoding harder
|
||||
|
||||
- Just add one more base64 decode filter
|
||||
|
||||
.exercise[
|
||||
|
||||
- Decode it twice:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}'
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
... OK, that was *a lot* of binary data. What should we do with it?
|
||||
|
||||
---
|
||||
|
||||
## Guessing data type
|
||||
|
||||
- We could use `file` to figure out the data type
|
||||
|
||||
.exercise[
|
||||
|
||||
- Pipe the decoded release through `file -`:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}' \
|
||||
| file -
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
Gzipped data! It can be decoded with `gunzip -c`.
|
||||
|
||||
---
|
||||
|
||||
## Uncompressing the data
|
||||
|
||||
- Let's uncompress the data and save it to a file
|
||||
|
||||
.exercise[
|
||||
|
||||
- Rerun the previous command, but with `| gunzip -c > release-info` :
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}' \
|
||||
| gunzip -c > release-info
|
||||
```
|
||||
|
||||
- Look at `release-info`:
|
||||
```bash
|
||||
cat release-info
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
It's a bundle of ~~YAML~~ JSON.
|
||||
|
||||
---
|
||||
|
||||
## Looking at the JSON
|
||||
|
||||
If we inspect that JSON (e.g. with `jq keys release-info`), we see:
|
||||
|
||||
- `chart` (contains the entire chart used for that release)
|
||||
- `config` (contains the values that we've set)
|
||||
- `info` (date of deployment, status messages)
|
||||
- `manifest` (YAML generated from the templates)
|
||||
- `name` (name of the release, so `tomcat`)
|
||||
- `namespace` (namespace where we deployed the release)
|
||||
- `version` (revision number within that release; starts at 1)
|
||||
|
||||
The chart is in a structured format, but it's entirely captured in this JSON.
|
||||
|
||||
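For instance, assuming `jq` is available, we can pull individual fields out of that JSON:

```bash
# Extract the rendered YAML manifests for that revision
jq -r .manifest release-info

# Show the values that were set for that revision
jq .config release-info
```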
---
|
||||
|
||||
## Conclusions
|
||||
|
||||
- Helm stores each release information in a Secret in the namespace of the release
|
||||
|
||||
- The secret is a JSON object (gzipped and encoded in base64)
|
||||
|
||||
- It contains the manifests generated for that release
|
||||
|
||||
- ... And everything needed to rebuild these manifests
|
||||
|
||||
(including the full source of the chart, and the values used)
|
||||
|
||||
- This allows arbitrary rollbacks, as well as tweaking values even without having access to the source of the chart (or the chart repo) used for deployment
|
||||
178
slides/k8s/helm.md
Normal file
@@ -0,0 +1,178 @@
|
||||
# Managing stacks with Helm
|
||||
|
||||
- We created our first resources with `kubectl run`, `kubectl expose` ...
|
||||
|
||||
- We have also created resources by loading YAML files with `kubectl apply -f`
|
||||
|
||||
- For larger stacks, managing thousands of lines of YAML is unreasonable
|
||||
|
||||
- These YAML bundles need to be customized with variable parameters
|
||||
|
||||
(E.g.: number of replicas, image version to use ...)
|
||||
|
||||
- It would be nice to have an organized, versioned collection of bundles
|
||||
|
||||
- It would be nice to be able to upgrade/rollback these bundles carefully
|
||||
|
||||
- [Helm](https://helm.sh/) is an open source project offering all these things!
|
||||
|
||||
---
|
||||
|
||||
## Helm concepts
|
||||
|
||||
- `helm` is a CLI tool
|
||||
|
||||
- `tiller` is its companion server-side component
|
||||
|
||||
- A "chart" is an archive containing templatized YAML bundles
|
||||
|
||||
- Charts are versioned
|
||||
|
||||
- Charts can be stored on private or public repositories
|
||||
|
||||
---
|
||||
|
||||
## Installing Helm
|
||||
|
||||
- If the `helm` CLI is not installed in your environment, install it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check if `helm` is installed:
|
||||
```bash
|
||||
helm
|
||||
```
|
||||
|
||||
- If it's not installed, run the following command:
|
||||
```bash
|
||||
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Installing Tiller
|
||||
|
||||
- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace
|
||||
|
||||
- They can be managed (installed, upgraded...) with the `helm` CLI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy Tiller:
|
||||
```bash
|
||||
helm init
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If Tiller was already installed, don't worry: this won't break it.
|
||||
|
||||
At the end of the install process, you will see:
|
||||
|
||||
```
|
||||
Happy Helming!
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Fix account permissions
|
||||
|
||||
- Helm permission model requires us to tweak permissions
|
||||
|
||||
- In a more realistic deployment, you might create per-user or per-team
|
||||
service accounts, roles, and role bindings
|
||||
|
||||
.exercise[
|
||||
|
||||
- Grant `cluster-admin` role to `kube-system:default` service account:
|
||||
```bash
|
||||
kubectl create clusterrolebinding add-on-cluster-admin \
|
||||
--clusterrole=cluster-admin --serviceaccount=kube-system:default
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(Defining the exact roles and permissions on your cluster requires
|
||||
a deeper knowledge of Kubernetes' RBAC model. The command above is
|
||||
fine for personal and development clusters.)
|
||||
|
||||
---
|
||||
|
||||
## View available charts
|
||||
|
||||
- A public repo is pre-configured when installing Helm
|
||||
|
||||
- We can view available charts with `helm search` (and an optional keyword)
|
||||
|
||||
.exercise[
|
||||
|
||||
- View all available charts:
|
||||
```bash
|
||||
helm search
|
||||
```
|
||||
|
||||
- View charts related to `prometheus`:
|
||||
```bash
|
||||
helm search prometheus
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Install a chart
|
||||
|
||||
- Most charts use `LoadBalancer` service types by default
|
||||
|
||||
- Most charts require persistent volumes to store data
|
||||
|
||||
- We need to relax these requirements a bit
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the Prometheus metrics collector on our cluster:
|
||||
```bash
|
||||
helm install stable/prometheus \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.persistentVolume.enabled=false
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Where do these `--set` options come from?
|
||||
|
||||
---
|
||||
|
||||
## Inspecting a chart
|
||||
|
||||
- `helm inspect` shows details about a chart (including available options)
|
||||
|
||||
.exercise[
|
||||
|
||||
- See the metadata and all available options for `stable/prometheus`:
|
||||
```bash
|
||||
helm inspect stable/prometheus
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The chart's metadata includes a URL to the project's home page.
|
||||
|
||||
(Sometimes it conveniently points to the documentation for the chart.)
|
||||
|
||||
---
|
||||
|
||||
## Viewing installed charts
|
||||
|
||||
- Helm keeps track of what we've installed
|
||||
|
||||
.exercise[
|
||||
|
||||
- List installed Helm charts:
|
||||
```bash
|
||||
helm list
|
||||
```
|
||||
|
||||
]
|
||||
@@ -105,36 +105,19 @@
|
||||
|
||||
- Monitor pod CPU usage:
|
||||
```bash
|
||||
watch kubectl top pods -l app=busyhttp
|
||||
watch kubectl top pods
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait NAME```
|
||||
```tmux split-pane -v```
|
||||
```bash CLUSTERIP=$(kubectl get svc busyhttp -o jsonpath={.spec.clusterIP})```
|
||||
-->
|
||||
|
||||
- Monitor service latency:
|
||||
```bash
|
||||
httping http://`$CLUSTERIP`/
|
||||
httping http://`ClusterIP`/
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait connected to```
|
||||
```tmux split-pane -v```
|
||||
-->
|
||||
|
||||
- Monitor cluster events:
|
||||
```bash
|
||||
kubectl get events -w
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Normal```
|
||||
```tmux split-pane -v```
|
||||
```bash CLUSTERIP=$(kubectl get svc busyhttp -o jsonpath={.spec.clusterIP})```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
@@ -147,15 +130,9 @@
|
||||
|
||||
- Send a lot of requests to the service, with a concurrency level of 3:
|
||||
```bash
|
||||
ab -c 3 -n 100000 http://`$CLUSTERIP`/
|
||||
ab -c 3 -n 100000 http://`ClusterIP`/
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait be patient```
|
||||
```tmux split-pane -v```
|
||||
```tmux selectl even-vertical```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
The latency (reported by `httping`) should increase above 3s.
|
||||
@@ -216,20 +193,6 @@ This can also be set with `--cpu-percent=`.
|
||||
kubectl edit deployment busyhttp
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Please edit```
|
||||
```keys /resources```
|
||||
```key ^J```
|
||||
```keys $xxxo requests:```
|
||||
```key ^J```
|
||||
```key Space```
|
||||
```key Space```
|
||||
```keys cpu: "1"```
|
||||
```key Escape```
|
||||
```keys :wq```
|
||||
```key ^J```
|
||||
-->
|
||||
|
||||
- In the `containers` list, add the following block:
|
||||
```yaml
|
||||
resources:
|
||||
@@ -280,29 +243,3 @@ This can also be set with `--cpu-percent=`.
|
||||
- The metrics provided by metrics server are standard; everything else is custom
|
||||
|
||||
- For more details, see [this great blog post](https://medium.com/uptime-99/kubernetes-hpa-autoscaling-with-custom-and-external-metrics-da7f41ff7846) or [this talk](https://www.youtube.com/watch?v=gSiGFH4ZnS8)
|
||||
|
||||
---
|
||||
|
||||
## Cleanup
|
||||
|
||||
- Since `busyhttp` uses CPU cycles, let's stop it before moving on
|
||||
|
||||
.exercise[
|
||||
|
||||
- Delete the `busyhttp` Deployment:
|
||||
```bash
|
||||
kubectl delete deployment busyhttp
|
||||
```
|
||||
|
||||
<!--
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
@@ -120,13 +120,19 @@
|
||||
|
||||
- We want our ingress load balancer to be available on port 80
|
||||
|
||||
- The best way to do that would be with a `LoadBalancer` service
|
||||
- We could do that with a `LoadBalancer` service
|
||||
|
||||
... but it requires support from the underlying infrastructure
|
||||
|
||||
- Instead, we are going to use the `hostNetwork` mode on the Traefik pods
|
||||
- We could use pods specifying `hostPort: 80`
|
||||
|
||||
- Let's see what this `hostNetwork` mode is about ...
|
||||
... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
|
||||
|
||||
- We could use a `NodePort` service
|
||||
|
||||
... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
|
||||
|
||||
- Last resort: the `hostNetwork` mode
|
||||
|
||||
---
|
||||
|
||||
@@ -164,26 +170,6 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Other techniques to expose port 80
|
||||
|
||||
- We could use pods specifying `hostPort: 80`
|
||||
|
||||
... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)
|
||||
|
||||
- We could use a `NodePort` service
|
||||
|
||||
... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
|
||||
|
||||
- We could create a service with an external IP
|
||||
|
||||
... this would work, but would require a few extra steps
|
||||
|
||||
(figuring out the IP address and adding it to the service)
|
||||
|
||||
---
|
||||
|
||||
## Running Traefik
|
||||
|
||||
- The [Traefik documentation](https://docs.traefik.io/user-guide/kubernetes/#deploy-trfik-using-a-deployment-or-daemonset) tells us to pick between Deployment and Daemon Set
|
||||
@@ -429,7 +415,7 @@ This is normal: we haven't provided any ingress rule yet.
|
||||
Here is a minimal host-based ingress resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: cheddar
|
||||
@@ -537,184 +523,4 @@ spec:
|
||||
|
||||
- This should eventually stabilize
|
||||
|
||||
(remember that ingresses are currently `apiVersion: networking.k8s.io/v1beta1`)
|
||||
|
||||
---
|
||||
|
||||
## A special feature in action
|
||||
|
||||
- We're going to see how to implement *canary releases* with Traefik
|
||||
|
||||
- This feature is available on multiple ingress controllers
|
||||
|
||||
- ... But it is configured very differently on each of them
|
||||
|
||||
---
|
||||
|
||||
## Canary releases
|
||||
|
||||
- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload
|
||||
|
||||
- After deploying the canary, we compare its metrics to the normal release
|
||||
|
||||
- If the metrics look good, the canary will progressively receive more traffic
|
||||
|
||||
(until it gets 100% and becomes the new normal release)
|
||||
|
||||
- If the metrics aren't good, the canary is automatically removed
|
||||
|
||||
- When we deploy a bad release, only a tiny fraction of traffic is affected
|
||||
|
||||
---
|
||||
|
||||
## Various ways to implement canary
|
||||
|
||||
- Example 1: canary for a microservice
|
||||
|
||||
- 1% of all requests (sampled randomly) are sent to the canary
|
||||
- the remaining 99% are sent to the normal release
|
||||
|
||||
- Example 2: canary for a web app
|
||||
|
||||
- 1% of users are sent to the canary web site
|
||||
- the remaining 99% are sent to the normal release
|
||||
|
||||
- Example 3: canary for shipping physical goods
|
||||
|
||||
- 1% of orders are shipped with the canary process
|
||||
  - the remaining 99% are shipped with the normal process
|
||||
|
||||
- We're going to implement example 1 (per-request routing)
|
||||
|
||||
---
|
||||
|
||||
## Canary releases with Traefik
|
||||
|
||||
- We need to deploy the canary and expose it with a separate service
|
||||
|
||||
- Then, in the Ingress resource, we need:
|
||||
|
||||
- multiple `paths` entries (one for each service, canary and normal)
|
||||
|
||||
- an extra annotation indicating the weight of each service
|
||||
|
||||
- If we want, we can send requests to more than 2 services
|
||||
|
||||
- Let's send requests to our 3 cheesy services!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the resource shown on the next slide
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## The Ingress resource
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: cheeseplate
|
||||
annotations:
|
||||
traefik.ingress.kubernetes.io/service-weights: |
|
||||
cheddar: 50%
|
||||
wensleydale: 25%
|
||||
stilton: 25%
|
||||
spec:
|
||||
rules:
|
||||
- host: cheeseplate.`A.B.C.D`.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: cheddar
|
||||
servicePort: 80
|
||||
- path: /
|
||||
backend:
|
||||
          serviceName: wensleydale
|
||||
servicePort: 80
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: stilton
|
||||
servicePort: 80
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing the canary
|
||||
|
||||
- Let's check the percentage of requests going to each service
|
||||
|
||||
.exercise[
|
||||
|
||||
- Continuously send HTTP requests to the new ingress:
|
||||
```bash
|
||||
while sleep 0.1; do
|
||||
curl -s http://cheeseplate.A.B.C.D.nip.io/
|
||||
done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see a 50/25/25 request mix.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Load balancing fairness
|
||||
|
||||
Note: if we use odd request ratios, the load balancing algorithm might appear to be broken on a small scale (when sending a small number of requests), but on a large scale (with many requests) it will be fair.
|
||||
|
||||
For instance, with an 11%/89% ratio, we can see 79 requests going to the 89%-weighted service, and then requests alternating between the two services; then 79 requests again, etc.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Other ingress controllers
|
||||
|
||||
*Just to illustrate how different things are ...*
|
||||
|
||||
- With the NGINX ingress controller:
|
||||
|
||||
  - define two ingress resources
|
||||
<br/>
|
||||
(specifying rules with the same host+path)
|
||||
|
||||
- add `nginx.ingress.kubernetes.io/canary` annotations on each
|
||||
|
||||
|
||||
- With Linkerd2:
|
||||
|
||||
- define two services
|
||||
|
||||
- define an extra service for the weighted aggregate of the two
|
||||
|
||||
- define a TrafficSplit (this is a CRD introduced by the SMI spec)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## We need more than that
|
||||
|
||||
What we saw is just one of the multiple building blocks that we need to achieve a canary release.
|
||||
|
||||
We also need:
|
||||
|
||||
- metrics (latency, performance ...) for our releases
|
||||
|
||||
- automation to alter canary weights
|
||||
|
||||
(increase canary weight if metrics look good; decrease otherwise)
|
||||
|
||||
- a mechanism to manage the lifecycle of the canary releases
|
||||
|
||||
(create them, promote them, delete them ...)
|
||||
|
||||
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).
|
||||
(remember that ingresses are currently `apiVersion: extensions/v1beta1`)
|
||||
|
||||
@@ -1,131 +1,55 @@
|
||||
# Exposing containers
|
||||
|
||||
- We can connect to our pods using their IP address
|
||||
- `kubectl expose` creates a *service* for existing pods
|
||||
|
||||
- Then we need to figure out a lot of things:
|
||||
- A *service* is a stable address for a pod (or a bunch of pods)
|
||||
|
||||
- how do we look up the IP address of the pod(s)?
|
||||
- If we want to connect to our pod(s), we need to create a *service*
|
||||
|
||||
- how do we connect from outside the cluster?
|
||||
- Once a service is created, CoreDNS will allow us to resolve it by name
|
||||
|
||||
- how do we load balance traffic?
|
||||
(i.e. after creating service `hello`, the name `hello` will resolve to something)
|
||||
|
||||
- what if a pod fails?
|
||||
|
||||
- Kubernetes has a resource type named *Service*
|
||||
|
||||
- Services address all these questions!
|
||||
|
||||
---
|
||||
|
||||
## Services in a nutshell
|
||||
|
||||
- Services give us a *stable endpoint* to connect to a pod or a group of pods
|
||||
|
||||
- An easy way to create a service is to use `kubectl expose`
|
||||
|
||||
- If we have a deployment named `my-little-deploy`, we can run:
|
||||
|
||||
`kubectl expose deployment my-little-deploy --port=80`
|
||||
|
||||
... and this will create a service with the same name (`my-little-deploy`)
|
||||
|
||||
- Services are automatically added to an internal DNS zone
|
||||
|
||||
(in the example above, our code can now connect to http://my-little-deploy/)
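A quick way to check the result, assuming the deployment above exists:

```bash
kubectl get service my-little-deploy     # shows the service and its ClusterIP
kubectl get endpoints my-little-deploy   # shows the pod addresses behind it
```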
|
||||
|
||||
---
|
||||
|
||||
## Advantages of services
|
||||
|
||||
- We don't need to look up the IP address of the pod(s)
|
||||
|
||||
(we resolve the IP address of the service using DNS)
|
||||
|
||||
- There are multiple service types; some of them allow external traffic
|
||||
|
||||
(e.g. `LoadBalancer` and `NodePort`)
|
||||
|
||||
- Services provide load balancing
|
||||
|
||||
(for both internal and external traffic)
|
||||
|
||||
- Service addresses are independent from pods' addresses
|
||||
|
||||
(when a pod fails, the service seamlessly sends traffic to its replacement)
|
||||
|
||||
---
|
||||
|
||||
## Many kinds and flavors of service
|
||||
|
||||
- There are different types of services:
|
||||
- There are different types of services, detailed on the following slides:
|
||||
|
||||
`ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName`
|
||||
|
||||
- There are also *headless services*
|
||||
---
|
||||
|
||||
- Services can also have optional *external IPs*
|
||||
## Basic service types
|
||||
|
||||
- There is also another resource type called *Ingress*
|
||||
- `ClusterIP` (default type)
|
||||
|
||||
(specifically for HTTP services)
|
||||
- a virtual IP address is allocated for the service (in an internal, private range)
|
||||
- this IP address is reachable only from within the cluster (nodes and pods)
|
||||
- our code can connect to the service using the original port number
|
||||
|
||||
- Wow, that's a lot! Let's start with the basics ...
|
||||
- `NodePort`
|
||||
|
||||
- a port is allocated for the service (by default, in the 30000-32767 range)
|
||||
- that port is made available *on all our nodes* and anybody can connect to it
|
||||
- our code must be changed to connect to that new port number
|
||||
|
||||
These service types are always available.
|
||||
|
||||
Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` rules.
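If kube-proxy runs in the (common) iptables mode, we can peek at those rules; a minimal sketch:

```bash
# Run on a node, as root: list a few of the NAT rules managed by kube-proxy
sudo iptables-save -t nat | grep KUBE-SERVICES | head
```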
|
||||
|
||||
---
|
||||
|
||||
## `ClusterIP`
|
||||
## More service types
|
||||
|
||||
- It's the default service type
|
||||
- `LoadBalancer`
|
||||
|
||||
- A virtual IP address is allocated for the service
|
||||
- an external load balancer is allocated for the service
|
||||
- the load balancer is configured accordingly
|
||||
<br/>(e.g.: a `NodePort` service is created, and the load balancer sends traffic to that port)
|
||||
- available only when the underlying infrastructure provides some "load balancer as a service"
|
||||
<br/>(e.g. AWS, Azure, GCE, OpenStack...)
|
||||
|
||||
(in an internal, private range; e.g. 10.96.0.0/12)
|
||||
- `ExternalName`
|
||||
|
||||
- This IP address is reachable only from within the cluster (nodes and pods)
|
||||
|
||||
- Our code can connect to the service using the original port number
|
||||
|
||||
- Perfect for internal communication, within the cluster
|
||||
|
||||
---
|
||||
|
||||
## `LoadBalancer`
|
||||
|
||||
- An external load balancer is allocated for the service
|
||||
|
||||
(typically a cloud load balancer, e.g. ELB on AWS, GLB on GCE ...)
|
||||
|
||||
- This is available only when the underlying infrastructure provides some kind of
|
||||
"load balancer as a service"
|
||||
|
||||
- Each service of that type will typically cost a little bit of money
|
||||
|
||||
(e.g. a few cents per hour on AWS or GCE)
|
||||
|
||||
- Ideally, traffic would flow directly from the load balancer to the pods
|
||||
|
||||
- In practice, it will often flow through a `NodePort` first
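A minimal sketch, assuming a cluster with such a "load balancer as a service" and the `my-little-deploy` deployment from earlier:

```bash
kubectl expose deployment my-little-deploy --port=80 --type=LoadBalancer --name=my-little-lb
kubectl get service my-little-lb --watch   # wait for EXTERNAL-IP to be provisioned
```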
|
||||
|
||||
---
|
||||
|
||||
## `NodePort`
|
||||
|
||||
- A port number is allocated for the service
|
||||
|
||||
(by default, in the 30000-32767 range)
|
||||
|
||||
- That port is made available *on all our nodes* and anybody can connect to it
|
||||
|
||||
(we can connect to any node on that port to reach the service)
|
||||
|
||||
- Our code needs to be changed to connect to that new port number
|
||||
|
||||
- Under the hood: `kube-proxy` sets up a bunch of `iptables` rules on our nodes
|
||||
|
||||
- Sometimes, it's the only available option for external traffic
|
||||
|
||||
(e.g. most clusters deployed with kubeadm or on-premises)
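A minimal sketch, again with the hypothetical `my-little-deploy` deployment:

```bash
kubectl expose deployment my-little-deploy --port=80 --type=NodePort --name=my-little-np
kubectl get service my-little-np           # note the allocated port (30000-32767)
curl http://<any-node-address>:<allocated-port>/
```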
|
||||
- the DNS entry managed by CoreDNS will just be a `CNAME` to a provided record
|
||||
- no port, no IP address, nothing else is allocated
|
||||
|
||||
---
|
||||
|
||||
@@ -162,10 +86,7 @@
|
||||
kubectl get pods -w
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait NAME```
|
||||
```tmux split-pane -h```
|
||||
-->
|
||||
<!-- ```keys ^C``` -->
|
||||
|
||||
- Create a deployment for this very lightweight HTTP server:
|
||||
```bash
|
||||
@@ -213,7 +134,9 @@
|
||||
|
||||
- As a result: you *have to* indicate the port number for your service
|
||||
|
||||
(with some exceptions, like `ExternalName` or headless services, covered later)
|
||||
- Running services with arbitrary port (or port ranges) requires hacks
|
||||
|
||||
(e.g. host networking mode)
|
||||
|
||||
---
|
||||
|
||||
@@ -230,8 +153,6 @@
|
||||
|
||||
<!--
|
||||
```hide kubectl wait deploy httpenv --for condition=available```
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
- Send a few requests:
|
||||
@@ -254,48 +175,7 @@ Try it a few times! Our requests are load balanced across multiple pods.
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `ExternalName`
|
||||
|
||||
- Services of type `ExternalName` are quite different
|
||||
|
||||
- No load balancer (internal or external) is created
|
||||
|
||||
- Only a DNS entry gets added to the DNS managed by Kubernetes
|
||||
|
||||
- That DNS entry will just be a `CNAME` to a provided record
|
||||
|
||||
Example:
|
||||
```bash
|
||||
kubectl create service externalname k8s --external-name kubernetes.io
|
||||
```
|
||||
*Creates a CNAME `k8s` pointing to `kubernetes.io`*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## External IPs
|
||||
|
||||
- We can add an External IP to a service, e.g.:
|
||||
```bash
|
||||
kubectl expose deploy my-little-deploy --port=80 --external-ip=1.2.3.4
|
||||
```
|
||||
|
||||
- `1.2.3.4` should be the address of one of our nodes
|
||||
|
||||
(it could also be a virtual address, service address, or VIP, shared by multiple nodes)
|
||||
|
||||
- Connections to `1.2.3.4:80` will be sent to our service
|
||||
|
||||
- External IPs will also show up on services of type `LoadBalancer`
|
||||
|
||||
(they will be added automatically by the process provisioning the load balancer)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Headless services
|
||||
## If we don't need a load balancer
|
||||
|
||||
- Sometimes, we want to access our scaled services directly:
|
||||
|
||||
@@ -315,7 +195,7 @@ class: extra-details
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Creating a headless service
|
||||
## Headless services
|
||||
|
||||
- A headless service is obtained by setting the `clusterIP` field to `None`
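A minimal sketch, assuming pods labeled `app=httpenv` listening on port 8888 (adjust the selector and port to match your deployment):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: httpenv-headless
spec:
  clusterIP: None
  selector:
    app: httpenv
  ports:
  - port: 8888
EOF
```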
|
||||
|
||||
@@ -399,42 +279,18 @@ error: the server doesn't have a resource type "endpoint"
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
## Exposing services to the outside world
|
||||
|
||||
## The DNS zone
|
||||
- The default type (ClusterIP) only works for internal traffic
|
||||
|
||||
- In the `kube-system` namespace, there should be a service named `kube-dns`
|
||||
- If we want to accept external traffic, we can use one of these:
|
||||
|
||||
- This is the internal DNS server that can resolve service names
|
||||
- NodePort (expose a service on a TCP port in the 30000-32767 range)
|
||||
|
||||
- The default domain name for the service we created is `default.svc.cluster.local`
|
||||
- LoadBalancer (provision a cloud load balancer for our service)
|
||||
|
||||
.exercise[
|
||||
- ExternalIP (use one node's external IP address)
|
||||
|
||||
- Get the IP address of the internal DNS server:
|
||||
```bash
|
||||
IP=$(kubectl -n kube-system get svc kube-dns -o jsonpath={.spec.clusterIP})
|
||||
```
|
||||
- Ingress (a special mechanism for HTTP services)
|
||||
|
||||
- Resolve the cluster IP for the `httpenv` service:
|
||||
```bash
|
||||
host httpenv.default.svc.cluster.local $IP
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `Ingress`
|
||||
|
||||
- Ingresses are another type (kind) of resource
|
||||
|
||||
- They are specifically for HTTP services
|
||||
|
||||
(not TCP or UDP)
|
||||
|
||||
- They can also handle TLS certificates, URL rewriting ...
|
||||
|
||||
- They require an *Ingress Controller* to function
|
||||
*We'll see NodePorts and Ingresses in more detail later.*
|
||||
|
||||
@@ -20,50 +20,6 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl` is the new SSH
|
||||
|
||||
- We often start managing servers with SSH
|
||||
|
||||
(installing packages, troubleshooting ...)
|
||||
|
||||
- At scale, it becomes tedious, repetitive, error-prone
|
||||
|
||||
- Instead, we use config management, central logging, etc.
|
||||
|
||||
- In many cases, we still need SSH:
|
||||
|
||||
- as the underlying access method (e.g. Ansible)
|
||||
|
||||
- to debug tricky scenarios
|
||||
|
||||
- to inspect and poke at things
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The parallel with `kubectl`
|
||||
|
||||
- We often start managing Kubernetes clusters with `kubectl`
|
||||
|
||||
(deploying applications, troubleshooting ...)
|
||||
|
||||
- At scale (with many applications or clusters), it becomes tedious, repetitive, error-prone
|
||||
|
||||
- Instead, we use automated pipelines, observability tooling, etc.
|
||||
|
||||
- In many cases, we still need `kubectl`:
|
||||
|
||||
- to debug tricky scenarios
|
||||
|
||||
- to inspect and poke at things
|
||||
|
||||
- The Kubernetes API is always the underlying access method
|
||||
|
||||
---
|
||||
|
||||
## `kubectl get`
|
||||
|
||||
- Let's look at our `Node` resources with `kubectl get`!
|
||||
@@ -115,7 +71,7 @@ class: extra-details
|
||||
|
||||
- Show the capacity of all our nodes as a stream of JSON objects:
|
||||
```bash
|
||||
kubectl get nodes -o json |
|
||||
kubectl get nodes -o json |
|
||||
jq ".items[] | {name:.metadata.name} + .status.capacity"
|
||||
```
|
||||
|
||||
@@ -226,6 +182,53 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Services
|
||||
|
||||
- A *service* is a stable endpoint to connect to "something"
|
||||
|
||||
(In the initial proposal, they were called "portals")
|
||||
|
||||
.exercise[
|
||||
|
||||
- List the services on our cluster with one of these commands:
|
||||
```bash
|
||||
kubectl get services
|
||||
kubectl get svc
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
There is already one service on our cluster: the Kubernetes API itself.
|
||||
|
||||
---
|
||||
|
||||
## ClusterIP services
|
||||
|
||||
- A `ClusterIP` service is internal, available from the cluster only
|
||||
|
||||
- This is useful for introspection from within containers
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to connect to the API:
|
||||
```bash
|
||||
curl -k https://`10.96.0.1`
|
||||
```
|
||||
|
||||
- `-k` is used to skip certificate verification
|
||||
|
||||
- Make sure to replace 10.96.0.1 with the CLUSTER-IP shown by `kubectl get svc`
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
The error that we see is expected: the Kubernetes API requires authentication.
|
||||
|
||||
---
|
||||
|
||||
## Listing running containers
|
||||
|
||||
- Containers are manipulated through *pods*
|
||||
@@ -464,117 +467,3 @@ class: extra-details
|
||||
|
||||
[KEP-0009]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0009-node-heartbeat.md
|
||||
[node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller
|
||||
|
||||
---
|
||||
|
||||
## Services
|
||||
|
||||
- A *service* is a stable endpoint to connect to "something"
|
||||
|
||||
(In the initial proposal, they were called "portals")
|
||||
|
||||
.exercise[
|
||||
|
||||
- List the services on our cluster with one of these commands:
|
||||
```bash
|
||||
kubectl get services
|
||||
kubectl get svc
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
There is already one service on our cluster: the Kubernetes API itself.
|
||||
|
||||
---
|
||||
|
||||
## ClusterIP services
|
||||
|
||||
- A `ClusterIP` service is internal, available from the cluster only
|
||||
|
||||
- This is useful for introspection from within containers
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to connect to the API:
|
||||
```bash
|
||||
curl -k https://`10.96.0.1`
|
||||
```
|
||||
|
||||
- `-k` is used to skip certificate verification
|
||||
|
||||
- Make sure to replace 10.96.0.1 with the CLUSTER-IP shown by `kubectl get svc`
|
||||
|
||||
]
|
||||
|
||||
The command above should either time out, or show an authentication error. Why?
|
||||
|
||||
---
|
||||
|
||||
## Time out
|
||||
|
||||
- Connections to ClusterIP services only work *from within the cluster*
|
||||
|
||||
- If we are outside the cluster, the `curl` command will probably time out
|
||||
|
||||
(Because the IP address, e.g. 10.96.0.1, isn't routed properly outside the cluster)
|
||||
|
||||
- This is the case with most "real" Kubernetes clusters
|
||||
|
||||
- To try the connection from within the cluster, we can use [shpod](https://github.com/jpetazzo/shpod)
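As an alternative sketch, we can run a throwaway pod and query the API from inside the cluster (the image and pod name are arbitrary; replace 10.96.0.1 with your CLUSTER-IP):

```bash
kubectl run apitest --image=curlimages/curl --restart=Never --rm -it \
        --command -- curl -k https://10.96.0.1
```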
|
||||
|
||||
---
|
||||
|
||||
## Authentication error
|
||||
|
||||
This is what we should see when connecting from within the cluster:
|
||||
```json
|
||||
$ curl -k https://10.96.0.1
|
||||
{
|
||||
"kind": "Status",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
|
||||
},
|
||||
"status": "Failure",
|
||||
"message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
|
||||
"reason": "Forbidden",
|
||||
"details": {
|
||||
|
||||
},
|
||||
"code": 403
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Explanations
|
||||
|
||||
- We can see `kind`, `apiVersion`, `metadata`
|
||||
|
||||
- These are typical of a Kubernetes API reply
|
||||
|
||||
- Because we *are* talking to the Kubernetes API
|
||||
|
||||
- The Kubernetes API tells us "Forbidden"
|
||||
|
||||
(because it requires authentication)
|
||||
|
||||
- The Kubernetes API is reachable from within the cluster
|
||||
|
||||
(many apps integrating with Kubernetes will use this)
|
||||
|
||||
---
|
||||
|
||||
## DNS integration
|
||||
|
||||
- Each service also gets a DNS record
|
||||
|
||||
- The Kubernetes DNS resolver is available *from within pods*
|
||||
|
||||
(and sometimes, from within nodes, depending on configuration)
|
||||
|
||||
- Code running in pods can connect to services using their name
|
||||
|
||||
(e.g. https://kubernetes/...)
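A quick way to see this in action, with a throwaway pod (names are arbitrary):

```bash
kubectl run dnstest --image=alpine --restart=Never --rm -it -- \
        nslookup kubernetes.default.svc.cluster.local
```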
|
||||
|
||||
@@ -101,7 +101,7 @@ If we wanted to talk to the API, we would need to:
|
||||
|
||||
<!--
|
||||
```wait /version```
|
||||
```key ^J```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
- Terminate the proxy:
|
||||
@@ -115,22 +115,6 @@ The output is a list of available API routes.
|
||||
|
||||
---
|
||||
|
||||
## OpenAPI (fka Swagger)
|
||||
|
||||
- The Kubernetes API serves an OpenAPI Specification
|
||||
|
||||
(OpenAPI was formerly known as Swagger)
|
||||
|
||||
- OpenAPI has many advantages
|
||||
|
||||
(generate client library code, generate test code ...)
|
||||
|
||||
- For us, this means we can explore the API with [Swagger UI](https://swagger.io/tools/swagger-ui/)
|
||||
|
||||
(for instance with the [Swagger UI add-on for Firefox](https://addons.mozilla.org/en-US/firefox/addon/swagger-ui-ff/))
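We can also fetch that specification directly from the API server; for instance (assuming `jq` is installed):

```bash
kubectl get --raw /openapi/v2 | jq '.info'
```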
|
||||
|
||||
---
|
||||
|
||||
## `kubectl proxy` is intended for local use
|
||||
|
||||
- By default, the proxy listens on port 8001
|
||||
@@ -151,8 +135,6 @@ The output is a list of available API routes.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running `kubectl proxy` on a remote machine
|
||||
|
||||
- If we wanted to connect to the proxy from another machine, we would need to:
|
||||
@@ -170,8 +152,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Security considerations
|
||||
|
||||
- Running `kubectl proxy` openly is a huge security risk
|
||||
|
||||
@@ -20,9 +20,10 @@
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's ping the address of `localhost`, the loopback interface:
|
||||
- Let's ping `1.1.1.1`, Cloudflare's
|
||||
[public DNS resolver](https://blog.cloudflare.com/announcing-1111/):
|
||||
```bash
|
||||
kubectl run pingpong --image alpine ping 127.0.0.1
|
||||
kubectl run pingpong --image alpine ping 1.1.1.1
|
||||
```
|
||||
|
||||
<!-- ```hide kubectl wait deploy/pingpong --for condition=available``` -->
|
||||
@@ -152,11 +153,9 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
|
||||
kubectl logs deploy/pingpong --tail 1 --follow
|
||||
```
|
||||
|
||||
- Leave that command running, so that we can keep an eye on these logs
|
||||
|
||||
<!--
|
||||
```wait seq=3```
|
||||
```tmux split-pane -h```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
@@ -187,54 +186,6 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
|
||||
---
|
||||
|
||||
## Log streaming
|
||||
|
||||
- Let's look again at the output of `kubectl logs`
|
||||
|
||||
(the one we started before scaling up)
|
||||
|
||||
- `kubectl logs` shows us one line per second
|
||||
|
||||
- We could expect 3 lines per second
|
||||
|
||||
(since we should now have 3 pods running `ping`)
|
||||
|
||||
- Let's try to figure out what's happening!
|
||||
|
||||
---
|
||||
|
||||
## Streaming logs of multiple pods
|
||||
|
||||
- What happens if we restart `kubectl logs`?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Interrupt `kubectl logs` (with Ctrl-C)
|
||||
|
||||
<!--
|
||||
```tmux last-pane```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
- Restart it:
|
||||
```bash
|
||||
kubectl logs deploy/pingpong --tail 1 --follow
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait using pod/pingpong-```
|
||||
```tmux last-pane```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them.
|
||||
|
||||
Let's leave `kubectl logs` running while we keep exploring.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Resilience
|
||||
|
||||
- The *deployment* `pingpong` watches its *replica set*
|
||||
@@ -245,56 +196,27 @@ Let's leave `kubectl logs` running while we keep exploring.
|
||||
|
||||
.exercise[
|
||||
|
||||
- In a separate window, watch the list of pods:
|
||||
- In a separate window, list pods, and keep watching them:
|
||||
```bash
|
||||
watch kubectl get pods
|
||||
kubectl get pods -w
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Every 2.0s```
|
||||
```tmux split-pane -v```
|
||||
```wait Running```
|
||||
```keys ^C```
|
||||
```hide kubectl wait deploy pingpong --for condition=available```
|
||||
```keys kubectl delete pod ping```
|
||||
```copypaste pong-..........-.....```
|
||||
-->
|
||||
|
||||
- Destroy the pod currently shown by `kubectl logs`:
|
||||
- Destroy a pod:
|
||||
```
|
||||
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
|
||||
```
|
||||
|
||||
<!--
|
||||
```tmux select-pane -t 0```
|
||||
```copy pingpong-[^-]*-.....```
|
||||
```tmux last-pane```
|
||||
```keys kubectl delete pod ```
|
||||
```paste```
|
||||
```key ^J```
|
||||
```check```
|
||||
```key ^D```
|
||||
```tmux select-pane -t 1```
|
||||
```key ^C```
|
||||
```key ^D```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What happened?
|
||||
|
||||
- `kubectl delete pod` terminates the pod gracefully
|
||||
|
||||
(sending it the TERM signal and waiting for it to shut down)
|
||||
|
||||
- As soon as the pod is in "Terminating" state, the Replica Set replaces it
|
||||
|
||||
- But we can still see the output of the "Terminating" pod in `kubectl logs`
|
||||
|
||||
- Until 30 seconds later, when the grace period expires
|
||||
|
||||
- The pod is then killed, and `kubectl logs` exits
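If we don't want to wait, we can request a shorter grace period; a minimal sketch:

```bash
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy --grace-period=1
```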
|
||||
|
||||
---
|
||||
|
||||
|
||||
## What if we wanted something different?
|
||||
|
||||
- What if we wanted to start a "one-shot" container that *doesn't* get restarted?
|
||||
@@ -312,73 +234,6 @@ Let's leave `kubectl logs` running while we keep exploring.
|
||||
|
||||
---
|
||||
|
||||
## Scheduling periodic background work
|
||||
|
||||
- A Cron Job is a job that will be executed at specific intervals
|
||||
|
||||
(the name comes from the traditional cronjobs executed by the UNIX crond)
|
||||
|
||||
- It requires a *schedule*, represented as five space-separated fields:
|
||||
|
||||
- minute [0,59]
|
||||
- hour [0,23]
|
||||
- day of the month [1,31]
|
||||
- month of the year [1,12]
|
||||
- day of the week ([0,6] with 0=Sunday)
|
||||
|
||||
- `*` means "all valid values"; `/N` means "every N"
|
||||
|
||||
- Example: `*/3 * * * *` means "every three minutes"
|
||||
|
||||
---
|
||||
|
||||
## Creating a Cron Job
|
||||
|
||||
- Let's create a simple job to be executed every three minutes
|
||||
|
||||
- Cron Jobs need to terminate, otherwise they'd run forever
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the Cron Job:
|
||||
```bash
|
||||
kubectl run every3mins --schedule="*/3 * * * *" --restart=OnFailure \
|
||||
--image=alpine sleep 10
|
||||
```
|
||||
|
||||
- Check the resource that was created:
|
||||
```bash
|
||||
kubectl get cronjobs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Cron Jobs in action
|
||||
|
||||
- At the specified schedule, the Cron Job will create a Job
|
||||
|
||||
- The Job will create a Pod
|
||||
|
||||
- The Job will make sure that the Pod completes
|
||||
|
||||
(re-creating another one if it fails, for instance if its node fails)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the Jobs that are created:
|
||||
```bash
|
||||
kubectl get jobs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(It will take a few minutes before the first job is scheduled.)
|
||||
|
||||
---
|
||||
|
||||
|
||||
## What about that deprecation warning?
|
||||
|
||||
- As we can see from the previous slide, `kubectl run` can do many things
|
||||
@@ -402,12 +257,12 @@ Let's leave `kubectl logs` running while we keep exploring.
|
||||
|
||||
## Various ways of creating resources
|
||||
|
||||
- `kubectl run`
|
||||
- `kubectl run`
|
||||
|
||||
- easy way to get started
|
||||
- versatile
|
||||
|
||||
- `kubectl create <resource>`
|
||||
- `kubectl create <resource>`
|
||||
|
||||
- explicit, but lacks some features
|
||||
- can't create a CronJob before Kubernetes 1.14
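Starting with Kubernetes 1.14, the equivalent explicit command would look like this sketch:

```bash
kubectl create cronjob every3mins --schedule="*/3 * * * *" \
        --image=alpine -- sleep 10
```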
|
||||
@@ -454,7 +309,7 @@ Let's leave `kubectl logs` running while we keep exploring.
|
||||
|
||||
<!--
|
||||
```wait seq=```
|
||||
```key ^C```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
@@ -483,8 +338,6 @@ class: extra-details
|
||||
kubectl logs -l run=pingpong --tail 1 -f
|
||||
```
|
||||
|
||||
<!-- ```wait error:``` -->
|
||||
|
||||
]
|
||||
|
||||
We see a message like the following one:
|
||||
@@ -553,36 +406,15 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
## Aren't we flooding 1.1.1.1?
|
||||
|
||||
## Party tricks involving IP addresses
|
||||
- If you're wondering this, good question!
|
||||
|
||||
- It is possible to specify an IP address with fewer than 4 bytes
|
||||
- Don't worry, though:
|
||||
|
||||
(example: `127.1`)
|
||||
*APNIC's research group held the IP addresses 1.1.1.1 and 1.0.0.1. While the addresses were valid, so many people had entered them into various random systems that they were continuously overwhelmed by a flood of garbage traffic. APNIC wanted to study this garbage traffic but any time they'd tried to announce the IPs, the flood would overwhelm any conventional network.*
|
||||
|
||||
- Zeroes are then inserted in the middle
|
||||
(Source: https://blog.cloudflare.com/announcing-1111/)
|
||||
|
||||
- As a result, `127.1` expands to `127.0.0.1`
|
||||
|
||||
- So we can `ping 127.1` to ping `localhost`!
|
||||
|
||||
(See [this blog post](https://ma.ttias.be/theres-more-than-one-way-to-write-an-ip-address/) for more details.)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## More party tricks with IP addresses
|
||||
|
||||
- We can also ping `1.1`
|
||||
|
||||
- `1.1` will expand to `1.0.0.1`
|
||||
|
||||
- This is one of the addresses of Cloudflare's
|
||||
[public DNS resolver](https://blog.cloudflare.com/announcing-1111/)
|
||||
|
||||
- This is a quick way to check connectivity
|
||||
|
||||
(if we can reach 1.1, we probably have internet access)
|
||||
- It's very unlikely that our concerted pings manage to produce
|
||||
even a modest blip at Cloudflare's NOC!
|
||||
|
||||
@@ -12,9 +12,9 @@
|
||||
|
||||
<!--
|
||||
```wait RESTARTS```
|
||||
```key ^C```
|
||||
```keys ^C```
|
||||
```wait AVAILABLE```
|
||||
```key ^C```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
- Now, create more `worker` replicas:
|
||||
|
||||
@@ -97,8 +97,6 @@
|
||||
ship init https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
<!-- ```wait Open browser``` -->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
@@ -191,11 +189,6 @@
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=kustomcoins
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait units of work done```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
|
||||