Compare commits
181 Commits
2020-10-en
...
2021-03-lk
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1292168d4e | ||
|
|
220103f2fd | ||
|
|
372eb2e717 | ||
|
|
6185ad6ff3 | ||
|
|
ee9c114da0 | ||
|
|
edf496df13 | ||
|
|
018f06a409 | ||
|
|
c283d7e7d6 | ||
|
|
cd9f1cc645 | ||
|
|
d74a331a05 | ||
|
|
53a3c8a86a | ||
|
|
9c3ab19918 | ||
|
|
a8ecffbaf0 | ||
|
|
e75e4d7f2c | ||
|
|
84c33b9eae | ||
|
|
e606cd2b21 | ||
|
|
d217e52ab5 | ||
|
|
f3c3646298 | ||
|
|
f25bf60d46 | ||
|
|
6ab11ca91c | ||
|
|
a5d857edd4 | ||
|
|
25d6073b17 | ||
|
|
216fefad23 | ||
|
|
f3eb9ce12f | ||
|
|
a484425c81 | ||
|
|
67806fc592 | ||
|
|
cfcf874bac | ||
|
|
858afc846c | ||
|
|
629b4d1037 | ||
|
|
58f2894b54 | ||
|
|
df1db67e53 | ||
|
|
068c81bdcd | ||
|
|
911d78aede | ||
|
|
305674fa3c | ||
|
|
6bdc687cc7 | ||
|
|
49e3a0b75f | ||
|
|
5acb05dfff | ||
|
|
edaef92b35 | ||
|
|
63fccb495f | ||
|
|
055c8a7267 | ||
|
|
f72847bc81 | ||
|
|
4be82f4f57 | ||
|
|
cb760dbe94 | ||
|
|
f306749f68 | ||
|
|
8d20fa4654 | ||
|
|
249d446ef2 | ||
|
|
fe84dec863 | ||
|
|
ce8dc2cdff | ||
|
|
bc33f1f5df | ||
|
|
8597ca1956 | ||
|
|
2300d0719b | ||
|
|
2e6230a9a0 | ||
|
|
ae17c2479c | ||
|
|
23f7e8cff9 | ||
|
|
f72cf16c82 | ||
|
|
6ec8849da1 | ||
|
|
6c11de207a | ||
|
|
2295e4f3de | ||
|
|
18853b2497 | ||
|
|
426957bdca | ||
|
|
6bc08c0a7e | ||
|
|
88d4e5ff54 | ||
|
|
e3e4d04202 | ||
|
|
be6d982e2c | ||
|
|
04bc8a9f60 | ||
|
|
b0dc1c7c3f | ||
|
|
bb1b225026 | ||
|
|
2160aa7f40 | ||
|
|
8f75a4cd7f | ||
|
|
45213a8f2e | ||
|
|
f03aedd024 | ||
|
|
fcfcb127b4 | ||
|
|
5380b2d52a | ||
|
|
cc5da860b9 | ||
|
|
9e9b17f6c9 | ||
|
|
b9ea938157 | ||
|
|
b23aacdce0 | ||
|
|
c3d6e5e660 | ||
|
|
907adf8075 | ||
|
|
dff505ac76 | ||
|
|
df0ffc4d75 | ||
|
|
02278b3748 | ||
|
|
ab959220ba | ||
|
|
b4576e39d0 | ||
|
|
894dafeecb | ||
|
|
366c656d82 | ||
|
|
a60f929232 | ||
|
|
fdc58cafda | ||
|
|
fc170fe4a7 | ||
|
|
8de186b909 | ||
|
|
b816d075d4 | ||
|
|
6303b67b86 | ||
|
|
4f3bb9beb2 | ||
|
|
1f34da55b3 | ||
|
|
f30792027f | ||
|
|
74679ab77e | ||
|
|
71ce2eb31a | ||
|
|
eb96dd21bb | ||
|
|
b1adca025d | ||
|
|
e82d2812aa | ||
|
|
9c8c3ef537 | ||
|
|
2f2948142a | ||
|
|
2516b2d32b | ||
|
|
42f4b65c87 | ||
|
|
989a62b5ff | ||
|
|
b5eb59ab80 | ||
|
|
10920509c3 | ||
|
|
955149e019 | ||
|
|
111ff30c38 | ||
|
|
6c038a5d33 | ||
|
|
6737a20840 | ||
|
|
1d1060a319 | ||
|
|
93e9a60634 | ||
|
|
de2c0e72c3 | ||
|
|
41204c948b | ||
|
|
553b1f7871 | ||
|
|
bd168f7676 | ||
|
|
3a527649d1 | ||
|
|
ecbbcf8b51 | ||
|
|
29edb1aefe | ||
|
|
bd3c91f342 | ||
|
|
fa709f0cb4 | ||
|
|
543b44fb29 | ||
|
|
536a9cc44b | ||
|
|
2ff3d88bab | ||
|
|
295ee9b6b4 | ||
|
|
17c5f6de01 | ||
|
|
556dbb965c | ||
|
|
32250f8053 | ||
|
|
bdede6de07 | ||
|
|
eefdc21488 | ||
|
|
e145428910 | ||
|
|
76789b6113 | ||
|
|
f9660ba9dc | ||
|
|
c2497508f8 | ||
|
|
b5d3b213b1 | ||
|
|
b4c76ad11d | ||
|
|
b251ff3812 | ||
|
|
ede4ea0dd5 | ||
|
|
2ab06c6dfd | ||
|
|
3a01deb039 | ||
|
|
b88f63e1f7 | ||
|
|
918311ac51 | ||
|
|
73e8110f09 | ||
|
|
ecb5106d59 | ||
|
|
e4d8cd4952 | ||
|
|
c4aedbd327 | ||
|
|
2fb3584b1b | ||
|
|
cb90cc9a1e | ||
|
|
bf28dff816 | ||
|
|
b5cb871c69 | ||
|
|
aa8f538574 | ||
|
|
ebf2e23785 | ||
|
|
0553a1ba8b | ||
|
|
9d47177028 | ||
|
|
9d4a035497 | ||
|
|
6fe74cb35c | ||
|
|
43aa41ed51 | ||
|
|
f6e810f648 | ||
|
|
4c710d6826 | ||
|
|
410c98399e | ||
|
|
19c9843a81 | ||
|
|
69d084e04a | ||
|
|
1300d76890 | ||
|
|
0040313371 | ||
|
|
c9e04b906d | ||
|
|
41f66f4144 | ||
|
|
aced587fd0 | ||
|
|
749b3d1648 | ||
|
|
c40cc71bbc | ||
|
|
69b775ef27 | ||
|
|
3bfc14c5f7 | ||
|
|
97984af8a2 | ||
|
|
9b31c45899 | ||
|
|
c0db28d439 | ||
|
|
0e49bfa837 | ||
|
|
fc9c0a6285 | ||
|
|
d4914fa168 | ||
|
|
e4edd9445c | ||
|
|
ba7deefce5 | ||
|
|
be104f1b44 |
49
dockercoins/Tiltfile
Normal file
@@ -0,0 +1,49 @@
|
||||
k8s_yaml(blob('''
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
name: registry
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: registry
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
containers:
|
||||
- image: registry
|
||||
name: registry
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
name: registry
|
||||
spec:
|
||||
ports:
|
||||
- port: 5000
|
||||
protocol: TCP
|
||||
targetPort: 5000
|
||||
nodePort: 30555
|
||||
selector:
|
||||
app: registry
|
||||
type: NodePort
|
||||
'''))
|
||||
default_registry('localhost:30555')
|
||||
docker_build('dockercoins/hasher', 'hasher')
|
||||
docker_build('dockercoins/rng', 'rng')
|
||||
docker_build('dockercoins/webui', 'webui')
|
||||
docker_build('dockercoins/worker', 'worker')
|
||||
k8s_yaml('../k8s/dockercoins.yaml')
|
||||
|
||||
# Uncomment the following line to let tilt run with the default kubeadm cluster-admin context.
|
||||
#allow_k8s_contexts('kubernetes-admin@kubernetes')
|
||||
|
||||
# While we're here: if you're controlling a remote cluster, uncomment that line.
|
||||
# It will create a port forward so that you can access the remote registry.
|
||||
#k8s_resource(workload='registry', port_forwards='30555:5000')
|
||||
11
k8s/cm-certificate.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: xyz.A.B.C.D.nip.io
|
||||
spec:
|
||||
secretName: xyz.A.B.C.D.nip.io
|
||||
dnsNames:
|
||||
- xyz.A.B.C.D.nip.io
|
||||
issuerRef:
|
||||
name: letsencrypt-staging
|
||||
kind: ClusterIssuer
|
||||
18
k8s/cm-clusterissuer.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging
|
||||
spec:
|
||||
acme:
|
||||
# Remember to update this if you use this manifest to obtain real certificates :)
|
||||
email: hello@example.com
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
# To use the production environment, use the following line instead:
|
||||
#server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: issuer-letsencrypt-staging
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
|
||||
@@ -62,11 +62,8 @@ spec:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- consul
|
||||
matchLabels:
|
||||
app: consul
|
||||
topologyKey: kubernetes.io/hostname
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
@@ -88,7 +85,4 @@ spec:
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- consul leave
|
||||
command: [ "sh", "-c", "consul leave" ]
|
||||
|
||||
@@ -69,11 +69,8 @@ spec:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- persistentconsul
|
||||
matchLabels:
|
||||
app: consul
|
||||
topologyKey: kubernetes.io/hostname
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
@@ -98,7 +95,4 @@ spec:
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- consul leave
|
||||
command: [ "sh", "-c", "consul leave" ]
|
||||
|
||||
336
k8s/dashboard-with-token.yaml
Normal file
@@ -0,0 +1,336 @@
|
||||
# This file is based on the following manifest:
|
||||
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
|
||||
# It adds a ServiceAccount that has cluster-admin privileges on the cluster,
|
||||
# and exposes the dashboard on a NodePort. It makes it easier to do quick demos
|
||||
# of the Kubernetes dashboard, without compromising the security too much.
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster", "dashboard-metrics-scraper"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
selector:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-cluster-admin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
30
k8s/event-node.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
kind: Event
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
generateName: hello-
|
||||
labels:
|
||||
container.training/test: ""
|
||||
|
||||
#eventTime: "2020-07-04T00:00:00.000000Z"
|
||||
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
|
||||
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
|
||||
#count: 42
|
||||
|
||||
involvedObject:
|
||||
kind: Node
|
||||
apiVersion: v1
|
||||
name: kind-control-plane
|
||||
# Note: the uid should be the Node name (not the uid of the Node).
|
||||
# This might be specific to global objects.
|
||||
uid: kind-control-plane
|
||||
|
||||
type: Warning
|
||||
reason: NodeOverheat
|
||||
message: "Node temperature exceeds critical threshold"
|
||||
action: Hello
|
||||
source:
|
||||
component: thermal-probe
|
||||
#host: node1
|
||||
#reportingComponent: ""
|
||||
#reportingInstance: ""
|
||||
|
||||
36
k8s/event-pod.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
kind: Event
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
# One convention is to use <objectname>.<timestamp>,
|
||||
# where the timestamp is taken with a nanosecond
|
||||
# precision and expressed in hexadecimal.
|
||||
# Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
|
||||
name: hello.1234567890
|
||||
# The label doesn't serve any purpose, except making
|
||||
# it easier to identify or delete that specific event.
|
||||
labels:
|
||||
container.training/test: ""
|
||||
|
||||
#eventTime: "2020-07-04T00:00:00.000000Z"
|
||||
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
|
||||
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
|
||||
#count: 42
|
||||
|
||||
involvedObject:
|
||||
### These 5 lines should be updated to refer to an object.
|
||||
### Make sure to put the correct "uid", because it is what
|
||||
### "kubectl describe" is using to gather relevant events.
|
||||
#apiVersion: v1
|
||||
#kind: Pod
|
||||
#name: magic-bean
|
||||
#namespace: blue
|
||||
#uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30
|
||||
|
||||
type: Normal
|
||||
reason: BackupSuccessful
|
||||
message: "Object successfully dumped to gitops repository"
|
||||
source:
|
||||
component: gitops-sync
|
||||
#reportingComponent: ""
|
||||
#reportingInstance: ""
|
||||
|
||||
29
k8s/hpa-v2-pa-httplat.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
kind: HorizontalPodAutoscaler
|
||||
apiVersion: autoscaling/v2beta2
|
||||
metadata:
|
||||
name: rng
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: rng
|
||||
minReplicas: 1
|
||||
maxReplicas: 20
|
||||
behavior:
|
||||
scaleUp:
|
||||
stabilizationWindowSeconds: 60
|
||||
scaleDown:
|
||||
stabilizationWindowSeconds: 180
|
||||
metrics:
|
||||
- type: Object
|
||||
object:
|
||||
describedObject:
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
name: httplat
|
||||
metric:
|
||||
name: httplat_latency_seconds
|
||||
target:
|
||||
type: Value
|
||||
value: 0.1
|
||||
|
||||
63
k8s/kyverno-namespace-setup.yaml
Normal file
@@ -0,0 +1,63 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: setup-namespace
|
||||
spec:
|
||||
rules:
|
||||
- name: setup-limitrange
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Namespace
|
||||
generate:
|
||||
kind: LimitRange
|
||||
name: default-limitrange
|
||||
namespace: "{{request.object.metadata.name}}"
|
||||
data:
|
||||
spec:
|
||||
limits:
|
||||
- type: Container
|
||||
min:
|
||||
cpu: 0.1
|
||||
memory: 0.1
|
||||
max:
|
||||
cpu: 2
|
||||
memory: 2Gi
|
||||
default:
|
||||
cpu: 0.25
|
||||
memory: 500Mi
|
||||
defaultRequest:
|
||||
cpu: 0.25
|
||||
memory: 250Mi
|
||||
- name: setup-resourcequota
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Namespace
|
||||
generate:
|
||||
kind: ResourceQuota
|
||||
name: default-resourcequota
|
||||
namespace: "{{request.object.metadata.name}}"
|
||||
data:
|
||||
spec:
|
||||
hard:
|
||||
requests.cpu: "10"
|
||||
requests.memory: 10Gi
|
||||
limits.cpu: "20"
|
||||
limits.memory: 20Gi
|
||||
- name: setup-networkpolicy
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Namespace
|
||||
generate:
|
||||
kind: NetworkPolicy
|
||||
name: default-networkpolicy
|
||||
namespace: "{{request.object.metadata.name}}"
|
||||
data:
|
||||
spec:
|
||||
podSelector: {}
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector: {}
|
||||
|
||||
22
k8s/kyverno-pod-color-1.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: pod-color-policy-1
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
rules:
|
||||
- name: ensure-pod-color-is-valid
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
selector:
|
||||
matchExpressions:
|
||||
- key: color
|
||||
operator: Exists
|
||||
- key: color
|
||||
operator: NotIn
|
||||
values: [ red, green, blue ]
|
||||
validate:
|
||||
message: "If it exists, the label color must be red, green, or blue."
|
||||
deny: {}
|
||||
21
k8s/kyverno-pod-color-2.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: pod-color-policy-2
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
background: false
|
||||
rules:
|
||||
- name: prevent-color-change
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
validate:
|
||||
message: "Once label color has been added, it cannot be changed."
|
||||
deny:
|
||||
conditions:
|
||||
- key: "{{ request.oldObject.metadata.labels.color }}"
|
||||
operator: NotEqual
|
||||
value: "{{ request.object.metadata.labels.color }}"
|
||||
|
||||
25
k8s/kyverno-pod-color-3.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: pod-color-policy-3
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
background: false
|
||||
rules:
|
||||
- name: prevent-color-removal
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
selector:
|
||||
matchExpressions:
|
||||
- key: color
|
||||
operator: DoesNotExist
|
||||
validate:
|
||||
message: "Once label color has been added, it cannot be removed."
|
||||
deny:
|
||||
conditions:
|
||||
- key: "{{ request.oldObject.metadata.labels.color }}"
|
||||
operator: NotIn
|
||||
value: []
|
||||
|
||||
24
k8s/openebs-pod.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: openebs-local-hostpath-pod
|
||||
spec:
|
||||
volumes:
|
||||
- name: storage
|
||||
persistentVolumeClaim:
|
||||
claimName: local-hostpath-pvc
|
||||
containers:
|
||||
- name: better
|
||||
image: alpine
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
while true; do
|
||||
echo "$(date) [$(hostname)] Kubernetes is better with PVs." >> /mnt/storage/greet.txt
|
||||
sleep $(($RANDOM % 5 + 20))
|
||||
done
|
||||
volumeMounts:
|
||||
- mountPath: /mnt/storage
|
||||
name: storage
|
||||
|
||||
@@ -5,8 +5,8 @@ metadata:
|
||||
annotations:
|
||||
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
|
||||
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
|
||||
name: restricted
|
||||
spec:
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
17
k8s/test.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: whatever
|
||||
spec:
|
||||
#tls:
|
||||
#- secretName: whatever.A.B.C.D.nip.io
|
||||
# hosts:
|
||||
# - whatever.A.B.C.D.nip.io
|
||||
rules:
|
||||
- host: whatever.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: whatever
|
||||
servicePort: 1234
|
||||
@@ -98,6 +98,15 @@ rules:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
- ingressclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
|
||||
@@ -3,8 +3,6 @@ apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node2
|
||||
annotations:
|
||||
node: node2
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
@@ -26,8 +24,6 @@ apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node3
|
||||
annotations:
|
||||
node: node3
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
@@ -49,8 +45,6 @@ apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node4
|
||||
annotations:
|
||||
node: node4
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
|
||||
13
prepare-eks/10_create_cluster.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/sh
|
||||
# Create an EKS cluster.
|
||||
# This is not idempotent (each time you run it, it creates a new cluster).
|
||||
|
||||
eksctl create cluster \
|
||||
--node-type=t3.large \
|
||||
--nodes-max=10 \
|
||||
--alb-ingress-access \
|
||||
--asg-access \
|
||||
--ssh-access \
|
||||
--with-oidc \
|
||||
#
|
||||
|
||||
32
prepare-eks/20_create_users.sh
Executable file
@@ -0,0 +1,32 @@
|
||||
#!/bin/sh
|
||||
# For each user listed in "users.txt", create an IAM user.
|
||||
# Also create AWS API access keys, and store them in "users.keys".
|
||||
# This is idempotent (you can run it multiple times, it will only
|
||||
# create the missing users). However, it will not remove users.
|
||||
# Note that you can remove users from "users.keys" (or even wipe
|
||||
# that file out entirely) and then this script will delete their
|
||||
# keys and generate new keys for them (and add the new keys to
|
||||
# "users.keys".)
|
||||
|
||||
echo "Getting list of existing users ..."
|
||||
aws iam list-users --output json | jq -r .Users[].UserName > users.tmp
|
||||
|
||||
for U in $(cat users.txt); do
|
||||
if ! grep -qw $U users.tmp; then
|
||||
echo "Creating user $U..."
|
||||
aws iam create-user --user-name=$U \
|
||||
--tags=Key=container.training,Value=1
|
||||
fi
|
||||
if ! grep -qw $U users.keys; then
|
||||
echo "Listing keys for user $U..."
|
||||
KEYS=$(aws iam list-access-keys --user=$U | jq -r .AccessKeyMetadata[].AccessKeyId)
|
||||
for KEY in $KEYS; do
|
||||
echo "Deleting key $KEY for user $U..."
|
||||
aws iam delete-access-key --user=$U --access-key-id=$KEY
|
||||
done
|
||||
echo "Creating access key for user $U..."
|
||||
aws iam create-access-key --user=$U --output json \
|
||||
| jq -r '.AccessKey | [ .UserName, .AccessKeyId, .SecretAccessKey ] | @tsv' \
|
||||
>> users.keys
|
||||
fi
|
||||
done
|
||||
51
prepare-eks/30_create_or_update_policy.sh
Executable file
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
|
||||
# Create an IAM policy to authorize users to do "aws eks update-kubeconfig".
|
||||
# This is idempotent, which allows to update the policy document below if
|
||||
# you want the users to do other things as well.
|
||||
# Note that each time you run this script, it will actually create a new
|
||||
# version of the policy, set that version as the default version, and
|
||||
# remove all non-default versions. (Because you can only have up to
|
||||
# 5 versions of a given policy, so you need to clean them up.)
|
||||
# After running that script, you will want to attach the policy to our
|
||||
# users (check the other scripts in that directory).
|
||||
|
||||
POLICY_NAME=user.container.training
|
||||
POLICY_DOC='{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"eks:DescribeCluster"
|
||||
],
|
||||
"Resource": "arn:aws:eks:*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
}'
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
|
||||
aws iam create-policy-version \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--policy-document "$POLICY_DOC" \
|
||||
--set-as-default
|
||||
|
||||
# For reference, the command below creates a policy without versioning:
|
||||
#aws iam create-policy \
|
||||
#--policy-name user.container.training \
|
||||
#--policy-document "$JSON"
|
||||
|
||||
for VERSION in $(
|
||||
aws iam list-policy-versions \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--query 'Versions[?!IsDefaultVersion].VersionId' \
|
||||
--output text)
|
||||
do
|
||||
aws iam delete-policy-version \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--version-id "$VERSION"
|
||||
done
|
||||
|
||||
# For reference, the command below shows all users using the policy:
|
||||
#aws iam list-entities-for-policy \
|
||||
#--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
14
prepare-eks/40_attach_policy.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/sh
|
||||
# Attach our user policy to all the users defined in "users.txt".
|
||||
# This should be idempotent, because attaching the same policy
|
||||
# to the same user multiple times doesn't do anything.
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
POLICY_NAME=user.container.training
|
||||
|
||||
for U in $(cat users.txt); do
|
||||
echo "Attaching policy to user $U ..."
|
||||
aws iam attach-user-policy \
|
||||
--user-name $U \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
done
|
||||
24
prepare-eks/50_aws_auth.sh
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/bin/sh
|
||||
# Update the aws-auth ConfigMap to map our IAM users to Kubernetes users.
|
||||
# Each user defined in "users.txt" will be mapped to a Kubernetes user
|
||||
# with the same name, and put in the "container.training" group, too.
|
||||
# This is idempotent.
|
||||
# WARNING: this will wipe out the mapUsers component of the aws-auth
|
||||
# ConfigMap, removing all users that aren't in "users.txt".
|
||||
# It won't touch mapRoles, so it shouldn't break the role mappings
|
||||
# put in place by EKS.
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
|
||||
rm -f users.map
|
||||
for U in $(cat users.txt); do
|
||||
echo "\
|
||||
- userarn: arn:aws:iam::$ACCOUNT:user/$U
|
||||
username: $U
|
||||
groups: [ container.training ]\
|
||||
" >> users.map
|
||||
done
|
||||
|
||||
kubectl create --namespace=kube-system configmap aws-auth \
|
||||
--dry-run=client --from-file=mapUsers=users.map -o yaml \
|
||||
| kubectl apply -f-
|
||||
65
prepare-eks/60_setup_rbac_and_ns.sh
Executable file
@@ -0,0 +1,65 @@
|
||||
#!/bin/sh
|
||||
# Create a shared Kubernetes Namespace ("container-training") as well as
|
||||
# individual namespaces for every user in "users.txt", and set up a bunch
|
||||
# of permissions.
|
||||
# Specifically:
|
||||
# - each user gets "view" permissions in the "default" Namespace
|
||||
# - each user gets "edit" permissions in the "container-training" Namespace
|
||||
# - each user gets permissions to list Nodes and Namespaces
|
||||
# - each user gets "admin" permissions in their personal Namespace
|
||||
# Note that since Kubernetes Namespaces can't have dots in their names,
|
||||
# if a user has dots, dots will be mapped to dashes.
|
||||
# So user "ada.lovelace" will get namespace "ada-lovelace".
|
||||
# This is kind of idempotent (but will raise a bunch of errors for objects
|
||||
# that already exist).
|
||||
# TODO: if this needs to evolve, replace all the "create" operations by
|
||||
# "apply" operations. But this is good enough for now.
|
||||
|
||||
kubectl create rolebinding --namespace default container.training \
|
||||
--group=container.training --clusterrole=view
|
||||
|
||||
kubectl create clusterrole view-nodes \
|
||||
--verb=get,list,watch --resource=node
|
||||
kubectl create clusterrolebinding view-nodes \
|
||||
--group=container.training --clusterrole=view-nodes
|
||||
|
||||
kubectl create clusterrole view-namespaces \
|
||||
--verb=get,list,watch --resource=namespace
|
||||
kubectl create clusterrolebinding view-namespaces \
|
||||
--group=container.training --clusterrole=view-namespaces
|
||||
|
||||
kubectl create namespace container-training
|
||||
kubectl create rolebinding --namespace container-training edit \
|
||||
--group=container.training --clusterrole=edit
|
||||
|
||||
# Note: API calls to EKS tend to be fairly slow. To optimize things a bit,
|
||||
# instead of running "kubectl" N times, we generate a bunch of YAML and
|
||||
# apply it. It will still generate a lot of API calls but it's much faster
|
||||
# than calling "kubectl" N times. It might be possible to make this even
|
||||
# faster by generating a "kind: List" (I don't know if this would issue
|
||||
# a single API calls or multiple ones; TBD!)
|
||||
for U in $(cat users.txt); do
|
||||
NS=$(echo $U | tr . -)
|
||||
cat <<EOF
|
||||
---
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: $NS
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: admin
|
||||
namespace: $NS
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: $U
|
||||
EOF
|
||||
done | kubectl create -f-
|
||||
|
||||
76
prepare-eks/70_oidc.sh
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/bin/sh
|
||||
# Create an IAM role to be used by a Kubernetes ServiceAccount.
|
||||
# The role isn't given any permissions yet (this has to be done by
|
||||
# another script in this series), but a properly configured Pod
|
||||
# should still be able to execute "aws sts get-caller-identity"
|
||||
# and confirm that it's using that role.
|
||||
# This requires the cluster to have an attached OIDC provider.
|
||||
# This should be the case if the cluster has been created with
|
||||
# the scripts in this directory; otherwise, this can be done with
|
||||
# the subsequent command, which is idempotent:
|
||||
# eksctl utils associate-iam-oidc-provider --cluster cluster-name-12341234 --approve
|
||||
# The policy document used below will authorize all ServiceAccounts
|
||||
# in the "container-training" Namespace to use that role.
|
||||
# This script will also annotate the container-training:default
|
||||
# ServiceAccount so that it can use that role.
|
||||
# This script is not quite idempotent: if you want to use a new
|
||||
# trust policy, some work will be required. (You can delete the role,
|
||||
# but that requires detaching the associated policies. There might also
|
||||
# be a way to update the trust policy directly; we didn't investigate this
|
||||
# further at this point.)
|
||||
|
||||
if [ "$1" ]; then
|
||||
CLUSTER="$1"
|
||||
else
|
||||
echo "Please indicate cluster to use. Available clusters:"
|
||||
aws eks list-clusters --output table
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
OIDC=$(aws eks describe-cluster --name $CLUSTER --query cluster.identity.oidc.issuer --output text | cut -d/ -f3-)
|
||||
ROLE_NAME=s3-reader-container-training
|
||||
TRUST_POLICY=$(envsubst <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Federated": "arn:aws:iam::${ACCOUNT}:oidc-provider/${OIDC}"
|
||||
},
|
||||
"Action": "sts:AssumeRoleWithWebIdentity",
|
||||
"Condition": {
|
||||
"StringLike": {
|
||||
"${OIDC}:sub": ["system:serviceaccount:container-training:*"]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
aws iam create-role \
|
||||
--role-name "$ROLE_NAME" \
|
||||
--assume-role-policy-document "$TRUST_POLICY"
|
||||
|
||||
kubectl annotate serviceaccounts \
|
||||
--namespace container-training default \
|
||||
"eks.amazonaws.com/role-arn=arn:aws:iam::$ACCOUNT:role/$ROLE_NAME" \
|
||||
--overwrite
|
||||
|
||||
exit
|
||||
|
||||
# Here are commands to delete the role:
|
||||
for POLICY_ARN in $(aws iam list-attached-role-policies --role-name $ROLE_NAME --query 'AttachedPolicies[*].PolicyArn' --output text); do aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn $POLICY_ARN; done
|
||||
aws iam delete-role --role-name $ROLE_NAME
|
||||
|
||||
# Merging the policy with the existing policies:
|
||||
{
|
||||
aws iam get-role --role-name s3-reader-container-training | jq -r .Role.AssumeRolePolicyDocument.Statement[]
|
||||
echo "$TRUST_POLICY" | jq -r .Statement[]
|
||||
} | jq -s '{"Version": "2012-10-17", "Statement": .}' > /tmp/policy.json
|
||||
aws iam update-assume-role-policy \
|
||||
--role-name $ROLE_NAME \
|
||||
--policy-document file:///tmp/policy.json
|
||||
54
prepare-eks/80_s3_bucket.sh
Executable file
@@ -0,0 +1,54 @@
|
||||
#!/bin/sh
|
||||
# Create an S3 bucket with two objects in it:
|
||||
# - public.txt (world-readable)
|
||||
# - private.txt (private)
|
||||
# Also create an IAM policy granting read-only access to the bucket
|
||||
# (and therefore, to the private object).
|
||||
# Finally, attach the policy to an IAM role (for instance, the role
|
||||
# created by another script in this directory).
|
||||
# This isn't idempotent, but it can be made idempotent by replacing the
|
||||
# "aws iam create-policy" call with "aws iam create-policy-version" and
|
||||
# a bit of extra elbow grease. (See other scripts in this directory for
|
||||
# an example).
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
BUCKET=container.training
|
||||
ROLE_NAME=s3-reader-container-training
|
||||
POLICY_NAME=s3-reader-container-training
|
||||
POLICY_DOC=$(envsubst <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:ListBucket",
|
||||
"s3:GetObject*"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::$BUCKET",
|
||||
"arn:aws:s3:::$BUCKET/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
aws iam create-policy \
|
||||
--policy-name $POLICY_NAME \
|
||||
--policy-doc "$POLICY_DOC"
|
||||
|
||||
aws s3 mb s3://container.training
|
||||
|
||||
echo "this is a public object" \
|
||||
| aws s3 cp - s3://container.training/public.txt \
|
||||
--acl public-read
|
||||
|
||||
echo "this is a private object" \
|
||||
| aws s3 cp - s3://container.training/private.txt \
|
||||
--acl private
|
||||
|
||||
aws iam attach-role-policy \
|
||||
--role-name "$ROLE_NAME" \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
50
prepare-eks/users.txt
Normal file
@@ -0,0 +1,50 @@
|
||||
ada.lovelace
|
||||
adele.goldstine
|
||||
amanda.jones
|
||||
anita.borg
|
||||
ann.kiessling
|
||||
barbara.mcclintock
|
||||
beatrice.worsley
|
||||
bessie.blount
|
||||
betty.holberton
|
||||
beulah.henry
|
||||
carleen.hutchins
|
||||
caroline.herschel
|
||||
dona.bailey
|
||||
dorothy.hodgkin
|
||||
ellen.ochoa
|
||||
edith.clarke
|
||||
elisha.collier
|
||||
elizabeth.feinler
|
||||
emily.davenport
|
||||
erna.hoover
|
||||
frances.spence
|
||||
gertrude.blanch
|
||||
grace.hopper
|
||||
grete.hermann
|
||||
giuliana.tesoro
|
||||
harriet.tubman
|
||||
hedy.lamarr
|
||||
irma.wyman
|
||||
jane.goodall
|
||||
jean.bartik
|
||||
joy.mangano
|
||||
josephine.cochrane
|
||||
katherine.blodgett
|
||||
kathleen.antonelli
|
||||
lynn.conway
|
||||
margaret.hamilton
|
||||
maria.beasley
|
||||
marie.curie
|
||||
marjorie.joyner
|
||||
marlyn.meltzer
|
||||
mary.kies
|
||||
melitta.bentz
|
||||
milly.koss
|
||||
radia.perlman
|
||||
rosalind.franklin
|
||||
ruth.teitelbaum
|
||||
sarah.mather
|
||||
sophie.wilson
|
||||
stephanie.kwolek
|
||||
yvonne.brill
|
||||
@@ -1 +1,3 @@
|
||||
INFRACLASS=scaleway
|
||||
#SCW_INSTANCE_TYPE=DEV1-L
|
||||
#SCW_ZONE=fr-par-2
|
||||
|
||||
@@ -69,11 +69,14 @@ _cmd_deploy() {
|
||||
echo deploying > tags/$TAG/status
|
||||
sep "Deploying tag $TAG"
|
||||
|
||||
# Wait for cloudinit to be done
|
||||
# If this VM image is using cloud-init,
|
||||
# wait for cloud-init to be done
|
||||
pssh "
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done"
|
||||
if [ -d /var/lib/cloud ]; then
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done
|
||||
fi"
|
||||
|
||||
# Special case for scaleway since it doesn't come with sudo
|
||||
if [ "$INFRACLASS" = "scaleway" ]; then
|
||||
@@ -102,6 +105,12 @@ _cmd_deploy() {
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install -y python-yaml"
|
||||
|
||||
# If there is no "python" binary, symlink to python3
|
||||
#pssh "
|
||||
#if ! which python; then
|
||||
# ln -s $(which python3) /usr/local/bin/python
|
||||
#fi"
|
||||
|
||||
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
|
||||
pssh -I tee /tmp/postprep.py <lib/postprep.py
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
@@ -167,7 +176,7 @@ _cmd_kubebins() {
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
##VERSION##
|
||||
curl -L https://dl.k8s.io/v1.18.8/kubernetes-server-linux-amd64.tar.gz \
|
||||
curl -L https://dl.k8s.io/v1.18.10/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx \
|
||||
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
|
||||
fi
|
||||
@@ -204,9 +213,18 @@ _cmd_kube() {
|
||||
pssh --timeout 200 "
|
||||
sudo apt-get update -q &&
|
||||
sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
|
||||
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
|
||||
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
|
||||
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
|
||||
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
|
||||
|
||||
# Initialize kube master
|
||||
# Disable swap
|
||||
# (note that this won't survive across node reboots!)
|
||||
if [ "$INFRACLASS" = "linode" ]; then
|
||||
pssh "
|
||||
sudo swapoff -a"
|
||||
fi
|
||||
|
||||
# Initialize kube control plane
|
||||
pssh --timeout 200 "
|
||||
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
|
||||
kubeadm token generate > /tmp/token &&
|
||||
@@ -243,6 +261,12 @@ _cmd_kube() {
|
||||
if i_am_first_node; then
|
||||
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd kubetools "Install a bunch of CLI tools for Kubernetes"
|
||||
_cmd_kubetools() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# Install kubectx and kubens
|
||||
pssh "
|
||||
@@ -304,7 +328,54 @@ EOF"
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
fi"
|
||||
|
||||
sep "Done"
|
||||
# Install the krew package manager
|
||||
pssh "
|
||||
if [ ! -d /home/docker/.krew ]; then
|
||||
cd /tmp &&
|
||||
curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz |
|
||||
tar -zxf- &&
|
||||
sudo -u docker -H ./krew-linux_amd64 install krew &&
|
||||
echo export PATH=/home/docker/.krew/bin:\\\$PATH | sudo -u docker tee -a /home/docker/.bashrc
|
||||
fi"
|
||||
|
||||
# Install k9s and popeye
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/k9s ]; then
|
||||
FILENAME=k9s_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin k9s
|
||||
fi
|
||||
if [ ! -x /usr/local/bin/popeye ]; then
|
||||
FILENAME=popeye_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin popeye
|
||||
fi"
|
||||
|
||||
# Install Tilt
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/tilt ]; then
|
||||
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
|
||||
fi"
|
||||
|
||||
# Install Skaffold
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/skaffold ]; then
|
||||
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 &&
|
||||
sudo install skaffold /usr/local/bin/
|
||||
fi"
|
||||
|
||||
# Install Kompose
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kompose ]; then
|
||||
curl -Lo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-amd64 &&
|
||||
sudo install kompose /usr/local/bin
|
||||
fi"
|
||||
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kubeseal ]; then
|
||||
curl -Lo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.13.1/kubeseal-linux-amd64 &&
|
||||
sudo install kubeseal /usr/local/bin
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
|
||||
@@ -493,6 +564,17 @@ _cmd_remap_nodeports() {
|
||||
if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
|
||||
sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
|
||||
fi"
|
||||
|
||||
info "If you have manifests hard-coding nodePort values,"
|
||||
info "you might want to patch them with a command like:"
|
||||
info "
|
||||
|
||||
if i_am_first_node; then
|
||||
kubectl -n kube-system patch svc prometheus-server \\
|
||||
-p 'spec: { ports: [ {port: 80, nodePort: 10101} ]}'
|
||||
fi
|
||||
|
||||
"
|
||||
}
|
||||
|
||||
_cmd quotas "Check our infrastructure quotas (max instances)"
|
||||
@@ -516,7 +598,7 @@ _cmd_start() {
|
||||
case "$1" in
|
||||
--infra) INFRA=$2; shift 2;;
|
||||
--settings) SETTINGS=$2; shift 2;;
|
||||
--count) COUNT=$2; shift 2;;
|
||||
--count) die "Flag --count is deprecated; please use --students instead." ;;
|
||||
--tag) TAG=$2; shift 2;;
|
||||
--students) STUDENTS=$2; shift 2;;
|
||||
*) die "Unrecognized parameter: $1."
|
||||
@@ -590,6 +672,8 @@ _cmd_start() {
|
||||
done
|
||||
sep
|
||||
info "Deployment successful."
|
||||
info "To log into the first machine of that batch, you can run:"
|
||||
info "$0 ssh $TAG"
|
||||
info "To terminate these instances, you can run:"
|
||||
info "$0 stop $TAG"
|
||||
}
|
||||
@@ -653,8 +737,8 @@ _cmd_helmprom() {
|
||||
need_tag
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
sudo -u docker -H helm repo add stable https://kubernetes-charts.storage.googleapis.com/
|
||||
sudo -u docker -H helm install prometheus stable/prometheus \
|
||||
sudo -u docker -H helm repo add prometheus-community https://prometheus-community.github.io/helm-charts/
|
||||
sudo -u docker -H helm install prometheus prometheus-community/prometheus \
|
||||
--namespace kube-system \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
|
||||
58
prepare-vms/lib/infra/linode.sh
Normal file
@@ -0,0 +1,58 @@
|
||||
if ! command -v linode-cli >/dev/null; then
|
||||
warn "Linode CLI (linode-cli) not found."
|
||||
fi
|
||||
if ! [ -f ~/.config/linode-cli ]; then
|
||||
warn "~/.config/linode-cli not found."
|
||||
fi
|
||||
|
||||
# To view available regions: "linode-cli regions list"
|
||||
LINODE_REGION=${LINODE_REGION-us-west}
|
||||
|
||||
# To view available types: "linode-cli linodes types"
|
||||
LINODE_TYPE=${LINODE_TYPE-g6-standard-2}
|
||||
|
||||
infra_list() {
|
||||
linode-cli linodes list --json |
|
||||
jq -r '.[] | [.id, .label, .status, .type] | @tsv'
|
||||
}
|
||||
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
for I in $(seq 1 $COUNT); do
|
||||
NAME=$(printf "%s-%03d" $TAG $I)
|
||||
sep "Starting instance $I/$COUNT"
|
||||
info " Zone: $LINODE_REGION"
|
||||
info " Name: $NAME"
|
||||
info " Instance type: $LINODE_TYPE"
|
||||
ROOT_PASS="$(base64 /dev/urandom | cut -c1-20 | head -n 1)"
|
||||
linode-cli linodes create \
|
||||
--type=${LINODE_TYPE} --region=${LINODE_REGION} \
|
||||
--image=linode/ubuntu18.04 \
|
||||
--authorized_keys="${LINODE_SSHKEY}" \
|
||||
--root_pass="${ROOT_PASS}" \
|
||||
--tags=${TAG} --label=${NAME}
|
||||
done
|
||||
sep
|
||||
|
||||
linode_get_ips_by_tag $TAG > tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
info "Counting instances..."
|
||||
linode_get_ids_by_tag $TAG | wc -l
|
||||
info "Deleting instances..."
|
||||
linode_get_ids_by_tag $TAG |
|
||||
xargs -n1 -P10 \
|
||||
linode-cli linodes delete
|
||||
}
|
||||
|
||||
linode_get_ids_by_tag() {
|
||||
TAG=$1
|
||||
linode-cli linodes list --tags $TAG --json | jq -r ".[].id"
|
||||
}
|
||||
|
||||
linode_get_ips_by_tag() {
|
||||
TAG=$1
|
||||
linode-cli linodes list --tags $TAG --json | jq -r ".[].ipv4[0]"
|
||||
}
|
||||
@@ -5,6 +5,9 @@ if ! [ -f ~/.config/scw/config.yaml ]; then
|
||||
warn "~/.config/scw/config.yaml not found."
|
||||
fi
|
||||
|
||||
SCW_INSTANCE_TYPE=${SCW_INSTANCE_TYPE-DEV1-M}
|
||||
SCW_ZONE=${SCW_ZONE-fr-par-1}
|
||||
|
||||
infra_list() {
|
||||
scw instance server list -o json |
|
||||
jq -r '.[] | [.id, .name, .state, .commercial_type] | @tsv'
|
||||
@@ -13,9 +16,6 @@ infra_list() {
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
SCW_INSTANCE_TYPE=${SCW_INSTANCE_TYPE-DEV1-M}
|
||||
SCW_ZONE=${SCW_ZONE-fr-par-1}
|
||||
|
||||
for I in $(seq 1 $COUNT); do
|
||||
NAME=$(printf "%s-%03d" $TAG $I)
|
||||
sep "Starting instance $I/$COUNT"
|
||||
@@ -36,16 +36,16 @@ infra_stop() {
|
||||
scw_get_ids_by_tag $TAG | wc -l
|
||||
info "Deleting instances..."
|
||||
scw_get_ids_by_tag $TAG |
|
||||
xargs -n1 -P10 -I@@ \
|
||||
scw instance server delete force-shutdown=true server-id=@@
|
||||
xargs -n1 -P10 \
|
||||
scw instance server delete zone=${SCW_ZONE} force-shutdown=true with-ip=true
|
||||
}
|
||||
|
||||
scw_get_ids_by_tag() {
|
||||
TAG=$1
|
||||
scw instance server list name=$TAG -o json | jq -r .[].id
|
||||
scw instance server list zone=${SCW_ZONE} name=$TAG -o json | jq -r .[].id
|
||||
}
|
||||
|
||||
scw_get_ips_by_tag() {
|
||||
TAG=$1
|
||||
scw instance server list name=$TAG -o json | jq -r .[].public_ip.address
|
||||
scw instance server list zone=${SCW_ZONE} name=$TAG -o json | jq -r .[].public_ip.address
|
||||
}
|
||||
|
||||
@@ -18,11 +18,11 @@ pssh() {
|
||||
echo "[parallel-ssh] $@"
|
||||
export PSSH=$(which pssh || which parallel-ssh)
|
||||
|
||||
if [ "$INFRACLASS" = hetzner ]; then
|
||||
LOGIN=root
|
||||
else
|
||||
LOGIN=ubuntu
|
||||
fi
|
||||
case "$INFRACLASS" in
|
||||
hetzner) LOGIN=root ;;
|
||||
linode) LOGIN=root ;;
|
||||
*) LOGIN=ubuntu ;;
|
||||
esac
|
||||
|
||||
$PSSH -h $HOSTFILE -l $LOGIN \
|
||||
--par 100 \
|
||||
|
||||
@@ -2,11 +2,11 @@
|
||||
"""
|
||||
There are two ways to use this script:
|
||||
|
||||
1. Pass a tag name as a single argument.
|
||||
It will then take the clusters corresponding to that tag, and assign one
|
||||
domain name per cluster. Currently it gets the domains from a hard-coded
|
||||
path. There should be more domains than clusters.
|
||||
Example: ./map-dns.py 2020-08-15-jp
|
||||
1. Pass a file name and a tag name as a single argument.
|
||||
It will load a list of domains from the given file (one per line),
|
||||
and assign them to the clusters corresponding to that tag.
|
||||
There should be more domains than clusters.
|
||||
Example: ./map-dns.py domains.txt 2020-08-15-jp
|
||||
|
||||
2. Pass a domain as the 1st argument, and IP addresses then.
|
||||
It will configure the domain with the listed IP addresses.
|
||||
@@ -19,55 +19,53 @@ import requests
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
# configurable stuff
|
||||
domains_file = "../../plentydomains/domains.txt"
|
||||
# This can be tweaked if necessary.
|
||||
config_file = os.path.join(
|
||||
os.environ["HOME"], ".config/gandi/config.yaml")
|
||||
tag = None
|
||||
os.environ["HOME"], ".config/gandi/config.yaml")
|
||||
apiurl = "https://dns.api.gandi.net/api/v5/domains"
|
||||
|
||||
if len(sys.argv) == 2:
|
||||
tag = sys.argv[1]
|
||||
domains = open(domains_file).read().split()
|
||||
domains = [ d for d in domains if not d.startswith('#') ]
|
||||
ips = open(f"tags/{tag}/ips.txt").read().split()
|
||||
settings_file = f"tags/{tag}/settings.yaml"
|
||||
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
|
||||
else:
|
||||
domains = [sys.argv[1]]
|
||||
ips = sys.argv[2:]
|
||||
clustersize = len(ips)
|
||||
|
||||
# inferred stuff
|
||||
apikey = yaml.safe_load(open(config_file))["apirest"]["key"]
|
||||
|
||||
# now do the fucking work
|
||||
while domains and ips:
|
||||
domain = domains[0]
|
||||
domains = domains[1:]
|
||||
cluster = ips[:clustersize]
|
||||
ips = ips[clustersize:]
|
||||
print(f"{domain} => {cluster}")
|
||||
zone = ""
|
||||
node = 0
|
||||
for ip in cluster:
|
||||
node += 1
|
||||
zone += f"@ 300 IN A {ip}\n"
|
||||
zone += f"* 300 IN A {ip}\n"
|
||||
zone += f"node{node} 300 IN A {ip}\n"
|
||||
r = requests.put(
|
||||
f"{apiurl}/{domain}/records",
|
||||
headers={"x-api-key": apikey},
|
||||
data=zone)
|
||||
print(r.text)
|
||||
# Figure out if we're called for a bunch of domains, or just one.
|
||||
domain_or_domain_file = sys.argv[1]
|
||||
if os.path.isfile(domain_or_domain_file):
|
||||
domains = open(domain_or_domain_file).read().split()
|
||||
domains = [ d for d in domains if not d.startswith('#') ]
|
||||
tag = sys.argv[2]
|
||||
ips = open(f"tags/{tag}/ips.txt").read().split()
|
||||
settings_file = f"tags/{tag}/settings.yaml"
|
||||
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
|
||||
else:
|
||||
domains = [domain_or_domain_file]
|
||||
ips = sys.argv[2:]
|
||||
clustersize = len(ips)
|
||||
|
||||
#r = requests.get(
|
||||
# f"{apiurl}/{domain}/records",
|
||||
# headers={"x-api-key": apikey},
|
||||
# )
|
||||
# Now, do the work.
|
||||
while domains and ips:
|
||||
domain = domains[0]
|
||||
domains = domains[1:]
|
||||
cluster = ips[:clustersize]
|
||||
ips = ips[clustersize:]
|
||||
print(f"{domain} => {cluster}")
|
||||
zone = ""
|
||||
node = 0
|
||||
for ip in cluster:
|
||||
node += 1
|
||||
zone += f"@ 300 IN A {ip}\n"
|
||||
zone += f"* 300 IN A {ip}\n"
|
||||
zone += f"node{node} 300 IN A {ip}\n"
|
||||
r = requests.put(
|
||||
f"{apiurl}/{domain}/records",
|
||||
headers={"x-api-key": apikey},
|
||||
data=zone)
|
||||
print(r.text)
|
||||
|
||||
#r = requests.get(
|
||||
# f"{apiurl}/{domain}/records",
|
||||
# headers={"x-api-key": apikey},
|
||||
# )
|
||||
|
||||
if domains:
|
||||
print(f"Good, we have {len(domains)} domains left.")
|
||||
print(f"Good, we have {len(domains)} domains left.")
|
||||
|
||||
if ips:
|
||||
print(f"Crap, we have {len(ips)} IP addresses left.")
|
||||
print(f"Crap, we have {len(ips)} IP addresses left.")
|
||||
|
||||
@@ -25,5 +25,6 @@ steps:
|
||||
- webssh
|
||||
- tailhist
|
||||
- kube
|
||||
- kubetools
|
||||
- cards
|
||||
- kubetest
|
||||
|
||||
@@ -35,6 +35,8 @@ TAG=$PREFIX-$SETTINGS
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl disabledocker $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kubenet
|
||||
@@ -48,6 +50,8 @@ TAG=$PREFIX-$SETTINGS
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kuberouter
|
||||
@@ -61,6 +65,8 @@ TAG=$PREFIX-$SETTINGS
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
#INFRA=infra/aws-us-west-1
|
||||
@@ -76,5 +82,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kube $TAG 1.15.9
|
||||
retry 5 ./workshopctl kube $TAG 1.17.13
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
67
slides/1.yml
@@ -1,67 +0,0 @@
|
||||
title: |
|
||||
Docker Intensif
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202010-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2020-10-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
#- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
#- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
#- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Container_Networking_Basics.md
|
||||
#- containers/Network_Drivers.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Container_Network_Model.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Multi_Stage_Builds.md
|
||||
#- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Advanced_Dockerfiles.md
|
||||
#- containers/Init_Systems.md
|
||||
#- containers/Application_Configuration.md
|
||||
#- containers/Logging.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Container_Engines.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
#- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
98
slides/2.yml
@@ -1,98 +0,0 @@
|
||||
title: |
|
||||
Fondamentaux Kubernetes
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202010-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2020-10-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
-
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
-
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/yamldeploy.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
-
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/ingress.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/batch-jobs.md
|
||||
#- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/statefulsets.md
|
||||
#- k8s/local-persistent-volumes.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
#- k8s/whatsnext.md
|
||||
#- k8s/lastwords.md
|
||||
- shared/thankyou.md
|
||||
- k8s/links.md
|
||||
-
|
||||
- |
|
||||
# (Bonus)
|
||||
- k8s/record.md
|
||||
- k8s/dryrun.md
|
||||
- k8s/ingress-tls.md
|
||||
36
slides/3.yml
@@ -1,36 +0,0 @@
|
||||
title: |
|
||||
Packaging d'applications
|
||||
pour Kubernetes
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202010-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2020-10-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
- shared/thankyou.md
|
||||
- k8s/links.md
|
||||
47
slides/4.yml
@@ -1,47 +0,0 @@
|
||||
title: |
|
||||
Kubernetes Avancé
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202010-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2020-10-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
-
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
-
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/prometheus.md
|
||||
-
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
- shared/thankyou.md
|
||||
-
|
||||
- |
|
||||
# (Bonus material)
|
||||
- k8s/podsecuritypolicy.md
|
||||
- k8s/operators-design.md
|
||||
48
slides/5.yml
@@ -1,48 +0,0 @@
|
||||
title: |
|
||||
Opérer Kubernetes
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202010-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2020-10-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
# DAY 1
|
||||
-
|
||||
- k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
-
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/interco.md
|
||||
-
|
||||
- k8s/apilb.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/staticpods.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
#- k8s/cloud-controller-manager.md
|
||||
-
|
||||
- k8s/podsecuritypolicy.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
- shared/thankyou.md
|
||||
@@ -2,14 +2,15 @@
|
||||
#/ /kube-halfday.yml.html 200!
|
||||
#/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-twodays.yml.html 200!
|
||||
/ /lke.yml.html 200!
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
#/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
|
||||
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
|
||||
/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=DOCKERALLDAY
|
||||
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=DOCKERALLDAY
|
||||
/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
|
||||
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
|
||||
#/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=DOCKERALLDAY
|
||||
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=DOCKERALLDAY
|
||||
|
||||
# Shortlink for the QRCode
|
||||
/q /qrcode.html 200
|
||||
@@ -19,4 +20,5 @@
|
||||
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
|
||||
/hi5 https://enix.io/fr/services/formation/online/
|
||||
|
||||
/ /highfive.html 200!
|
||||
# Survey form
|
||||
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform
|
||||
|
||||
298
slides/autopilot/package-lock.json
generated
@@ -24,14 +24,9 @@
|
||||
"integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
|
||||
},
|
||||
"arraybuffer.slice": {
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz",
|
||||
"integrity": "sha1-8zshWfBTKj8xB6JywMz70a0peco="
|
||||
},
|
||||
"async-limiter": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz",
|
||||
"integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg=="
|
||||
"version": "0.0.7",
|
||||
"resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.7.tgz",
|
||||
"integrity": "sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog=="
|
||||
},
|
||||
"backo2": {
|
||||
"version": "1.0.2",
|
||||
@@ -39,27 +34,19 @@
|
||||
"integrity": "sha1-MasayLEpNjRj41s+u2n038+6eUc="
|
||||
},
|
||||
"base64-arraybuffer": {
|
||||
"version": "0.1.5",
|
||||
"resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz",
|
||||
"integrity": "sha1-c5JncZI7Whl0etZmqlzUv5xunOg="
|
||||
"version": "0.1.4",
|
||||
"resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.4.tgz",
|
||||
"integrity": "sha1-mBjHngWbE1X5fgQooBfIOOkLqBI="
|
||||
},
|
||||
"base64id": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/base64id/-/base64id-1.0.0.tgz",
|
||||
"integrity": "sha1-R2iMuZu2gE8OBtPnY7HDLlfY5rY="
|
||||
},
|
||||
"better-assert": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/better-assert/-/better-assert-1.0.2.tgz",
|
||||
"integrity": "sha1-QIZrnhueC1W0gYlDEeaPr/rrxSI=",
|
||||
"requires": {
|
||||
"callsite": "1.0.0"
|
||||
}
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz",
|
||||
"integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog=="
|
||||
},
|
||||
"blob": {
|
||||
"version": "0.0.4",
|
||||
"resolved": "https://registry.npmjs.org/blob/-/blob-0.0.4.tgz",
|
||||
"integrity": "sha1-vPEwUspURj8w+fx+lbmkdjCpSSE="
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/blob/-/blob-0.0.5.tgz",
|
||||
"integrity": "sha512-gaqbzQPqOoamawKg0LGVd7SzLgXS+JH61oWprSLH+P+abTczqJbhTR8CmJ2u9/bUYNmHTGJx/UEmn6doAvvuig=="
|
||||
},
|
||||
"body-parser": {
|
||||
"version": "1.18.2",
|
||||
@@ -83,20 +70,15 @@
|
||||
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
|
||||
"integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
|
||||
},
|
||||
"callsite": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/callsite/-/callsite-1.0.0.tgz",
|
||||
"integrity": "sha1-KAOY5dZkvXQDi28JBRU+borxvCA="
|
||||
},
|
||||
"component-bind": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/component-bind/-/component-bind-1.0.0.tgz",
|
||||
"integrity": "sha1-AMYIq33Nk4l8AAllGx06jh5zu9E="
|
||||
},
|
||||
"component-emitter": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
|
||||
"integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY="
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
|
||||
"integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg=="
|
||||
},
|
||||
"component-inherit": {
|
||||
"version": "0.0.3",
|
||||
@@ -152,58 +134,76 @@
|
||||
"integrity": "sha1-eePVhlU0aQn+bw9Fpd5oEDspTSA="
|
||||
},
|
||||
"engine.io": {
|
||||
"version": "3.1.4",
|
||||
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.1.4.tgz",
|
||||
"integrity": "sha1-PQIRtwpVLOhB/8fahiezAamkFi4=",
|
||||
"version": "3.5.0",
|
||||
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.5.0.tgz",
|
||||
"integrity": "sha512-21HlvPUKaitDGE4GXNtQ7PLP0Sz4aWLddMPw2VTyFz1FVZqu/kZsJUO8WNpKuE/OCL7nkfRaOui2ZCJloGznGA==",
|
||||
"requires": {
|
||||
"accepts": "1.3.3",
|
||||
"base64id": "1.0.0",
|
||||
"cookie": "0.3.1",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-parser": "2.1.1",
|
||||
"uws": "0.14.5",
|
||||
"ws": "3.3.3"
|
||||
"accepts": "~1.3.4",
|
||||
"base64id": "2.0.0",
|
||||
"cookie": "~0.4.1",
|
||||
"debug": "~4.1.0",
|
||||
"engine.io-parser": "~2.2.0",
|
||||
"ws": "~7.4.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"accepts": {
|
||||
"version": "1.3.3",
|
||||
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.3.tgz",
|
||||
"integrity": "sha1-w8p0NJOGSMPg2cHjKN1otiLChMo=",
|
||||
"cookie": {
|
||||
"version": "0.4.1",
|
||||
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.1.tgz",
|
||||
"integrity": "sha512-ZwrFkGJxUR3EIoXtO+yVE69Eb7KlixbaeAWfBQB9vVsNn/o+Yw69gBWSSDK825hQNdN+wF8zELf3dFNl/kxkUA=="
|
||||
},
|
||||
"debug": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
|
||||
"integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
|
||||
"requires": {
|
||||
"mime-types": "2.1.17",
|
||||
"negotiator": "0.6.1"
|
||||
"ms": "^2.1.1"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"engine.io-client": {
|
||||
"version": "3.1.4",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.1.4.tgz",
|
||||
"integrity": "sha1-T88TcLRxY70s6b4nM5ckMDUNTqE=",
|
||||
"version": "3.5.0",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.5.0.tgz",
|
||||
"integrity": "sha512-12wPRfMrugVw/DNyJk34GQ5vIVArEcVMXWugQGGuw2XxUSztFNmJggZmv8IZlLyEdnpO1QB9LkcjeWewO2vxtA==",
|
||||
"requires": {
|
||||
"component-emitter": "1.2.1",
|
||||
"component-emitter": "~1.3.0",
|
||||
"component-inherit": "0.0.3",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-parser": "2.1.1",
|
||||
"debug": "~3.1.0",
|
||||
"engine.io-parser": "~2.2.0",
|
||||
"has-cors": "1.1.0",
|
||||
"indexof": "0.0.1",
|
||||
"parseqs": "0.0.5",
|
||||
"parseuri": "0.0.5",
|
||||
"ws": "3.3.3",
|
||||
"xmlhttprequest-ssl": "1.5.4",
|
||||
"parseqs": "0.0.6",
|
||||
"parseuri": "0.0.6",
|
||||
"ws": "~7.4.2",
|
||||
"xmlhttprequest-ssl": "~1.5.4",
|
||||
"yeast": "0.1.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
|
||||
"integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
|
||||
"requires": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"engine.io-parser": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.1.1.tgz",
|
||||
"integrity": "sha1-4Ps/DgRi9/WLt3waUun1p+JuRmg=",
|
||||
"version": "2.2.1",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.2.1.tgz",
|
||||
"integrity": "sha512-x+dN/fBH8Ro8TFwJ+rkB2AmuVw9Yu2mockR/p3W8f8YtExwFgDvBDi0GWyb4ZLkpahtDGZgtr3zLovanJghPqg==",
|
||||
"requires": {
|
||||
"after": "0.8.2",
|
||||
"arraybuffer.slice": "0.0.6",
|
||||
"base64-arraybuffer": "0.1.5",
|
||||
"blob": "0.0.4",
|
||||
"has-binary2": "1.0.2"
|
||||
"arraybuffer.slice": "~0.0.7",
|
||||
"base64-arraybuffer": "0.1.4",
|
||||
"blob": "0.0.5",
|
||||
"has-binary2": "~1.0.2"
|
||||
}
|
||||
},
|
||||
"escape-html": {
|
||||
@@ -278,9 +278,9 @@
|
||||
"integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
|
||||
},
|
||||
"has-binary2": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.2.tgz",
|
||||
"integrity": "sha1-6D26SfC5vk0CbSc2U1DZ8D9Uvpg=",
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.3.tgz",
|
||||
"integrity": "sha512-G1LWKhDSvhGeAQ8mPVQlqNcOB2sJdwATtZKl2pDKKHfpf/rYj24lkinxf69blJbnsvtqqNU+L3SL50vzZhXOnw==",
|
||||
"requires": {
|
||||
"isarray": "2.0.1"
|
||||
}
|
||||
@@ -376,11 +376,6 @@
|
||||
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
|
||||
"integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk="
|
||||
},
|
||||
"object-component": {
|
||||
"version": "0.0.3",
|
||||
"resolved": "https://registry.npmjs.org/object-component/-/object-component-0.0.3.tgz",
|
||||
"integrity": "sha1-8MaapQ78lbhmwYb0AKM3acsvEpE="
|
||||
},
|
||||
"on-finished": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
|
||||
@@ -390,20 +385,14 @@
|
||||
}
|
||||
},
|
||||
"parseqs": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.5.tgz",
|
||||
"integrity": "sha1-1SCKNzjkZ2bikbouoXNoSSGouJ0=",
|
||||
"requires": {
|
||||
"better-assert": "1.0.2"
|
||||
}
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.6.tgz",
|
||||
"integrity": "sha512-jeAGzMDbfSHHA091hr0r31eYfTig+29g3GKKE/PPbEQ65X0lmMwlEoqmhzu0iztID5uJpZsFlUPDP8ThPL7M8w=="
|
||||
},
|
||||
"parseuri": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/parseuri/-/parseuri-0.0.5.tgz",
|
||||
"integrity": "sha1-gCBKUNTbt3m/3G6+J3jZDkvOMgo=",
|
||||
"requires": {
|
||||
"better-assert": "1.0.2"
|
||||
}
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/parseuri/-/parseuri-0.0.6.tgz",
|
||||
"integrity": "sha512-AUjen8sAkGgao7UyCX6Ahv0gIK2fABKmYjvP4xmy5JaKvcbTRueIqIPHLAfq30xJddqSE033IOMUSOMCcK3Sow=="
|
||||
},
|
||||
"parseurl": {
|
||||
"version": "1.3.2",
|
||||
@@ -487,51 +476,104 @@
|
||||
"integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
|
||||
},
|
||||
"socket.io": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.0.4.tgz",
|
||||
"integrity": "sha1-waRZDO/4fs8TxyZS8Eb3FrKeYBQ=",
|
||||
"version": "2.4.0",
|
||||
"resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.4.0.tgz",
|
||||
"integrity": "sha512-9UPJ1UTvKayuQfVv2IQ3k7tCQC/fboDyIK62i99dAQIyHKaBsNdTpwHLgKJ6guRWxRtC9H+138UwpaGuQO9uWQ==",
|
||||
"requires": {
|
||||
"debug": "2.6.9",
|
||||
"engine.io": "3.1.4",
|
||||
"socket.io-adapter": "1.1.1",
|
||||
"socket.io-client": "2.0.4",
|
||||
"socket.io-parser": "3.1.2"
|
||||
"debug": "~4.1.0",
|
||||
"engine.io": "~3.5.0",
|
||||
"has-binary2": "~1.0.2",
|
||||
"socket.io-adapter": "~1.1.0",
|
||||
"socket.io-client": "2.4.0",
|
||||
"socket.io-parser": "~3.4.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
|
||||
"integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
|
||||
"requires": {
|
||||
"ms": "^2.1.1"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"socket.io-adapter": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.1.tgz",
|
||||
"integrity": "sha1-KoBeihTWNyEk3ZFZrUUC+MsH8Gs="
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.2.tgz",
|
||||
"integrity": "sha512-WzZRUj1kUjrTIrUKpZLEzFZ1OLj5FwLlAFQs9kuZJzJi5DKdU7FsWc36SNmA8iDOtwBQyT8FkrriRM8vXLYz8g=="
|
||||
},
|
||||
"socket.io-client": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.0.4.tgz",
|
||||
"integrity": "sha1-CRilUkBtxeVAs4Dc2Xr8SmQzL44=",
|
||||
"version": "2.4.0",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.4.0.tgz",
|
||||
"integrity": "sha512-M6xhnKQHuuZd4Ba9vltCLT9oa+YvTsP8j9NcEiLElfIg8KeYPyhWOes6x4t+LTAC8enQbE/995AdTem2uNyKKQ==",
|
||||
"requires": {
|
||||
"backo2": "1.0.2",
|
||||
"base64-arraybuffer": "0.1.5",
|
||||
"component-bind": "1.0.0",
|
||||
"component-emitter": "1.2.1",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-client": "3.1.4",
|
||||
"has-cors": "1.1.0",
|
||||
"component-emitter": "~1.3.0",
|
||||
"debug": "~3.1.0",
|
||||
"engine.io-client": "~3.5.0",
|
||||
"has-binary2": "~1.0.2",
|
||||
"indexof": "0.0.1",
|
||||
"object-component": "0.0.3",
|
||||
"parseqs": "0.0.5",
|
||||
"parseuri": "0.0.5",
|
||||
"socket.io-parser": "3.1.2",
|
||||
"parseqs": "0.0.6",
|
||||
"parseuri": "0.0.6",
|
||||
"socket.io-parser": "~3.3.0",
|
||||
"to-array": "0.1.4"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
|
||||
"integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
|
||||
"requires": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
},
|
||||
"socket.io-parser": {
|
||||
"version": "3.3.2",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.3.2.tgz",
|
||||
"integrity": "sha512-FJvDBuOALxdCI9qwRrO/Rfp9yfndRtc1jSgVgV8FDraihmSP/MLGD5PEuJrNfjALvcQ+vMDM/33AWOYP/JSjDg==",
|
||||
"requires": {
|
||||
"component-emitter": "~1.3.0",
|
||||
"debug": "~3.1.0",
|
||||
"isarray": "2.0.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"socket.io-parser": {
|
||||
"version": "3.1.2",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.1.2.tgz",
|
||||
"integrity": "sha1-28IoIVH8T6675Aru3Ady66YZ9/I=",
|
||||
"version": "3.4.1",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.4.1.tgz",
|
||||
"integrity": "sha512-11hMgzL+WCLWf1uFtHSNvliI++tcRUWdoeYuwIl+Axvwy9z2gQM+7nJyN3STj1tLj5JyIUH8/gpDGxzAlDdi0A==",
|
||||
"requires": {
|
||||
"component-emitter": "1.2.1",
|
||||
"debug": "2.6.9",
|
||||
"has-binary2": "1.0.2",
|
||||
"debug": "~4.1.0",
|
||||
"isarray": "2.0.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"component-emitter": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
|
||||
"integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY="
|
||||
},
|
||||
"debug": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
|
||||
"integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
|
||||
"requires": {
|
||||
"ms": "^2.1.1"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"statuses": {
|
||||
@@ -553,11 +595,6 @@
|
||||
"mime-types": "2.1.17"
|
||||
}
|
||||
},
|
||||
"ultron": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz",
|
||||
"integrity": "sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og=="
|
||||
},
|
||||
"unpipe": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
|
||||
@@ -568,31 +605,20 @@
|
||||
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
|
||||
"integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
|
||||
},
|
||||
"uws": {
|
||||
"version": "0.14.5",
|
||||
"resolved": "https://registry.npmjs.org/uws/-/uws-0.14.5.tgz",
|
||||
"integrity": "sha1-Z6rzPEaypYel9mZtAPdpEyjxSdw=",
|
||||
"optional": true
|
||||
},
|
||||
"vary": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
|
||||
"integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
|
||||
},
|
||||
"ws": {
|
||||
"version": "3.3.3",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-3.3.3.tgz",
|
||||
"integrity": "sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==",
|
||||
"requires": {
|
||||
"async-limiter": "1.0.0",
|
||||
"safe-buffer": "5.1.1",
|
||||
"ultron": "1.1.1"
|
||||
}
|
||||
"version": "7.4.2",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-7.4.2.tgz",
|
||||
"integrity": "sha512-T4tewALS3+qsrpGI/8dqNMLIVdq/g/85U98HPMa6F0m6xTbvhXU6RCQLqPH3+SlomNV/LdY6RXEbBpMH6EOJnA=="
|
||||
},
|
||||
"xmlhttprequest-ssl": {
|
||||
"version": "1.5.4",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.4.tgz",
|
||||
"integrity": "sha1-BPVgkVcks4kIhxXMDteBPpZ3v1c="
|
||||
"version": "1.5.5",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.5.tgz",
|
||||
"integrity": "sha1-wodrBhaKrcQOV9l+gRkayPQ5iz4="
|
||||
},
|
||||
"yeast": {
|
||||
"version": "0.1.2",
|
||||
|
||||
@@ -3,6 +3,6 @@
|
||||
"version": "0.0.1",
|
||||
"dependencies": {
|
||||
"express": "^4.16.2",
|
||||
"socket.io": "^2.0.4"
|
||||
"socket.io": "^2.4.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -329,4 +329,4 @@ This is ideal to debug regressions, do side-by-side comparisons, etc.
|
||||
:EN:- Connecting services together with a *Compose file*
|
||||
|
||||
:FR:- Utiliser Compose pour décrire son environnement
|
||||
:FR:- Écrire un *Compose file* pour connecter les services entre eux
|
||||
:FR:- Écrire un *Compose file* pour connecter les services entre eux
|
||||
|
||||
@@ -307,6 +307,8 @@ Let's remove the `redis` container:
|
||||
$ docker rm -f redis
|
||||
```
|
||||
|
||||
* `-f`: Force the removal of a running container (uses SIGKILL)
|
||||
|
||||
And create one that doesn't block the `redis` name:
|
||||
|
||||
```bash
|
||||
@@ -740,3 +742,15 @@ class: extra-details
|
||||
* This may be used to access an internal package repository.
|
||||
|
||||
(But try to use a multi-stage build instead, if possible!)
|
||||
|
||||
???
|
||||
|
||||
:EN:Container networking essentials
|
||||
:EN:- The Container Network Model
|
||||
:EN:- Container isolation
|
||||
:EN:- Service discovery
|
||||
|
||||
:FR:Mettre ses conteneurs en réseau
|
||||
:FR:- Le "Container Network Model"
|
||||
:FR:- Isolation des conteneurs
|
||||
:FR:- *Service discovery*
|
||||
|
||||
@@ -229,10 +229,5 @@ containers together without exposing their ports.
|
||||
|
||||
???
|
||||
|
||||
:EN:Connecting containers
|
||||
:EN:- Container networking basics
|
||||
:EN:- Exposing a container
|
||||
|
||||
:FR:Connecter les conteneurs
|
||||
:FR:- Description du modèle réseau des conteneurs
|
||||
:FR:- Exposer un conteneur
|
||||
:EN:- Exposing single containers
|
||||
:FR:- Exposer un conteneur isolé
|
||||
|
||||
@@ -101,5 +101,5 @@ Success!
|
||||
|
||||
???
|
||||
|
||||
:EN:- The build cache
|
||||
:EN:- Leveraging the build cache for faster builds
|
||||
:FR:- Tirer parti du cache afin d'optimiser la vitesse de *build*
|
||||
|
||||
@@ -424,7 +424,7 @@ services:
|
||||
|
||||
- In this chapter, we showed many ways to write Dockerfiles.
|
||||
|
||||
- These Dockerfiles use sometimes diametrally opposed techniques.
|
||||
- These Dockerfiles use sometimes diametrically opposed techniques.
|
||||
|
||||
- Yet, they were the "right" ones *for a specific situation.*
|
||||
|
||||
@@ -434,5 +434,12 @@ services:
|
||||
|
||||
???
|
||||
|
||||
:EN:Optimizing images
|
||||
:EN:- Dockerfile tips, tricks, and best practices
|
||||
:FR:- Bonnes pratiques pour la construction des images
|
||||
:EN:- Reducing build time
|
||||
:EN:- Reducing image size
|
||||
|
||||
:FR:Optimiser ses images
|
||||
:FR:- Bonnes pratiques, trucs et astuces
|
||||
:FR:- Réduire le temps de build
|
||||
:FR:- Réduire la taille des images
|
||||
|
||||
@@ -82,3 +82,12 @@ Use cases:
|
||||
* Those containers can communicate over their `lo` interface.
|
||||
<br/>(i.e. one can bind to 127.0.0.1 and the others can connect to it.)
|
||||
|
||||
???
|
||||
|
||||
:EN:Advanced container networking
|
||||
:EN:- Transparent network access with the "host" driver
|
||||
:EN:- Sharing is caring with the "container" driver
|
||||
|
||||
:FR:Paramétrage réseau avancé
|
||||
:FR:- Accès transparent au réseau avec le mode "host"
|
||||
:FR:- Partage de la pile réseau avece le mode "container"
|
||||
|
||||
@@ -119,7 +119,7 @@ Nano and LinuxKit VMs in Hyper-V!)
|
||||
|
||||
- golang, mongo, python, redis, hello-world ... and more being added
|
||||
|
||||
- you should still use `--plaform` with multi-os images to be certain
|
||||
- you should still use `--platform` with multi-os images to be certain
|
||||
|
||||
- Windows Containers now support `localhost` accessible containers (July 2018)
|
||||
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
<?xml version="1.0"?>
|
||||
<html>
|
||||
<head>
|
||||
<style>
|
||||
td {
|
||||
background: #ccc;
|
||||
padding: 1em;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<table>
|
||||
<tr>
|
||||
<td>Lundi 5 octobre 2020</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 6 octobre 2020</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mercredi 7 octobre 2020</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Jeudi 8 octobre 2020</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Vendredi 9 octobre 2020</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Lundi 12 octobre 2020</td>
|
||||
<td>
|
||||
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 13 octobre 2020</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mercredi 14 octobre 2020</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Lundi 19 octobre 2020</td>
|
||||
<td>
|
||||
<a href="5.yml.html">Opérer Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 20 octobre 2020</td>
|
||||
<td>
|
||||
<a href="5.yml.html">Opérer Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
</body>
|
||||
</html>
|
||||
3921
slides/images/control-planes/advanced-control-plane-split-events.svg
Normal file
|
After Width: | Height: | Size: 231 KiB |
3596
slides/images/control-planes/advanced-control-plane.svg
Normal file
|
After Width: | Height: | Size: 208 KiB |
1294
slides/images/control-planes/managed-kubernetes.svg
Normal file
|
After Width: | Height: | Size: 71 KiB |
3132
slides/images/control-planes/non-dedicated-stacked-nodes.svg
Normal file
|
After Width: | Height: | Size: 167 KiB |
1611
slides/images/control-planes/single-control-and-workers.svg
Normal file
|
After Width: | Height: | Size: 90 KiB |
914
slides/images/control-planes/single-node-dev.svg
Normal file
@@ -0,0 +1,914 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="1600"
|
||||
height="900"
|
||||
viewBox="0 0 1600 900"
|
||||
version="1.1"
|
||||
id="svg696"
|
||||
sodipodi:docname="single-node-dev.svg"
|
||||
inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
|
||||
enable-background="new">
|
||||
<metadata
|
||||
id="metadata700">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title>how-does-k8s-work</dc:title>
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<sodipodi:namedview
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1"
|
||||
objecttolerance="10"
|
||||
gridtolerance="10"
|
||||
guidetolerance="10"
|
||||
inkscape:pageopacity="0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="1080"
|
||||
id="namedview698"
|
||||
showgrid="false"
|
||||
inkscape:zoom="0.64"
|
||||
inkscape:cx="133.80574"
|
||||
inkscape:cy="440.39529"
|
||||
inkscape:window-x="0"
|
||||
inkscape:window-y="1080"
|
||||
inkscape:window-maximized="0"
|
||||
inkscape:current-layer="how-does-k8s-work"
|
||||
units="px"
|
||||
inkscape:snap-object-midpoints="true"
|
||||
inkscape:document-rotation="0" />
|
||||
<title
|
||||
id="title304">how-does-k8s-work</title>
|
||||
<style
|
||||
type="text/css"
|
||||
id="style5"><![CDATA[
|
||||
@font-face {
|
||||
font-family: "Droid Serif";
|
||||
src: url(https://fonts.gstatic.com/s/droidserif/v9/tDbI2oqRg1oM3QBjjcaDkOr9rAU.woff2) format("woff2");
|
||||
}
|
||||
]]></style>
|
||||
<defs
|
||||
id="defs483">
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker4502"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path4500"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker4492"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path4490"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker3758"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path3756" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker3586"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path3584" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2794"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2792" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2634"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2632" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2202"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2200" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2054"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2052" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2781"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2779" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2657"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2655" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker2327"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path2325"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker2181"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path2179"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker2026"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always">
|
||||
<path
|
||||
id="path2024"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker1880"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always">
|
||||
<path
|
||||
id="path1878"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker1725"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path1723"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker1613"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path1611"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<linearGradient
|
||||
id="linearGradient15544"
|
||||
osb:paint="solid">
|
||||
<stop
|
||||
style="stop-color:#f7fe9a;stop-opacity:1;"
|
||||
offset="0"
|
||||
id="stop15542" />
|
||||
</linearGradient>
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker15078"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path15076"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker14924"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path14922"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker6635"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path6633" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker6541"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path6539" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker6297"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always">
|
||||
<path
|
||||
id="path6295"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker4353"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS"
|
||||
inkscape:collect="always">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path4351" />
|
||||
</marker>
|
||||
<filter
|
||||
x="-0.039000001"
|
||||
y="-0.096999995"
|
||||
width="1.077"
|
||||
height="1.181"
|
||||
filterUnits="objectBoundingBox"
|
||||
id="filter-1">
|
||||
<feOffset
|
||||
dx="0"
|
||||
dy="2"
|
||||
in="SourceAlpha"
|
||||
result="shadowOffsetOuter1"
|
||||
id="feOffset308" />
|
||||
<feGaussianBlur
|
||||
stdDeviation="2"
|
||||
in="shadowOffsetOuter1"
|
||||
result="shadowBlurOuter1"
|
||||
id="feGaussianBlur310" />
|
||||
<feColorMatrix
|
||||
values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"
|
||||
type="matrix"
|
||||
in="shadowBlurOuter1"
|
||||
result="shadowMatrixOuter1"
|
||||
id="feColorMatrix312" />
|
||||
<feMerge
|
||||
id="feMerge318">
|
||||
<feMergeNode
|
||||
in="shadowMatrixOuter1"
|
||||
id="feMergeNode314" />
|
||||
<feMergeNode
|
||||
in="SourceGraphic"
|
||||
id="feMergeNode316" />
|
||||
</feMerge>
|
||||
</filter>
|
||||
<filter
|
||||
x="-0.039000001"
|
||||
y="-0.096999995"
|
||||
width="1.077"
|
||||
height="1.181"
|
||||
filterUnits="objectBoundingBox"
|
||||
id="filter-1-3">
|
||||
<feOffset
|
||||
dx="0"
|
||||
dy="2"
|
||||
in="SourceAlpha"
|
||||
result="shadowOffsetOuter1"
|
||||
id="feOffset308-6" />
|
||||
<feGaussianBlur
|
||||
stdDeviation="2"
|
||||
in="shadowOffsetOuter1"
|
||||
result="shadowBlurOuter1"
|
||||
id="feGaussianBlur310-7" />
|
||||
<feColorMatrix
|
||||
values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"
|
||||
type="matrix"
|
||||
in="shadowBlurOuter1"
|
||||
result="shadowMatrixOuter1"
|
||||
id="feColorMatrix312-5" />
|
||||
<feMerge
|
||||
id="feMerge318-3">
|
||||
<feMergeNode
|
||||
in="shadowMatrixOuter1"
|
||||
id="feMergeNode314-5" />
|
||||
<feMergeNode
|
||||
in="SourceGraphic"
|
||||
id="feMergeNode316-6" />
|
||||
</feMerge>
|
||||
</filter>
|
||||
<filter
|
||||
inkscape:collect="always"
|
||||
style="color-interpolation-filters:sRGB"
|
||||
id="filter1101"
|
||||
x="-0.023413722"
|
||||
width="1.0468274"
|
||||
y="-0.023627247"
|
||||
height="1.0472545">
|
||||
<feGaussianBlur
|
||||
inkscape:collect="always"
|
||||
stdDeviation="6.3996521"
|
||||
id="feGaussianBlur1103" />
|
||||
</filter>
|
||||
</defs>
|
||||
<g
|
||||
id="how-does-k8s-work"
|
||||
style="display:inline;fill:none;fill-rule:evenodd;stroke:none;stroke-width:1"
|
||||
transform="translate(240,90)">
|
||||
<path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path3461"
|
||||
d="m 550.17888,-14.918735 c -5.79916,0.29836 -11.4811,1.76683 -16.7125,4.31926 L 305.41221,100.68854 c -11.95688,5.8319 -20.64156,16.86146 -23.59583,29.96674 l -56.2625,249.981 c -2.62478,11.6363 -0.48906,23.8532 5.92083,33.869 0.7693,1.2119 1.59668,2.3849 2.47917,3.515 l 157.85,200.44354 c 8.27676,10.5066 20.82591,16.6243 34.09583,16.6217 l 253.13751,-0.06 c 13.26496,0.01 25.81322,-6.0964 34.09583,-16.5919 L 870.92472,417.96068 c 8.28119,-10.5119 11.38389,-24.2726 8.42917,-37.384 l -56.35,-249.98098 c -2.95427,-13.10528 -11.63895,-24.13483 -23.59583,-29.96674 L 571.32472,-10.599475 c -6.58031,-3.21076 -13.85136,-4.69595 -21.14584,-4.31926 z"
|
||||
style="display:inline;opacity:1;mix-blend-mode:overlay;vector-effect:none;fill:#4285f4;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2.78722;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter1101)"
|
||||
transform="matrix(1.1552713,0,0,1.1552713,-85.780113,43.857391)"
|
||||
inkscape:label="control plane" />
|
||||
<text
|
||||
id="text3581"
|
||||
y="763.69812"
|
||||
x="502.07855"
|
||||
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:32.6588px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;filter:url(#filter-1)"
|
||||
xml:space="preserve"
|
||||
transform="matrix(1.4029438,0,0,1.4029438,-157.63347,-1100.6682)"
|
||||
inkscape:label="control plane label"><tspan
|
||||
y="763.69812"
|
||||
x="502.07855"
|
||||
id="tspan3579"
|
||||
sodipodi:role="line">SINGLE-NODE CLUSTER (FOR DEVELOPMENT)</tspan></text>
|
||||
<g
|
||||
id="apiserver"
|
||||
transform="translate(-160.72924,-102.29405)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,99.00177,261.15864)"
|
||||
ry="5.617908"
|
||||
y="135.0636"
|
||||
x="427.27243"
|
||||
height="125.52966"
|
||||
width="231.99153"
|
||||
id="rect3668"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
inkscape:label="API server" />
|
||||
<text
|
||||
id="text4504"
|
||||
y="438.31876"
|
||||
x="552.05261"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
y="438.31876"
|
||||
x="552.05261"
|
||||
id="tspan4502"
|
||||
sodipodi:role="line">API server</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="controller-manager"
|
||||
transform="translate(-200,22)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,205.00177,315.15864)"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
id="rect4506"
|
||||
width="231.99153"
|
||||
height="125.52966"
|
||||
x="426.37198"
|
||||
y="298.2099"
|
||||
ry="5.617908"
|
||||
inkscape:label="controller manager" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
x="656.36871"
|
||||
y="606.2431"
|
||||
id="text4510"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
sodipodi:role="line"
|
||||
id="tspan4508"
|
||||
x="656.36871"
|
||||
y="606.2431">controller</tspan><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
id="tspan4524"
|
||||
sodipodi:role="line"
|
||||
x="656.36871"
|
||||
y="647.06647">manager</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="scheduler"
|
||||
transform="translate(-100,-118)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,-4.99823,167.15864)"
|
||||
ry="5.617908"
|
||||
y="475.73566"
|
||||
x="427.94846"
|
||||
height="125.52966"
|
||||
width="231.99153"
|
||||
id="rect4512"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
inkscape:label="scheduler" />
|
||||
<text
|
||||
id="text4516"
|
||||
y="628.66296"
|
||||
x="447.62476"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
y="628.66296"
|
||||
x="447.62476"
|
||||
id="tspan4514"
|
||||
sodipodi:role="line">scheduler</tspan></text>
|
||||
</g>
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
x="560.76428"
|
||||
y="764.29028"
|
||||
id="text1649"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
sodipodi:role="line"
|
||||
id="tspan1647"
|
||||
x="560.76428"
|
||||
y="764.29028">VM or container</tspan></text>
|
||||
<text
|
||||
id="text3666"
|
||||
y="281.34979"
|
||||
x="-192.40442"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:71.7295px;line-height:1.25;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.79323"
|
||||
xml:space="preserve"
|
||||
inkscape:label="emojis"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:71.7295px;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;writing-mode:lr-tb;text-anchor:start;stroke-width:1.79323"
|
||||
y="281.34979"
|
||||
x="-192.40442"
|
||||
id="tspan3664"
|
||||
sodipodi:role="line">👩🏼💻👨🏾💻🤖</tspan></text>
|
||||
<rect
|
||||
transform="matrix(0.74849003,0,0,0.42877044,-44.82304,220.38115)"
|
||||
y="149.33455"
|
||||
x="-217.52838"
|
||||
height="357.51495"
|
||||
width="435.94931"
|
||||
id="rect3662"
|
||||
style="display:inline;vector-effect:none;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1-3)"
|
||||
inkscape:label="terminal" />
|
||||
<text
|
||||
inkscape:label="commands"
|
||||
id="text3656"
|
||||
y="331.70175"
|
||||
x="-189.80005"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:35.8105px;line-height:1.25;font-family:Consolas;-inkscape-font-specification:'Consolas, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;display:inline;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.895262"
|
||||
xml:space="preserve"><tspan
|
||||
style="stroke-width:0.895262"
|
||||
y="331.70175"
|
||||
x="-189.80005"
|
||||
sodipodi:role="line"
|
||||
id="tspan1145">$ kubectl ...</tspan></text>
|
||||
<text
|
||||
inkscape:label="thumbsup"
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:71.7295px;line-height:1.25;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.79323"
|
||||
x="207.5956"
|
||||
y="359.34979"
|
||||
id="text5150"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan5148"
|
||||
x="207.5956"
|
||||
y="359.34979"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:71.7295px;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;writing-mode:lr-tb;text-anchor:start;stroke-width:1.79323" /></text>
|
||||
<path
|
||||
inkscape:label="arrow kubectl"
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path1135"
|
||||
d="m 198.47677,432.58136 237.89885,-0.39724"
|
||||
style="display:none;vector-effect:none;fill:#cccccc;fill-opacity:1;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker1613);marker-end:url(#marker1725);paint-order:normal" />
|
||||
<path
|
||||
inkscape:label="arrow scheduler"
|
||||
style="display:inline;fill:#cccccc;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#marker4353);paint-order:normal"
|
||||
d="m 414.32586,398.25642 -3.32843,50.64656"
|
||||
id="path4349"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<path
|
||||
inkscape:label="arrow controller manager"
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path6293"
|
||||
d="m 469.44118,396.89114 20.93237,189.75864"
|
||||
style="display:inline;fill:#cccccc;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#marker6297);paint-order:normal" />
|
||||
<path
|
||||
inkscape:label="node top"
|
||||
style="vector-effect:none;fill:#cccccc;fill-opacity:1;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker4502);paint-order:normal"
|
||||
d="M 117.18356,331.36155 274.9691,326.32941"
|
||||
id="path4488"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<g
|
||||
id="etcd"
|
||||
transform="translate(1.971505,-80.740088)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,99.00177,181.15864)"
|
||||
style="display:inline;opacity:1;vector-effect:none;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
id="rect4518"
|
||||
width="231.99153"
|
||||
height="125.52966"
|
||||
x="427.27246"
|
||||
y="-4.9364014"
|
||||
ry="5.617908"
|
||||
inkscape:label="etcd" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.834657"
|
||||
x="552.46265"
|
||||
y="161.46683"
|
||||
id="text4522"
|
||||
transform="translate(0,80)"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
sodipodi:role="line"
|
||||
id="tspan4520"
|
||||
x="552.46265"
|
||||
y="161.46683">etcd</tspan></text>
|
||||
<g
|
||||
id="g3228"
|
||||
inkscape:label="storage"
|
||||
style="display:inline"
|
||||
transform="matrix(0.24039167,0,0,0.24784672,397.27503,204.48707)">
|
||||
<ellipse
|
||||
cx="374.0946"
|
||||
cy="234.48322"
|
||||
rx="92.65731"
|
||||
ry="25.358843"
|
||||
style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:5;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;paint-order:normal"
|
||||
id="ellipse9871" />
|
||||
<path
|
||||
sodipodi:nodetypes="cccc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="rect9873"
|
||||
d="M 281.43729,235.006 V -24.92734 H 466.75191 V 235.006"
|
||||
style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:5;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;paint-order:normal" />
|
||||
<ellipse
|
||||
ry="25.358843"
|
||||
rx="92.65731"
|
||||
cy="-24.750473"
|
||||
cx="374.0946"
|
||||
id="path9869"
|
||||
style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:5;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;paint-order:normal" />
|
||||
</g>
|
||||
</g>
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:40px;line-height:1.25;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none"
|
||||
x="-314.87122"
|
||||
y="76.790283"
|
||||
id="text5696"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan5694"
|
||||
x="-314.87122"
|
||||
y="76.790283" /></text>
|
||||
<g
|
||||
id="g6027"
|
||||
transform="translate(173.26362,-123.65545)"
|
||||
inkscape:label="kubelet">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,99.00177,261.15864)"
|
||||
ry="5.617908"
|
||||
y="135.0636"
|
||||
x="427.27243"
|
||||
height="125.52966"
|
||||
width="231.99153"
|
||||
id="rect6021"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
inkscape:label="rect" />
|
||||
<text
|
||||
id="text6025"
|
||||
y="438.31876"
|
||||
x="552.05261"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
y="438.31876"
|
||||
x="552.05261"
|
||||
id="tspan6023"
|
||||
sodipodi:role="line">kubelet</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g6159"
|
||||
inkscape:label="pod"
|
||||
style="display:inline"
|
||||
transform="translate(-465.32975,388.44365)">
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,188.949 38.7689,-11.2425 38.7688,11.2425 -38.7688,11.24254 z"
|
||||
id="path6139" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,193.25418 v 41.2523 l 36.1218,20.00898 0.1788,-50.46488 z"
|
||||
id="path6141" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1254.6157,193.25418 v 41.2523 l -36.1217,20.00898 -0.1788,-50.46488 z"
|
||||
id="path6143" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:22.0298px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.550744"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
id="text6147"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan6145"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
style="stroke-width:0.550744">pod</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g7100"
|
||||
inkscape:label="container engine"
|
||||
transform="translate(780.76442,-206.55137)">
|
||||
<g
|
||||
id="g7089"
|
||||
transform="translate(-657.05924,68.771622)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,205.00177,315.15864)"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
id="rect7081"
|
||||
width="231.99153"
|
||||
height="125.52966"
|
||||
x="426.37198"
|
||||
y="298.2099"
|
||||
ry="5.617908"
|
||||
inkscape:label="controller manager" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
x="656.36871"
|
||||
y="606.2431"
|
||||
id="text7087"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
id="tspan7085"
|
||||
sodipodi:role="line"
|
||||
x="656.36871"
|
||||
y="606.2431">container</tspan><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
sodipodi:role="line"
|
||||
x="656.36871"
|
||||
y="647.06647"
|
||||
id="tspan7093">engine</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<path
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path2048"
|
||||
d="m 718.47061,417.59763 2.84701,-46.58932"
|
||||
style="display:inline;fill:#cccccc;fill-rule:evenodd;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#marker2054);marker-end:url(#marker2202);paint-order:normal"
|
||||
inkscape:label="node top left" />
|
||||
<path
|
||||
inkscape:label="arrow etcd"
|
||||
style="display:inline;fill:#cccccc;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker6635);paint-order:normal"
|
||||
d="m 465.71622,277.64 30.73977,-64.3282"
|
||||
id="path6537"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<path
|
||||
inkscape:label="node top"
|
||||
style="display:inline;vector-effect:none;fill:#cccccc;fill-opacity:1;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker1880);marker-end:url(#marker2026);paint-order:normal"
|
||||
d="m 505.55141,327.11713 103.53017,-3.03009"
|
||||
id="path8569"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<g
|
||||
id="g7154"
|
||||
inkscape:label="pod"
|
||||
style="display:inline"
|
||||
transform="translate(-578.21351,370.84416)">
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,188.949 38.7689,-11.2425 38.7688,11.2425 -38.7688,11.24254 z"
|
||||
id="path7144" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,193.25418 v 41.2523 l 36.1218,20.00898 0.1788,-50.46488 z"
|
||||
id="path7146" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1254.6157,193.25418 v 41.2523 l -36.1217,20.00898 -0.1788,-50.46488 z"
|
||||
id="path7148" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:22.0298px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.550744"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
id="text7152"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan7150"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
style="stroke-width:0.550744">pod</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g7166"
|
||||
inkscape:label="pod"
|
||||
style="display:inline"
|
||||
transform="translate(-606.70424,238.95491)">
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,188.949 38.7689,-11.2425 38.7688,11.2425 -38.7688,11.24254 z"
|
||||
id="path7156" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,193.25418 v 41.2523 l 36.1218,20.00898 0.1788,-50.46488 z"
|
||||
id="path7158" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1254.6157,193.25418 v 41.2523 l -36.1217,20.00898 -0.1788,-50.46488 z"
|
||||
id="path7160" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:22.0298px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.550744"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
id="text7164"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan7162"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
style="stroke-width:0.550744">pod</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 44 KiB |
3940
slides/images/control-planes/stacked-control-plane.svg
Normal file
|
After Width: | Height: | Size: 234 KiB |
BIN
slides/images/hpa-v2-pa-latency.png
Normal file
|
After Width: | Height: | Size: 66 KiB |
BIN
slides/images/hpa-v2-pa-pods.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
1060
slides/images/k8s-net-0-overview.svg
Normal file
|
After Width: | Height: | Size: 99 KiB |
519
slides/images/k8s-net-1-pod-to-pod.svg
Normal file
@@ -0,0 +1,519 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
version="1.1"
|
||||
viewBox="16 32 880 495"
|
||||
fill="none"
|
||||
stroke="none"
|
||||
stroke-linecap="square"
|
||||
stroke-miterlimit="10"
|
||||
id="svg464"
|
||||
sodipodi:docname="k8s-net-1-pod-to-pod.svg"
|
||||
width="1600"
|
||||
height="900"
|
||||
inkscape:version="1.0.1 (3bc2e813f5, 2020-09-07, custom)">
|
||||
<metadata
|
||||
id="metadata470">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<defs
|
||||
id="defs468" />
|
||||
<sodipodi:namedview
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1"
|
||||
objecttolerance="10"
|
||||
gridtolerance="10"
|
||||
guidetolerance="10"
|
||||
inkscape:pageopacity="0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:window-width="476"
|
||||
inkscape:window-height="1032"
|
||||
id="namedview466"
|
||||
showgrid="false"
|
||||
inkscape:zoom="1.0208333"
|
||||
inkscape:cx="480"
|
||||
inkscape:cy="360"
|
||||
inkscape:window-x="480"
|
||||
inkscape:window-y="18"
|
||||
inkscape:window-maximized="0"
|
||||
inkscape:current-layer="svg464" />
|
||||
<clipPath
|
||||
id="p.0">
|
||||
<path
|
||||
d="M 0,0 H 960 V 720 H 0 Z"
|
||||
clip-rule="nonzero"
|
||||
id="path317" />
|
||||
</clipPath>
|
||||
<g
|
||||
clip-path="url(#p.0)"
|
||||
id="g462">
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="M 0,0 H 960 V 720 H 0 Z"
|
||||
fill-rule="evenodd"
|
||||
id="path320" />
|
||||
<path
|
||||
fill="#d9d9d9"
|
||||
d="m 66.944885,154.29968 v 0 c 0,-13.08115 10.604363,-23.68552 23.685509,-23.68552 H 296.55071 c 6.28177,0 12.30628,2.49544 16.74817,6.93734 4.4419,4.44189 6.93732,10.4664 6.93732,16.74818 v 94.73921 c 0,13.08116 -10.60434,23.6855 -23.68549,23.6855 H 90.630394 c -13.081146,0 -23.685509,-10.60434 -23.685509,-23.6855 z"
|
||||
fill-rule="evenodd"
|
||||
id="path322" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="2"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 66.944885,154.29968 v 0 c 0,-13.08115 10.604363,-23.68552 23.685509,-23.68552 H 296.55071 c 6.28177,0 12.30628,2.49544 16.74817,6.93734 4.4419,4.44189 6.93732,10.4664 6.93732,16.74818 v 94.73921 c 0,13.08116 -10.60434,23.6855 -23.68549,23.6855 H 90.630394 c -13.081146,0 -23.685509,-10.60434 -23.685509,-23.6855 z"
|
||||
fill-rule="evenodd"
|
||||
id="path324" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 85.75713,154.61205 0.04687,1.23437 q 1.125,-1.40625 2.953125,-1.40625 3.125,0 3.15625,3.51563 v 6.51562 h -1.6875 v -6.51562 q -0.01563,-1.07813 -0.5,-1.57813 -0.46875,-0.51562 -1.484375,-0.51562 -0.8125,0 -1.4375,0.4375 -0.609375,0.4375 -0.96875,1.15625 v 7.01562 h -1.67187 v -9.85937 z m 8.246857,4.84375 q 0,-1.45313 0.5625,-2.60938 0.578125,-1.15625 1.59375,-1.78125 1.015625,-0.625 2.3125,-0.625 2.015623,0 3.250003,1.39063 1.25,1.39062 1.25,3.70312 v 0.125 q 0,1.4375 -0.54688,2.57813 -0.54687,1.14062 -1.57812,1.78125 -1.015628,0.64062 -2.359378,0.64062 -2,0 -3.25,-1.39062 -1.234375,-1.40625 -1.234375,-3.70313 z m 1.6875,0.20312 q 0,1.64063 0.765625,2.64063 0.765625,0.98437 2.03125,0.98437 1.296875,0 2.046878,-1 0.75,-1.01562 0.75,-2.82812 0,-1.625 -0.76563,-2.625 -0.765623,-1.01563 -2.046873,-1.01563 -1.25,0 -2.015625,1 -0.765625,0.98438 -0.765625,2.84375 z m 8.983643,-0.20312 q 0,-2.26563 1.07813,-3.64063 1.07812,-1.375 2.8125,-1.375 1.73437,0 2.75,1.17188 v -5.14063 h 1.6875 v 14 h -1.54688 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70312,0 -2.79687,-1.40625 -1.07813,-1.40625 -1.07813,-3.65625 z m 1.6875,0.20312 q 0,1.67188 0.6875,2.625 0.70313,0.9375 1.92188,0.9375 1.60937,0 2.34375,-1.4375 v -4.53125 q -0.76563,-1.39062 -2.32813,-1.39062 -1.23437,0 -1.9375,0.95312 -0.6875,0.95313 -0.6875,2.84375 z m 13.33397,5 q -2,0 -3.26563,-1.3125 -1.25,-1.32812 -1.25,-3.53125 v -0.3125 q 0,-1.46875 0.5625,-2.60937 0.5625,-1.15625 1.5625,-1.79688 1.01563,-0.65625 2.1875,-0.65625 1.92188,0 2.98438,1.26563 1.0625,1.26562 1.0625,3.625 v 0.6875 h -6.67188 q 0.0312,1.46875 0.84375,2.375 0.82813,0.89062 2.07813,0.89062 0.89062,0 1.51562,-0.35937 0.625,-0.375 1.07813,-0.96875 l 1.03125,0.79687 q -1.23438,1.90625 -3.71875,1.90625 z m -0.20313,-8.84375 q -1.01562,0 -1.71875,0.75 -0.6875,0.73438 -0.84375,2.07813 h 4.9375 v -0.125 q -0.0781,-1.28125 -0.70312,-1.98438 -0.60938,-0.71875 -1.67188,-0.71875 z"
|
||||
fill-rule="nonzero"
|
||||
id="path326" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="M 741.52496,67.91863 H 819.2572"
|
||||
fill-rule="evenodd"
|
||||
id="path328" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="M 741.52496,67.91863 H 819.2572"
|
||||
fill-rule="evenodd"
|
||||
id="path330" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="M 692.19946,70.157394 H 868.57745 V 116.01565 H 692.19946 Z"
|
||||
fill-rule="evenodd"
|
||||
id="path332" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 747.46643,91.32864 q 0,2.078125 -0.96875,3.359375 -0.95313,1.28125 -2.57813,1.28125 -1.67187,0 -2.625,-1.0625 v 4.40625 h -1.5625 V 86.64114 h 1.42188 l 0.0781,1.015625 q 0.95313,-1.1875 2.65625,-1.1875 1.65625,0 2.60938,1.25 0.96875,1.234375 0.96875,3.453125 z m -1.5625,-0.1875 q 0,-1.546875 -0.67188,-2.4375 -0.65625,-0.90625 -1.8125,-0.90625 -1.42187,0 -2.125,1.265625 v 4.375 q 0.70313,1.25 2.15625,1.25 1.125,0 1.78125,-0.890625 0.67188,-0.890625 0.67188,-2.65625 z m 3.12799,0 q 0,-1.34375 0.53125,-2.421875 0.53125,-1.078125 1.46875,-1.65625 0.95313,-0.59375 2.15625,-0.59375 1.875,0 3.03125,1.296875 1.15625,1.296875 1.15625,3.453125 v 0.109375 q 0,1.328125 -0.51562,2.390625 -0.51563,1.0625 -1.46875,1.65625 -0.95313,0.59375 -2.1875,0.59375 -1.85938,0 -3.01563,-1.296875 -1.15625,-1.296875 -1.15625,-3.421875 z m 1.57813,0.1875 q 0,1.515625 0.70312,2.4375 0.70313,0.921875 1.89063,0.921875 1.20312,0 1.89062,-0.9375 0.70313,-0.9375 0.70313,-2.609375 0,-1.515625 -0.71875,-2.4375 -0.70313,-0.9375 -1.89063,-0.9375 -1.15625,0 -1.875,0.921875 -0.70312,0.921875 -0.70312,2.640625 z m 8.33557,-0.1875 q 0,-2.109375 1,-3.390625 1,-1.28125 2.625,-1.28125 1.60937,0 2.54687,1.109375 v -4.78125 h 1.5625 v 13 h -1.4375 l -0.0781,-0.984375 q -0.9375,1.15625 -2.60938,1.15625 -1.59375,0 -2.60937,-1.296875 -1,-1.3125 -1,-3.40625 z m 1.57812,0.1875 q 0,1.546875 0.64063,2.4375 0.64062,0.875 1.76562,0.875 1.5,0 2.1875,-1.34375 v -4.203125 q -0.70312,-1.296875 -2.15625,-1.296875 -1.15625,0 -1.79687,0.890625 -0.64063,0.890625 -0.64063,2.640625 z m 11.83496,-0.125 h -4.125 v -1.28125 h 4.125 z m 3.65546,-6.78125 v 2.21875 h 1.70312 v 1.21875 h -1.70312 v 5.671875 q 0,0.546875 0.21875,0.828125 0.23437,0.265625 0.78125,0.265625 0.26562,0 0.75,-0.09375 v 1.265625 q -0.625,0.171875 -1.20313,0.171875 -1.04687,0 -1.57812,-0.625 -0.53125,-0.640625 -0.53125,-1.8125 V 87.85989 h -1.67188 v -1.21875 h 1.67188 v -2.21875 z m 2.94427,6.71875 q 0,-1.34375 0.53125,-2.421875 0.53125,-1.078125 
1.46875,-1.65625 0.95313,-0.59375 2.15625,-0.59375 1.875,0 3.03125,1.296875 1.15625,1.296875 1.15625,3.453125 v 0.109375 q 0,1.328125 -0.51562,2.390625 -0.51563,1.0625 -1.46875,1.65625 -0.95313,0.59375 -2.1875,0.59375 -1.85938,0 -3.01563,-1.296875 -1.15625,-1.296875 -1.15625,-3.421875 z m 1.57813,0.1875 q 0,1.515625 0.70312,2.4375 0.70313,0.921875 1.89063,0.921875 1.20312,0 1.89062,-0.9375 0.70313,-0.9375 0.70313,-2.609375 0,-1.515625 -0.71875,-2.4375 -0.70313,-0.9375 -1.89063,-0.9375 -1.15625,0 -1.875,0.921875 -0.70312,0.921875 -0.70312,2.640625 z m 11.9762,-0.125 h -4.125 v -1.28125 h 4.125 z m 9.26483,0.125 q 0,2.078125 -0.96875,3.359375 -0.95313,1.28125 -2.57813,1.28125 -1.67187,0 -2.625,-1.0625 v 4.40625 h -1.5625 V 86.64114 h 1.42188 l 0.0781,1.015625 q 0.95313,-1.1875 2.65625,-1.1875 1.65625,0 2.60938,1.25 0.96875,1.234375 0.96875,3.453125 z m -1.5625,-0.1875 q 0,-1.546875 -0.67188,-2.4375 -0.65625,-0.90625 -1.8125,-0.90625 -1.42187,0 -2.125,1.265625 v 4.375 q 0.70313,1.25 2.15625,1.25 1.125,0 1.78125,-0.890625 0.67188,-0.890625 0.67188,-2.65625 z m 3.12793,0 q 0,-1.34375 0.53125,-2.421875 0.53125,-1.078125 1.46875,-1.65625 0.95312,-0.59375 2.15625,-0.59375 1.875,0 3.03125,1.296875 1.15625,1.296875 1.15625,3.453125 v 0.109375 q 0,1.328125 -0.51563,2.390625 -0.51562,1.0625 -1.46875,1.65625 -0.95312,0.59375 -2.1875,0.59375 -1.85937,0 -3.01562,-1.296875 -1.15625,-1.296875 -1.15625,-3.421875 z m 1.57812,0.1875 q 0,1.515625 0.70313,2.4375 0.70312,0.921875 1.89062,0.921875 1.20313,0 1.89063,-0.9375 0.70312,-0.9375 0.70312,-2.609375 0,-1.515625 -0.71875,-2.4375 -0.70312,-0.9375 -1.89062,-0.9375 -1.15625,0 -1.875,0.921875 -0.70313,0.921875 -0.70313,2.640625 z m 8.33557,-0.1875 q 0,-2.109375 1,-3.390625 1,-1.28125 2.625,-1.28125 1.60938,0 2.54688,1.109375 v -4.78125 h 1.5625 v 13 h -1.4375 l -0.0781,-0.984375 q -0.9375,1.15625 -2.60937,1.15625 -1.59375,0 -2.60938,-1.296875 -1,-1.3125 -1,-3.40625 z m 1.57813,0.1875 q 0,1.546875 0.64062,2.4375 0.64063,0.875 
1.76563,0.875 1.5,0 2.1875,-1.34375 v -4.203125 q -0.70313,-1.296875 -2.15625,-1.296875 -1.15625,0 -1.79688,0.890625 -0.64062,0.890625 -0.64062,2.640625 z"
|
||||
fill-rule="nonzero"
|
||||
id="path334" />
|
||||
<path
|
||||
fill="#f4cccc"
|
||||
d="m 228.27559,151.61942 h 63.53264 l 8.34137,8.34138 v 41.70587 h -71.87401 z"
|
||||
fill-rule="evenodd"
|
||||
id="path336" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 228.27559,151.61942 h 63.53264 l 8.34137,8.34138 v 41.70587 h -71.87401 z"
|
||||
fill-rule="evenodd"
|
||||
id="path338" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 246.86934,177.89761 q 0,2.25 -1.03125,3.625 -1.01563,1.375 -2.78125,1.375 -1.79688,0 -2.82813,-1.14063 v 4.75 h -1.67187 v -13.65625 h 1.53125 l 0.0781,1.09375 q 1.03125,-1.26562 2.85938,-1.26562 1.78125,0 2.8125,1.34375 1.03125,1.32812 1.03125,3.71875 z m -1.67188,-0.20313 q 0,-1.65625 -0.71875,-2.625 -0.70312,-0.96875 -1.95312,-0.96875 -1.53125,0 -2.29688,1.35938 v 4.70312 q 0.76563,1.35938 2.32813,1.35938 1.20312,0 1.92187,-0.95313 0.71875,-0.96875 0.71875,-2.875 z m 3.37307,0 q 0,-1.45312 0.5625,-2.60937 0.57812,-1.15625 1.59375,-1.78125 1.01562,-0.625 2.3125,-0.625 2.01562,0 3.25,1.39062 1.25,1.39063 1.25,3.70313 v 0.125 q 0,1.4375 -0.54688,2.57812 -0.54687,1.14063 -1.57812,1.78125 -1.01563,0.64063 -2.35938,0.64063 -2,0 -3.25,-1.39063 -1.23437,-1.40625 -1.23437,-3.70312 z m 1.6875,0.20313 q 0,1.64062 0.76562,2.64062 0.76563,0.98438 2.03125,0.98438 1.29688,0 2.04688,-1 0.75,-1.01563 0.75,-2.82813 0,-1.625 -0.76563,-2.625 -0.76562,-1.01562 -2.04687,-1.01562 -1.25,0 -2.01563,1 -0.76562,0.98437 -0.76562,2.84375 z m 8.98364,-0.20313 q 0,-2.26562 1.07813,-3.64062 1.07812,-1.375 2.8125,-1.375 1.73437,0 2.75,1.17187 v -5.14062 h 1.6875 v 14 h -1.54688 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70312,0 -2.79687,-1.40625 -1.07813,-1.40625 -1.07813,-3.65625 z m 1.6875,0.20313 q 0,1.67187 0.6875,2.625 0.70313,0.9375 1.92188,0.9375 1.60937,0 2.34375,-1.4375 v -4.53125 q -0.76563,-1.39063 -2.32813,-1.39063 -1.23437,0 -1.9375,0.95313 -0.6875,0.95312 -0.6875,2.84375 z"
|
||||
fill-rule="nonzero"
|
||||
id="path340" />
|
||||
<path
|
||||
fill="#f4cccc"
|
||||
d="m 180.86351,212.08398 h 63.53265 l 8.34137,8.34138 v 41.70586 h -71.87402 z"
|
||||
fill-rule="evenodd"
|
||||
id="path342" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 180.86351,212.08398 h 63.53265 l 8.34137,8.34138 v 41.70586 h -71.87402 z"
|
||||
fill-rule="evenodd"
|
||||
id="path344" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 199.45726,238.36217 q 0,2.25 -1.03125,3.625 -1.01563,1.375 -2.78125,1.375 -1.79688,0 -2.82813,-1.14063 v 4.75 h -1.67187 v -13.65625 h 1.53125 l 0.0781,1.09375 q 1.03125,-1.26562 2.85938,-1.26562 1.78125,0 2.8125,1.34375 1.03125,1.32812 1.03125,3.71875 z m -1.67188,-0.20313 q 0,-1.65625 -0.71875,-2.625 -0.70312,-0.96875 -1.95312,-0.96875 -1.53125,0 -2.29688,1.35938 v 4.70312 q 0.76563,1.35938 2.32813,1.35938 1.20312,0 1.92187,-0.95313 0.71875,-0.96875 0.71875,-2.875 z m 3.37307,0 q 0,-1.45312 0.5625,-2.60937 0.57812,-1.15625 1.59375,-1.78125 1.01562,-0.625 2.3125,-0.625 2.01562,0 3.25,1.39062 1.25,1.39063 1.25,3.70313 v 0.125 q 0,1.4375 -0.54688,2.57812 -0.54687,1.14063 -1.57812,1.78125 -1.01563,0.64063 -2.35938,0.64063 -2,0 -3.25,-1.39063 -1.23437,-1.40625 -1.23437,-3.70312 z m 1.6875,0.20313 q 0,1.64062 0.76562,2.64062 0.76563,0.98438 2.03125,0.98438 1.29688,0 2.04688,-1 0.75,-1.01563 0.75,-2.82813 0,-1.625 -0.76563,-2.625 -0.76562,-1.01562 -2.04687,-1.01562 -1.25,0 -2.01563,1 -0.76562,0.98437 -0.76562,2.84375 z m 8.98364,-0.20313 q 0,-2.26562 1.07813,-3.64062 1.07812,-1.375 2.8125,-1.375 1.73437,0 2.75,1.17187 v -5.14062 h 1.6875 v 14 h -1.54688 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70312,0 -2.79687,-1.40625 -1.07813,-1.40625 -1.07813,-3.65625 z m 1.6875,0.20313 q 0,1.67187 0.6875,2.625 0.70313,0.9375 1.92188,0.9375 1.60937,0 2.34375,-1.4375 v -4.53125 q -0.76563,-1.39063 -2.32813,-1.39063 -1.23437,0 -1.9375,0.95313 -0.6875,0.95312 -0.6875,2.84375 z"
|
||||
fill-rule="nonzero"
|
||||
id="path346" />
|
||||
<path
|
||||
fill="#d9d9d9"
|
||||
d="m 398.08398,209.82724 v 0 c 0,-13.08115 10.60437,-23.6855 23.68552,-23.6855 h 205.92032 c 6.2818,0 12.30627,2.49542 16.74817,6.93732 4.44189,4.44189 6.93731,10.4664 6.93731,16.74818 v 94.73923 c 0,13.08115 -10.60437,23.68552 -23.68548,23.68552 H 421.7695 c -13.08115,0 -23.68552,-10.60437 -23.68552,-23.68552 z"
|
||||
fill-rule="evenodd"
|
||||
id="path348" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="2"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 398.08398,209.82724 v 0 c 0,-13.08115 10.60437,-23.6855 23.68552,-23.6855 h 205.92032 c 6.2818,0 12.30627,2.49542 16.74817,6.93732 4.44189,4.44189 6.93731,10.4664 6.93731,16.74818 v 94.73923 c 0,13.08115 -10.60437,23.68552 -23.68548,23.68552 H 421.7695 c -13.08115,0 -23.68552,-10.60437 -23.68552,-23.68552 z"
|
||||
fill-rule="evenodd"
|
||||
id="path350" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 416.89624,210.1396 0.0469,1.23437 q 1.125,-1.40625 2.95313,-1.40625 3.125,0 3.15625,3.51563 v 6.51562 h -1.6875 v -6.51562 q -0.0156,-1.07813 -0.5,-1.57813 -0.46875,-0.51562 -1.48438,-0.51562 -0.8125,0 -1.4375,0.4375 -0.60937,0.4375 -0.96875,1.15625 v 7.01562 h -1.67187 v -9.85937 z m 8.24686,4.84375 q 0,-1.45313 0.5625,-2.60938 0.57812,-1.15625 1.59375,-1.78125 1.01562,-0.625 2.3125,-0.625 2.01562,0 3.25,1.39063 1.25,1.39062 1.25,3.70312 v 0.125 q 0,1.4375 -0.54688,2.57813 -0.54687,1.14062 -1.57812,1.78125 -1.01563,0.64062 -2.35938,0.64062 -2,0 -3.25,-1.39062 -1.23437,-1.40625 -1.23437,-3.70313 z m 1.6875,0.20312 q 0,1.64063 0.76562,2.64063 0.76563,0.98437 2.03125,0.98437 1.29688,0 2.04688,-1 0.75,-1.01562 0.75,-2.82812 0,-1.625 -0.76563,-2.625 -0.76562,-1.01563 -2.04687,-1.01563 -1.25,0 -2.01563,1 -0.76562,0.98438 -0.76562,2.84375 z m 8.98364,-0.20312 q 0,-2.26563 1.07812,-3.64063 1.07813,-1.375 2.8125,-1.375 1.73438,0 2.75,1.17188 v -5.14063 h 1.6875 v 14 h -1.54687 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70313,0 -2.79688,-1.40625 -1.07812,-1.40625 -1.07812,-3.65625 z m 1.6875,0.20312 q 0,1.67188 0.6875,2.625 0.70312,0.9375 1.92187,0.9375 1.60938,0 2.34375,-1.4375 v -4.53125 q -0.76562,-1.39062 -2.32812,-1.39062 -1.23438,0 -1.9375,0.95312 -0.6875,0.95313 -0.6875,2.84375 z m 13.33395,5 q -2,0 -3.26562,-1.3125 -1.25,-1.32812 -1.25,-3.53125 v -0.3125 q 0,-1.46875 0.5625,-2.60937 0.5625,-1.15625 1.5625,-1.79688 1.01562,-0.65625 2.1875,-0.65625 1.92187,0 2.98437,1.26563 1.0625,1.26562 1.0625,3.625 v 0.6875 h -6.67187 q 0.0312,1.46875 0.84375,2.375 0.82812,0.89062 2.07812,0.89062 0.89063,0 1.51563,-0.35937 0.625,-0.375 1.07812,-0.96875 l 1.03125,0.79687 q -1.23437,1.90625 -3.71875,1.90625 z m -0.20312,-8.84375 q -1.01563,0 -1.71875,0.75 -0.6875,0.73438 -0.84375,2.07813 h 4.9375 v -0.125 q -0.0781,-1.28125 -0.70313,-1.98438 -0.60937,-0.71875 -1.67187,-0.71875 z"
|
||||
fill-rule="nonzero"
|
||||
id="path352" />
|
||||
<path
|
||||
fill="#f4cccc"
|
||||
d="m 436.5315,244.97638 h 63.53265 l 8.34137,8.34137 v 41.70587 H 436.5315 Z"
|
||||
fill-rule="evenodd"
|
||||
id="path354" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 436.5315,244.97638 h 63.53265 l 8.34137,8.34137 v 41.70587 H 436.5315 Z"
|
||||
fill-rule="evenodd"
|
||||
id="path356" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 455.12524,271.25458 q 0,2.25 -1.03125,3.625 -1.01562,1.375 -2.78125,1.375 -1.79687,0 -2.82812,-1.14063 v 4.75 h -1.67188 V 266.2077 h 1.53125 l 0.0781,1.09375 q 1.03125,-1.26562 2.85937,-1.26562 1.78125,0 2.8125,1.34375 1.03125,1.32812 1.03125,3.71875 z m -1.67187,-0.20313 q 0,-1.65625 -0.71875,-2.625 -0.70313,-0.96875 -1.95313,-0.96875 -1.53125,0 -2.29687,1.35938 v 4.70312 q 0.76562,1.35938 2.32812,1.35938 1.20313,0 1.92188,-0.95313 0.71875,-0.96875 0.71875,-2.875 z m 3.37307,0 q 0,-1.45312 0.5625,-2.60937 0.57813,-1.15625 1.59375,-1.78125 1.01563,-0.625 2.3125,-0.625 2.01563,0 3.25,1.39062 1.25,1.39063 1.25,3.70313 v 0.125 q 0,1.4375 -0.54687,2.57812 -0.54688,1.14063 -1.57813,1.78125 -1.01562,0.64063 -2.35937,0.64063 -2,0 -3.25,-1.39063 -1.23438,-1.40625 -1.23438,-3.70312 z m 1.6875,0.20313 q 0,1.64062 0.76563,2.64062 0.76562,0.98438 2.03125,0.98438 1.29687,0 2.04687,-1 0.75,-1.01563 0.75,-2.82813 0,-1.625 -0.76562,-2.625 -0.76563,-1.01562 -2.04688,-1.01562 -1.25,0 -2.01562,1 -0.76563,0.98437 -0.76563,2.84375 z m 8.98365,-0.20313 q 0,-2.26562 1.07812,-3.64062 1.07813,-1.375 2.8125,-1.375 1.73438,0 2.75,1.17187 v -5.14062 h 1.6875 v 14 h -1.54687 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70313,0 -2.79688,-1.40625 -1.07812,-1.40625 -1.07812,-3.65625 z m 1.6875,0.20313 q 0,1.67187 0.6875,2.625 0.70312,0.9375 1.92187,0.9375 1.60938,0 2.34375,-1.4375 v -4.53125 q -0.76562,-1.39063 -2.32812,-1.39063 -1.23438,0 -1.9375,0.95313 -0.6875,0.95312 -0.6875,2.84375 z"
|
||||
fill-rule="nonzero"
|
||||
id="path358" />
|
||||
<path
|
||||
fill="#d9d9d9"
|
||||
d="m 95.60105,380.54904 v 0 c 0,-13.08115 10.60436,-23.68552 23.68551,-23.68552 h 205.92032 c 6.2818,0 12.30627,2.49543 16.74817,6.93732 4.44192,4.4419 6.93735,10.4664 6.93735,16.7482 v 94.7392 c 0,13.08115 -10.60437,23.68552 -23.68552,23.68552 H 119.28656 c -13.08115,0 -23.68551,-10.60437 -23.68551,-23.68552 z"
|
||||
fill-rule="evenodd"
|
||||
id="path360" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="2"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 95.60105,380.54904 v 0 c 0,-13.08115 10.60436,-23.68552 23.68551,-23.68552 h 205.92032 c 6.2818,0 12.30627,2.49543 16.74817,6.93732 4.44192,4.4419 6.93735,10.4664 6.93735,16.7482 v 94.7392 c 0,13.08115 -10.60437,23.68552 -23.68552,23.68552 H 119.28656 c -13.08115,0 -23.68551,-10.60437 -23.68551,-23.68552 z"
|
||||
fill-rule="evenodd"
|
||||
id="path362" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 114.4133,380.8614 0.0469,1.23437 q 1.125,-1.40625 2.95312,-1.40625 3.125,0 3.15625,3.51563 v 6.51562 h -1.6875 v -6.51562 q -0.0156,-1.07813 -0.5,-1.57813 -0.46875,-0.51562 -1.48437,-0.51562 -0.8125,0 -1.4375,0.4375 -0.60938,0.4375 -0.96875,1.15625 v 7.01562 h -1.67188 v -9.85937 z m 8.24686,4.84375 q 0,-1.45313 0.5625,-2.60938 0.57812,-1.15625 1.59375,-1.78125 1.01562,-0.625 2.3125,-0.625 2.01562,0 3.25,1.39063 1.25,1.39062 1.25,3.70312 v 0.125 q 0,1.4375 -0.54688,2.57813 -0.54687,1.14062 -1.57812,1.78125 -1.01563,0.64062 -2.35938,0.64062 -2,0 -3.25,-1.39062 -1.23437,-1.40625 -1.23437,-3.70313 z m 1.6875,0.20312 q 0,1.64063 0.76562,2.64063 0.76563,0.98437 2.03125,0.98437 1.29688,0 2.04688,-1 0.75,-1.01562 0.75,-2.82812 0,-1.625 -0.76563,-2.625 -0.76562,-1.01563 -2.04687,-1.01563 -1.25,0 -2.01563,1 -0.76562,0.98438 -0.76562,2.84375 z m 8.98364,-0.20312 q 0,-2.26563 1.07812,-3.64063 1.07813,-1.375 2.8125,-1.375 1.73438,0 2.75,1.17188 v -5.14063 h 1.6875 v 14 h -1.54687 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70313,0 -2.79688,-1.40625 -1.07812,-1.40625 -1.07812,-3.65625 z m 1.6875,0.20312 q 0,1.67188 0.6875,2.625 0.70312,0.9375 1.92187,0.9375 1.60938,0 2.34375,-1.4375 v -4.53125 q -0.76562,-1.39062 -2.32812,-1.39062 -1.23438,0 -1.9375,0.95312 -0.6875,0.95313 -0.6875,2.84375 z m 13.33397,5 q -2,0 -3.26563,-1.3125 -1.25,-1.32812 -1.25,-3.53125 v -0.3125 q 0,-1.46875 0.5625,-2.60937 0.5625,-1.15625 1.5625,-1.79688 1.01563,-0.65625 2.1875,-0.65625 1.92188,0 2.98438,1.26563 1.0625,1.26562 1.0625,3.625 v 0.6875 h -6.67188 q 0.0312,1.46875 0.84375,2.375 0.82813,0.89062 2.07813,0.89062 0.89062,0 1.51562,-0.35937 0.625,-0.375 1.07813,-0.96875 l 1.03125,0.79687 q -1.23438,1.90625 -3.71875,1.90625 z m -0.20313,-8.84375 q -1.01562,0 -1.71875,0.75 -0.6875,0.73438 -0.84375,2.07813 h 4.9375 v -0.125 q -0.0781,-1.28125 -0.70312,-1.98438 -0.60938,-0.71875 -1.67188,-0.71875 z"
|
||||
fill-rule="nonzero"
|
||||
id="path364" />
|
||||
<path
|
||||
fill="#f4cccc"
|
||||
d="m 200.92651,377.43307 h 63.53262 l 8.3414,8.34137 v 41.70587 h -71.87402 z"
|
||||
fill-rule="evenodd"
|
||||
id="path366" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 200.92651,377.43307 h 63.53262 l 8.3414,8.34137 v 41.70587 h -71.87402 z"
|
||||
fill-rule="evenodd"
|
||||
id="path368" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 219.52026,403.71124 q 0,2.25 -1.03125,3.625 -1.01563,1.375 -2.78125,1.375 -1.79688,0 -2.82813,-1.14063 v 4.75 h -1.67187 v -13.65625 h 1.53125 l 0.0781,1.09375 q 1.03125,-1.26562 2.85938,-1.26562 1.78125,0 2.8125,1.34375 1.03125,1.32812 1.03125,3.71875 z m -1.67188,-0.20313 q 0,-1.65625 -0.71875,-2.625 -0.70312,-0.96875 -1.95312,-0.96875 -1.53125,0 -2.29688,1.35938 v 4.70312 q 0.76563,1.35938 2.32813,1.35938 1.20312,0 1.92187,-0.95313 0.71875,-0.96875 0.71875,-2.875 z m 3.37307,0 q 0,-1.45312 0.5625,-2.60937 0.57812,-1.15625 1.59375,-1.78125 1.01562,-0.625 2.3125,-0.625 2.01562,0 3.25,1.39062 1.25,1.39063 1.25,3.70313 v 0.125 q 0,1.4375 -0.54688,2.57812 -0.54687,1.14063 -1.57812,1.78125 -1.01563,0.64063 -2.35938,0.64063 -2,0 -3.25,-1.39063 -1.23437,-1.40625 -1.23437,-3.70312 z m 1.6875,0.20313 q 0,1.64062 0.76562,2.64062 0.76563,0.98438 2.03125,0.98438 1.29688,0 2.04688,-1 0.75,-1.01563 0.75,-2.82813 0,-1.625 -0.76563,-2.625 -0.76562,-1.01562 -2.04687,-1.01562 -1.25,0 -2.01563,1 -0.76562,0.98437 -0.76562,2.84375 z m 8.98364,-0.20313 q 0,-2.26562 1.07813,-3.64062 1.07812,-1.375 2.8125,-1.375 1.73437,0 2.75,1.17187 v -5.14062 h 1.6875 v 14 h -1.54688 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70312,0 -2.79687,-1.40625 -1.07813,-1.40625 -1.07813,-3.65625 z m 1.6875,0.20313 q 0,1.67187 0.6875,2.625 0.70313,0.9375 1.92188,0.9375 1.60937,0 2.34375,-1.4375 v -4.53125 q -0.76563,-1.39063 -2.32813,-1.39063 -1.23437,0 -1.9375,0.95313 -0.6875,0.95312 -0.6875,2.84375 z"
|
||||
fill-rule="nonzero"
|
||||
id="path370" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 145.48819,305.3176 v 0 c 0,-108.95232 87.37144,-197.2756 195.1496,-197.2756 v 0 c 51.7569,0 101.39395,20.78433 137.99161,57.78069 36.59766,36.99635 57.15802,87.17416 57.15802,139.49492 v 0 c 0,108.9523 -87.37146,197.27557 -195.14963,197.27557 v 0 c -107.77815,0 -195.1496,-88.32327 -195.1496,-197.27557 z"
|
||||
fill-rule="evenodd"
|
||||
id="path372" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 145.48819,305.3176 v 0 c 0,-108.95232 87.37144,-197.2756 195.1496,-197.2756 v 0 c 51.7569,0 101.39395,20.78433 137.99161,57.78069 36.59766,36.99635 57.15802,87.17416 57.15802,139.49492 v 0 c 0,108.9523 -87.37146,197.27557 -195.14963,197.27557 v 0 c -107.77815,0 -195.1496,-88.32327 -195.1496,-197.27557 z"
|
||||
fill-rule="evenodd"
|
||||
id="path374" />
|
||||
<path
|
||||
fill="#f4cccc"
|
||||
d="m 108.9895,417.0105 h 63.53264 l 8.34137,8.34137 v 41.70587 H 108.9895 Z"
|
||||
fill-rule="evenodd"
|
||||
id="path376" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 108.9895,417.0105 h 63.53264 l 8.34137,8.34137 v 41.70587 H 108.9895 Z"
|
||||
fill-rule="evenodd"
|
||||
id="path378" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 127.58325,443.2887 q 0,2.25 -1.03125,3.625 -1.01562,1.375 -2.78125,1.375 -1.79687,0 -2.82812,-1.14063 v 4.75 h -1.67188 v -13.65625 h 1.53125 l 0.0781,1.09375 q 1.03125,-1.26562 2.85937,-1.26562 1.78125,0 2.8125,1.34375 1.03125,1.32812 1.03125,3.71875 z m -1.67187,-0.20313 q 0,-1.65625 -0.71875,-2.625 -0.70313,-0.96875 -1.95313,-0.96875 -1.53125,0 -2.29687,1.35938 v 4.70312 q 0.76562,1.35938 2.32812,1.35938 1.20313,0 1.92188,-0.95313 0.71875,-0.96875 0.71875,-2.875 z m 3.37306,0 q 0,-1.45312 0.5625,-2.60937 0.57812,-1.15625 1.59375,-1.78125 1.01562,-0.625 2.3125,-0.625 2.01562,0 3.25,1.39062 1.25,1.39063 1.25,3.70313 v 0.125 q 0,1.4375 -0.54688,2.57812 -0.54687,1.14063 -1.57812,1.78125 -1.01563,0.64063 -2.35938,0.64063 -2,0 -3.25,-1.39063 -1.23437,-1.40625 -1.23437,-3.70312 z m 1.6875,0.20313 q 0,1.64062 0.76562,2.64062 0.76563,0.98438 2.03125,0.98438 1.29688,0 2.04688,-1 0.75,-1.01563 0.75,-2.82813 0,-1.625 -0.76563,-2.625 -0.76562,-1.01562 -2.04687,-1.01562 -1.25,0 -2.01563,1 -0.76562,0.98437 -0.76562,2.84375 z m 8.98364,-0.20313 q 0,-2.26562 1.07813,-3.64062 1.07812,-1.375 2.8125,-1.375 1.73437,0 2.75,1.17187 v -5.14062 h 1.6875 v 14 h -1.54688 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70312,0 -2.79687,-1.40625 -1.07813,-1.40625 -1.07813,-3.65625 z m 1.6875,0.20313 q 0,1.67187 0.6875,2.625 0.70313,0.9375 1.92188,0.9375 1.60937,0 2.34375,-1.4375 v -4.53125 q -0.76563,-1.39063 -2.32813,-1.39063 -1.23437,0 -1.9375,0.95313 -0.6875,0.95312 -0.6875,2.84375 z"
|
||||
fill-rule="nonzero"
|
||||
id="path380" />
|
||||
<path
|
||||
fill="#f4cccc"
|
||||
d="m 87.96063,209.9895 h 63.53264 l 8.34137,8.34137 v 41.70587 H 87.96063 Z"
|
||||
fill-rule="evenodd"
|
||||
id="path382" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 87.96063,209.9895 h 63.53264 l 8.34137,8.34137 v 41.70587 H 87.96063 Z"
|
||||
fill-rule="evenodd"
|
||||
id="path384" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 106.55438,236.26768 q 0,2.25 -1.03125,3.625 -1.01563,1.375 -2.78125,1.375 -1.79688,0 -2.828125,-1.14062 v 4.75 H 98.24188 v -13.65625 h 1.53125 l 0.07813,1.09375 q 1.03124,-1.26563 2.85937,-1.26563 1.78125,0 2.8125,1.34375 1.03125,1.32813 1.03125,3.71875 z m -1.67188,-0.20312 q 0,-1.65625 -0.71875,-2.625 -0.70312,-0.96875 -1.95312,-0.96875 -1.53125,0 -2.296875,1.35937 v 4.70313 q 0.765625,1.35937 2.328125,1.35937 1.20312,0 1.92187,-0.95312 0.71875,-0.96875 0.71875,-2.875 z m 3.37307,0 q 0,-1.45313 0.5625,-2.60938 0.57812,-1.15625 1.59375,-1.78125 1.01562,-0.625 2.3125,-0.625 2.01562,0 3.25,1.39063 1.25,1.39062 1.25,3.70312 v 0.125 q 0,1.4375 -0.54688,2.57813 -0.54687,1.14062 -1.57812,1.78125 -1.01563,0.64062 -2.35938,0.64062 -2,0 -3.25,-1.39062 -1.23437,-1.40625 -1.23437,-3.70313 z m 1.6875,0.20312 q 0,1.64063 0.76562,2.64063 0.76563,0.98437 2.03125,0.98437 1.29688,0 2.04688,-1 0.75,-1.01562 0.75,-2.82812 0,-1.625 -0.76563,-2.625 -0.76562,-1.01563 -2.04687,-1.01563 -1.25,0 -2.01563,1 -0.76562,0.98438 -0.76562,2.84375 z m 8.98364,-0.20312 q 0,-2.26563 1.07813,-3.64063 1.07812,-1.375 2.8125,-1.375 1.73437,0 2.75,1.17188 v -5.14063 h 1.6875 v 14 h -1.54688 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70312,0 -2.79687,-1.40625 -1.07813,-1.40625 -1.07813,-3.65625 z m 1.6875,0.20312 q 0,1.67188 0.6875,2.625 0.70313,0.9375 1.92188,0.9375 1.60937,0 2.34375,-1.4375 v -4.53125 q -0.76563,-1.39062 -2.32813,-1.39062 -1.23437,0 -1.9375,0.95312 -0.6875,0.95313 -0.6875,2.84375 z"
|
||||
fill-rule="nonzero"
|
||||
id="path386" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 204.99213,171.03674 -81.10236,38.96063"
|
||||
fill-rule="evenodd"
|
||||
id="path388" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 204.99213,171.03674 -81.10236,38.96063"
|
||||
fill-rule="evenodd"
|
||||
id="path390" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 185.52843,162.97461 31.27559,49.10237"
|
||||
fill-rule="evenodd"
|
||||
id="path392" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 185.52843,162.97461 31.27559,49.10237"
|
||||
fill-rule="evenodd"
|
||||
id="path394" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 182.18898,171.03674 46.07873,5.60631"
|
||||
fill-rule="evenodd"
|
||||
id="path396" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 182.18898,171.03674 46.07873,5.60631"
|
||||
fill-rule="evenodd"
|
||||
id="path398" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 180.86351,442.03412 44,27.2756"
|
||||
fill-rule="evenodd"
|
||||
id="path400" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 180.86351,442.03412 44,27.2756"
|
||||
fill-rule="evenodd"
|
||||
id="path402" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="M 236.86351,427.48032 208.73753,469.3386"
|
||||
fill-rule="evenodd"
|
||||
id="path404" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="M 236.86351,427.48032 208.73753,469.3386"
|
||||
fill-rule="evenodd"
|
||||
id="path406" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 228.2021,461.26248 h 24.53543"
|
||||
fill-rule="evenodd"
|
||||
id="path408" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 228.2021,461.26248 h 24.53543"
|
||||
fill-rule="evenodd"
|
||||
id="path410" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 521.6842,219.98076 35.21259,20.59843"
|
||||
fill-rule="evenodd"
|
||||
id="path412" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 521.6842,219.98076 35.21259,20.59843"
|
||||
fill-rule="evenodd"
|
||||
id="path414" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 505.5599,219.98076 -33.10236,25.00789"
|
||||
fill-rule="evenodd"
|
||||
id="path416" />
|
||||
<path
|
||||
stroke="#0000ff"
|
||||
stroke-width="4"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 505.5599,219.98076 -33.10236,25.00789"
|
||||
fill-rule="evenodd"
|
||||
id="path418" />
|
||||
<path
|
||||
fill="#0000ff"
|
||||
d="m 205.39896,461.26248 v 0 c 0,-6.29694 5.10466,-11.40158 11.40157,-11.40158 v 0 c 3.02389,0 5.92392,1.20123 8.06213,3.33945 2.13821,2.13818 3.33945,5.03823 3.33945,8.06213 v 0 c 0,6.29691 -5.10466,11.40155 -11.40158,11.40155 v 0 c -6.29691,0 -11.40157,-5.10465 -11.40157,-11.40155 z"
|
||||
fill-rule="evenodd"
|
||||
id="path420" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 208.73839,453.20035 16.12427,16.12424 m 0,-16.12424 -16.12427,16.12424"
|
||||
fill-rule="evenodd"
|
||||
id="path422" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 205.39896,461.26248 v 0 c 0,-6.29694 5.10466,-11.40158 11.40157,-11.40158 v 0 c 3.02389,0 5.92392,1.20123 8.06213,3.33945 2.13821,2.13818 3.33945,5.03823 3.33945,8.06213 v 0 c 0,6.29691 -5.10466,11.40155 -11.40158,11.40155 v 0 c -6.29691,0 -11.40157,-5.10465 -11.40157,-11.40155 z"
|
||||
fill-rule="evenodd"
|
||||
id="path424" />
|
||||
<path
|
||||
stroke="#ffffff"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 208.73839,453.20035 16.12427,16.12424 m 0,-16.12424 -16.12427,16.12424"
|
||||
fill-rule="evenodd"
|
||||
id="path426" />
|
||||
<path
|
||||
stroke="#ffffff"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 205.39896,461.26248 v 0 c 0,-6.29694 5.10466,-11.40158 11.40157,-11.40158 v 0 c 3.02389,0 5.92392,1.20123 8.06213,3.33945 2.13821,2.13818 3.33945,5.03823 3.33945,8.06213 v 0 c 0,6.29691 -5.10466,11.40155 -11.40158,11.40155 v 0 c -6.29691,0 -11.40157,-5.10465 -11.40157,-11.40155 z"
|
||||
fill-rule="evenodd"
|
||||
id="path428" />
|
||||
<path
|
||||
fill="#0000ff"
|
||||
d="m 182.18898,171.03674 v 0 c 0,-6.29692 5.10466,-11.40157 11.40157,-11.40157 v 0 c 3.02389,0 5.92392,1.20124 8.06213,3.33944 2.13821,2.13821 3.33945,5.03825 3.33945,8.06213 v 0 c 0,6.29692 -5.10466,11.40158 -11.40158,11.40158 v 0 c -6.29691,0 -11.40157,-5.10466 -11.40157,-11.40158 z"
|
||||
fill-rule="evenodd"
|
||||
id="path430" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 185.52843,162.97461 16.12425,16.12427 m 0,-16.12427 -16.12425,16.12427"
|
||||
fill-rule="evenodd"
|
||||
id="path432" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 182.18898,171.03674 v 0 c 0,-6.29692 5.10466,-11.40157 11.40157,-11.40157 v 0 c 3.02389,0 5.92392,1.20124 8.06213,3.33944 2.13821,2.13821 3.33945,5.03825 3.33945,8.06213 v 0 c 0,6.29692 -5.10466,11.40158 -11.40158,11.40158 v 0 c -6.29691,0 -11.40157,-5.10466 -11.40157,-11.40158 z"
|
||||
fill-rule="evenodd"
|
||||
id="path434" />
|
||||
<path
|
||||
stroke="#ffffff"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 185.52843,162.97461 16.12425,16.12427 m 0,-16.12427 -16.12425,16.12427"
|
||||
fill-rule="evenodd"
|
||||
id="path436" />
|
||||
<path
|
||||
stroke="#ffffff"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 182.18898,171.03674 v 0 c 0,-6.29692 5.10466,-11.40157 11.40157,-11.40157 v 0 c 3.02389,0 5.92392,1.20124 8.06213,3.33944 2.13821,2.13821 3.33945,5.03825 3.33945,8.06213 v 0 c 0,6.29692 -5.10466,11.40158 -11.40158,11.40158 v 0 c -6.29691,0 -11.40157,-5.10466 -11.40157,-11.40158 z"
|
||||
fill-rule="evenodd"
|
||||
id="path438" />
|
||||
<path
|
||||
fill="#0000ff"
|
||||
d="m 502.22046,211.91864 v 0 c 0,-6.29692 5.10468,-11.40158 11.40161,-11.40158 v 0 c 3.02387,0 5.92389,1.20123 8.06214,3.33945 2.13818,2.13821 3.33941,5.03823 3.33941,8.06213 v 0 c 0,6.29691 -5.10467,11.40157 -11.40155,11.40157 v 0 c -6.29693,0 -11.40161,-5.10466 -11.40161,-11.40157 z"
|
||||
fill-rule="evenodd"
|
||||
id="path440" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 505.5599,203.8565 16.1243,16.12425 m 0,-16.12425 -16.1243,16.12425"
|
||||
fill-rule="evenodd"
|
||||
id="path442" />
|
||||
<path
|
||||
fill="#000000"
|
||||
fill-opacity="0"
|
||||
d="m 502.22046,211.91864 v 0 c 0,-6.29692 5.10468,-11.40158 11.40161,-11.40158 v 0 c 3.02387,0 5.92389,1.20123 8.06214,3.33945 2.13818,2.13821 3.33941,5.03823 3.33941,8.06213 v 0 c 0,6.29691 -5.10467,11.40157 -11.40155,11.40157 v 0 c -6.29693,0 -11.40161,-5.10466 -11.40161,-11.40157 z"
|
||||
fill-rule="evenodd"
|
||||
id="path444" />
|
||||
<path
|
||||
stroke="#ffffff"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 505.5599,203.8565 16.1243,16.12425 m 0,-16.12425 -16.1243,16.12425"
|
||||
fill-rule="evenodd"
|
||||
id="path446" />
|
||||
<path
|
||||
stroke="#ffffff"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 502.22046,211.91864 v 0 c 0,-6.29692 5.10468,-11.40158 11.40161,-11.40158 v 0 c 3.02387,0 5.92389,1.20123 8.06214,3.33945 2.13818,2.13821 3.33941,5.03823 3.33941,8.06213 v 0 c 0,6.29691 -5.10467,11.40157 -11.40155,11.40157 v 0 c -6.29693,0 -11.40161,-5.10466 -11.40161,-11.40157 z"
|
||||
fill-rule="evenodd"
|
||||
id="path448" />
|
||||
<path
|
||||
fill="#f4cccc"
|
||||
d="m 520.96063,240.58267 h 63.53265 l 8.34137,8.34138 v 41.70586 h -71.87402 z"
|
||||
fill-rule="evenodd"
|
||||
id="path450" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 520.96063,240.58267 h 63.53265 l 8.34137,8.34138 v 41.70586 h -71.87402 z"
|
||||
fill-rule="evenodd"
|
||||
id="path452" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 539.5544,266.86087 q 0,2.25 -1.03125,3.625 -1.01563,1.375 -2.78125,1.375 -1.79688,0 -2.82813,-1.14063 v 4.75 h -1.67187 v -13.65625 h 1.53125 l 0.0781,1.09375 q 1.03125,-1.26562 2.85938,-1.26562 1.78125,0 2.8125,1.34375 1.03125,1.32812 1.03125,3.71875 z m -1.67188,-0.20313 q 0,-1.65625 -0.71875,-2.625 -0.70312,-0.96875 -1.95312,-0.96875 -1.53125,0 -2.29688,1.35938 v 4.70312 q 0.76563,1.35938 2.32813,1.35938 1.20312,0 1.92187,-0.95313 0.71875,-0.96875 0.71875,-2.875 z m 3.37305,0 q 0,-1.45312 0.5625,-2.60937 0.57813,-1.15625 1.59375,-1.78125 1.01563,-0.625 2.3125,-0.625 2.01563,0 3.25,1.39062 1.25,1.39063 1.25,3.70313 v 0.125 q 0,1.4375 -0.54687,2.57812 -0.54688,1.14063 -1.57813,1.78125 -1.01562,0.64063 -2.35937,0.64063 -2,0 -3.25,-1.39063 -1.23438,-1.40625 -1.23438,-3.70312 z m 1.6875,0.20313 q 0,1.64062 0.76563,2.64062 0.76562,0.98438 2.03125,0.98438 1.29687,0 2.04687,-1 0.75,-1.01563 0.75,-2.82813 0,-1.625 -0.76562,-2.625 -0.76563,-1.01562 -2.04688,-1.01562 -1.25,0 -2.01562,1 -0.76563,0.98437 -0.76563,2.84375 z m 8.98364,-0.20313 q 0,-2.26562 1.07813,-3.64062 1.07812,-1.375 2.8125,-1.375 1.73437,0 2.75,1.17187 v -5.14062 h 1.6875 v 14 h -1.54688 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70312,0 -2.79687,-1.40625 -1.07813,-1.40625 -1.07813,-3.65625 z m 1.6875,0.20313 q 0,1.67187 0.6875,2.625 0.70313,0.9375 1.92188,0.9375 1.60937,0 2.34375,-1.4375 v -4.53125 q -0.76563,-1.39063 -2.32813,-1.39063 -1.23437,0 -1.9375,0.95313 -0.6875,0.95312 -0.6875,2.84375 z"
|
||||
fill-rule="nonzero"
|
||||
id="path454" />
|
||||
<path
|
||||
fill="#f4cccc"
|
||||
d="m 252.73753,436.23883 h 63.53264 l 8.34137,8.3414 v 41.70587 h -71.87401 z"
|
||||
fill-rule="evenodd"
|
||||
id="path456" />
|
||||
<path
|
||||
stroke="#434343"
|
||||
stroke-width="1"
|
||||
stroke-linejoin="round"
|
||||
stroke-linecap="butt"
|
||||
d="m 252.73753,436.23883 h 63.53264 l 8.34137,8.3414 v 41.70587 h -71.87401 z"
|
||||
fill-rule="evenodd"
|
||||
id="path458" />
|
||||
<path
|
||||
fill="#000000"
|
||||
d="m 271.33127,462.51703 q 0,2.25 -1.03125,3.625 -1.01562,1.375 -2.78125,1.375 -1.79687,0 -2.82812,-1.14063 v 4.75 h -1.67188 v -13.65625 h 1.53125 l 0.0781,1.09375 q 1.03125,-1.26562 2.85937,-1.26562 1.78125,0 2.8125,1.34375 1.03125,1.32812 1.03125,3.71875 z m -1.67187,-0.20313 q 0,-1.65625 -0.71875,-2.625 -0.70313,-0.96875 -1.95313,-0.96875 -1.53125,0 -2.29687,1.35938 v 4.70312 q 0.76562,1.35938 2.32812,1.35938 1.20313,0 1.92188,-0.95313 0.71875,-0.96875 0.71875,-2.875 z m 3.37307,0 q 0,-1.45312 0.5625,-2.60937 0.57813,-1.15625 1.59375,-1.78125 1.01563,-0.625 2.3125,-0.625 2.01563,0 3.25,1.39062 1.25,1.39063 1.25,3.70313 v 0.125 q 0,1.4375 -0.54687,2.57812 -0.54688,1.14063 -1.57813,1.78125 -1.01562,0.64063 -2.35937,0.64063 -2,0 -3.25,-1.39063 -1.23438,-1.40625 -1.23438,-3.70312 z m 1.6875,0.20313 q 0,1.64062 0.76563,2.64062 0.76562,0.98438 2.03125,0.98438 1.29687,0 2.04687,-1 0.75,-1.01563 0.75,-2.82813 0,-1.625 -0.76562,-2.625 -0.76563,-1.01562 -2.04688,-1.01562 -1.25,0 -2.01562,1 -0.76563,0.98437 -0.76563,2.84375 z m 8.98365,-0.20313 q 0,-2.26562 1.07812,-3.64062 1.07813,-1.375 2.8125,-1.375 1.73438,0 2.75,1.17187 v -5.14062 h 1.6875 v 14 h -1.54687 l -0.0937,-1.0625 q -1,1.25 -2.8125,1.25 -1.70313,0 -2.79688,-1.40625 -1.07812,-1.40625 -1.07812,-3.65625 z m 1.6875,0.20313 q 0,1.67187 0.6875,2.625 0.70312,0.9375 1.92187,0.9375 1.60938,0 2.34375,-1.4375 v -4.53125 q -0.76562,-1.39063 -2.32812,-1.39063 -1.23438,0 -1.9375,0.95313 -0.6875,0.95312 -0.6875,2.84375 z"
|
||||
fill-rule="nonzero"
|
||||
id="path460" />
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 39 KiB |
587
slides/images/k8s-net-2-pod-to-svc.svg
Normal file
|
After Width: | Height: | Size: 57 KiB |
493
slides/images/k8s-net-3-netpol.svg
Normal file
|
After Width: | Height: | Size: 55 KiB |
1108
slides/images/k8s-net-4-overview.svg
Normal file
|
After Width: | Height: | Size: 110 KiB |
@@ -1,3 +1,48 @@
|
||||
- date: [2021-02-08, 2021-02-10]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2021-02-15, 2021-02-18]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2021-02-22, 2021-02-23]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging et CI/CD pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2021-02-24, 2021-02-26]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2021-03-01, 2021-03-02]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-10-05, 2020-10-06]
|
||||
country: www
|
||||
city: streaming
|
||||
|
||||
104
slides/k8s/access-eks-cluster.md
Normal file
@@ -0,0 +1,104 @@
|
||||
## Accessing our EKS cluster
|
||||
|
||||
- We also have a shared EKS cluster
|
||||
|
||||
- With individual IAM users
|
||||
|
||||
- Let's connect to this cluster!
|
||||
|
||||
---
|
||||
|
||||
## What we need
|
||||
|
||||
- `kubectl` (obviously!)
|
||||
|
||||
- `aws` CLI (recent-ish version)
|
||||
|
||||
(or `aws` CLI + `aws-iam-authenticator` plugin)
|
||||
|
||||
- AWS API access key and secret access key
|
||||
|
||||
- AWS region
|
||||
|
||||
- EKS cluster name
|
||||
|
||||
---
|
||||
|
||||
## Setting up AWS credentials
|
||||
|
||||
- There are many ways to do this
|
||||
|
||||
- We're going to use environment variables
|
||||
|
||||
- You're welcome to use whatever you like (e.g. AWS profiles)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Set the AWS region, API access key, and secret key:
|
||||
```bash
|
||||
export AWS_DEFAULT_REGION=`us-east-2`
|
||||
export AWS_ACCESS_KEY_ID=`AKI...`
|
||||
export AWS_SECRET_ACCESS_KEY=`xyz123...`
|
||||
```
|
||||
|
||||
- Check that the AWS API recognizes us:
|
||||
```bash
|
||||
aws sts get-caller-identity
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating our kubeconfig file
|
||||
|
||||
- Now we can use the AWS CLI to:
|
||||
|
||||
- obtain the Kubernetes API address
|
||||
|
||||
- register it in our kubeconfig file
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update our kubeconfig file:
|
||||
```bash
|
||||
aws eks update-kubeconfig --name `fancy-clustername-1234`
|
||||
```
|
||||
|
||||
- Run some harmless command:
|
||||
```bash
|
||||
kubectl version
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Our resources
|
||||
|
||||
- We have the following permissions:
|
||||
|
||||
- `view` in the `default` namespace
|
||||
|
||||
- `edit` in the `container-training` namespace
|
||||
|
||||
- `admin` in our personal namespace
|
||||
|
||||
- Our personal namespace is our IAM user name
|
||||
|
||||
(but with dots replaced with dashes)
|
||||
|
||||
- For instance, user `ada.lovelace` has namespace `ada-lovelace`
|
||||
|
||||
---
|
||||
|
||||
## Deploying things
|
||||
|
||||
- Let's deploy DockerCoins in our personal namespace!
|
||||
|
||||
- Expose the Web UI with a `LoadBalancer` service
|
||||
|
||||
???
|
||||
|
||||
:EN:- Working with an EKS cluster
|
||||
:FR:- Travailler avec un cluster EKS
|
||||
@@ -1,41 +1,13 @@
|
||||
# Accessing internal services
|
||||
|
||||
- When we are logged in on a cluster node, we can access internal services
|
||||
|
||||
(by virtue of the Kubernetes network model: all nodes can reach all pods and services)
|
||||
|
||||
- When we are accessing a remote cluster, things are different
|
||||
|
||||
(generally, our local machine won't have access to the cluster's internal subnet)
|
||||
|
||||
- How can we temporarily access a service without exposing it to everyone?
|
||||
|
||||
--
|
||||
|
||||
- `kubectl proxy`: gives us access to the API, which includes a proxy for HTTP resources
|
||||
|
||||
- `kubectl port-forward`: allows forwarding of TCP ports to arbitrary pods, services, ...
|
||||
|
||||
---
|
||||
|
||||
## Suspension of disbelief
|
||||
|
||||
The exercises in this section assume that we have set up `kubectl` on our
|
||||
local machine in order to access a remote cluster.
|
||||
|
||||
We will therefore show how to access services and pods of the remote cluster,
|
||||
from our local machine.
|
||||
|
||||
You can also run these exercises directly on the cluster (if you haven't
|
||||
installed and set up `kubectl` locally).
|
||||
|
||||
Running commands locally will be less useful
|
||||
(since you could access services and pods directly),
|
||||
but keep in mind that these commands will work anywhere as long as you have
|
||||
installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
---
|
||||
|
||||
## `kubectl proxy` in theory
|
||||
|
||||
- Running `kubectl proxy` gives us access to the entire Kubernetes API
|
||||
@@ -56,7 +28,7 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
## `kubectl proxy` in practice
|
||||
|
||||
- Let's access the `webui` service through `kubectl proxy`
|
||||
- Let's access the `web` service through `kubectl proxy`
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -65,9 +37,9 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
kubectl proxy &
|
||||
```
|
||||
|
||||
- Access the `webui` service:
|
||||
- Access the `web` service:
|
||||
```bash
|
||||
curl localhost:8001/api/v1/namespaces/default/services/webui/proxy/index.html
|
||||
curl localhost:8001/api/v1/namespaces/default/services/web/proxy/
|
||||
```
|
||||
|
||||
- Terminate the proxy:
|
||||
@@ -99,22 +71,20 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
## `kubectl port-forward` in practice
|
||||
|
||||
- Let's access our remote Redis server
|
||||
- Let's access our remote NGINX server
|
||||
|
||||
.exercise[
|
||||
|
||||
- Forward connections from local port 10000 to remote port 6379:
|
||||
- Forward connections from local port 1234 to remote port 80:
|
||||
```bash
|
||||
kubectl port-forward svc/redis 10000:6379 &
|
||||
kubectl port-forward svc/web 1234:80 &
|
||||
```
|
||||
|
||||
- Connect to the Redis server:
|
||||
- Connect to the NGINX server:
|
||||
```bash
|
||||
telnet localhost 10000
|
||||
curl localhost:1234
|
||||
```
|
||||
|
||||
- Issue a few commands, e.g. `INFO server` then `QUIT`
|
||||
|
||||
<!--
|
||||
```wait Connected to localhost```
|
||||
```keys INFO server```
|
||||
@@ -134,3 +104,17 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
:EN:- Securely accessing internal services
|
||||
:FR:- Accès sécurisé aux services internes
|
||||
|
||||
:T: Accessing internal services from our local machine
|
||||
|
||||
:Q: What's the advantage of "kubectl port-forward" compared to a NodePort?
|
||||
:A: It can forward arbitrary protocols
|
||||
:A: It doesn't require Kubernetes API credentials
|
||||
:A: It offers deterministic load balancing (instead of random)
|
||||
:A: ✔️It doesn't expose the service to the public
|
||||
|
||||
:Q: What's the security concept behind "kubectl port-forward"?
|
||||
:A: ✔️We authenticate with the Kubernetes API, and it forwards connections on our behalf
|
||||
:A: It detects our source IP address, and only allows connections coming from it
|
||||
:A: It uses end-to-end mTLS (mutual TLS) to authenticate our connections
|
||||
:A: There is no security (as long as it's running, anyone can connect from anywhere)
|
||||
|
||||
549
slides/k8s/admission.md
Normal file
@@ -0,0 +1,549 @@
|
||||
# Dynamic Admission Control
|
||||
|
||||
- This is one of the many ways to extend the Kubernetes API
|
||||
|
||||
- High level summary: dynamic admission control relies on webhooks that are ...
|
||||
|
||||
- dynamic (can be added/removed on the fly)
|
||||
|
||||
- running inside or outside the cluster
|
||||
|
||||
- *validating* (yay/nay) or *mutating* (can change objects that are created/updated)
|
||||
|
||||
- selective (can be configured to apply only to some kinds, some selectors...)
|
||||
|
||||
- mandatory or optional (should it block operations when webhook is down?)
|
||||
|
||||
- Used by themselves (e.g. policy enforcement) or as part of operators
|
||||
|
||||
---
|
||||
|
||||
## Use cases
|
||||
|
||||
Some examples ...
|
||||
|
||||
- Stand-alone admission controllers
|
||||
|
||||
*validating:* policy enforcement (e.g. quotas, naming conventions ...)
|
||||
|
||||
*mutating:* inject or provide default values (e.g. pod presets)
|
||||
|
||||
- Admission controllers part of a greater system
|
||||
|
||||
*validating:* advanced typing for operators
|
||||
|
||||
*mutating:* inject sidecars for service meshes
|
||||
|
||||
---
|
||||
|
||||
## You said *dynamic?*
|
||||
|
||||
- Some admission controllers are built in the API server
|
||||
|
||||
- They are enabled/disabled through Kubernetes API server configuration
|
||||
|
||||
(e.g. `--enable-admission-plugins`/`--disable-admission-plugins` flags)
|
||||
|
||||
- Here, we're talking about *dynamic* admission controllers
|
||||
|
||||
- They can be added/removed while the API server is running
|
||||
|
||||
(without touching the configuration files or even having access to them)
|
||||
|
||||
- This is done through two kinds of cluster-scope resources:
|
||||
|
||||
ValidatingWebhookConfiguration and MutatingWebhookConfiguration
|
||||
|
||||
---
|
||||
|
||||
## You said *webhooks?*
|
||||
|
||||
- A ValidatingWebhookConfiguration or MutatingWebhookConfiguration contains:
|
||||
|
||||
- a resource filter
|
||||
<br/>
|
||||
(e.g. "all pods", "deployments in namespace xyz", "everything"...)
|
||||
|
||||
- an operations filter
|
||||
<br/>
|
||||
(e.g. CREATE, UPDATE, DELETE)
|
||||
|
||||
- the address of the webhook server
|
||||
|
||||
- Each time an operation matches the filters, it is sent to the webhook server
|
||||
|
||||
---
|
||||
|
||||
## What gets sent exactly?
|
||||
|
||||
- The API server will `POST` a JSON object to the webhook
|
||||
|
||||
- That object will be a Kubernetes API message with `kind` `AdmissionReview`
|
||||
|
||||
- It will contain a `request` field, with, notably:
|
||||
|
||||
- `request.uid` (to be used when replying)
|
||||
|
||||
- `request.object` (the object created/deleted/changed)
|
||||
|
||||
- `request.oldObject` (when an object is modified)
|
||||
|
||||
- `request.userInfo` (who was making the request to the API in the first place)
|
||||
|
||||
(See [the documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#request) for a detailed example showing more fields.)
|
||||
|
||||
---
|
||||
|
||||
## How should the webhook respond?
|
||||
|
||||
- By replying with another `AdmissionReview` in JSON
|
||||
|
||||
- It should have a `response` field, with, notably:
|
||||
|
||||
- `response.uid` (matching the `request.uid`)
|
||||
|
||||
- `response.allowed` (`true`/`false`)
|
||||
|
||||
- `response.status.message` (optional string; useful when denying requests)
|
||||
|
||||
- `response.patchType` (when a mutating webhook changes the object; e.g. `json`)
|
||||
|
||||
- `response.patch` (the patch, encoded in base64)
|
||||
|
||||
---
|
||||
|
||||
## What if the webhook *does not* respond?
|
||||
|
||||
- If "something bad" happens, the API server follows the `failurePolicy` option
|
||||
|
||||
- this is a per-webhook option (specified in the webhook configuration)
|
||||
|
||||
- it can be `Fail` (the default) or `Ignore` ("allow all, unmodified")
|
||||
|
||||
- What's "something bad"?
|
||||
|
||||
- webhook responds with something invalid
|
||||
|
||||
- webhook takes more than 10 seconds to respond
|
||||
<br/>
|
||||
(this can be changed with `timeoutSeconds` field in the webhook config)
|
||||
|
||||
- webhook is down or has invalid certificates
|
||||
<br/>
|
||||
(TLS! It's not just a good idea; for admission control, it's the law!)
|
||||
|
||||
---
|
||||
|
||||
## What did you say about TLS?
|
||||
|
||||
- The webhook configuration can indicate:
|
||||
|
||||
- either `url` of the webhook server (has to begin with `https://`)
|
||||
|
||||
- or `service.name` and `service.namespace` of a Service on the cluster
|
||||
|
||||
- In the latter case, the Service has to accept TLS connections on port 443
|
||||
|
||||
- It has to use a certificate with CN `<name>.<namespace>.svc`
|
||||
|
||||
(**and** a `subjectAltName` extension with `DNS:<name>.<namespace>.svc`)
|
||||
|
||||
- The certificate needs to be valid (signed by a CA trusted by the API server)
|
||||
|
||||
... alternatively, we can pass a `caBundle` in the webhook configuration
|
||||
|
||||
---
|
||||
|
||||
## Webhook server inside or outside
|
||||
|
||||
- "Outside" webhook server is defined with `url` option
|
||||
|
||||
- convenient for external webhooks (e.g. tamper-resistant audit trail)
|
||||
|
||||
- also great for initial development (e.g. with ngrok)
|
||||
|
||||
- requires outbound connectivity (duh) and can become a SPOF
|
||||
|
||||
- "Inside" webhook server is defined with `service` option
|
||||
|
||||
- convenient when the webhook needs to be deployed and managed on the cluster
|
||||
|
||||
- also great for air gapped clusters
|
||||
|
||||
- development can be harder (but tools like [Tilt](https://tilt.dev) can help)
|
||||
|
||||
---
|
||||
|
||||
## Developing a simple admission webhook
|
||||
|
||||
- We're going to register a custom webhook!
|
||||
|
||||
- First, we'll just dump the `AdmissionRequest` object
|
||||
|
||||
(using a little Node app)
|
||||
|
||||
- Then, we'll implement a strict policy on a specific label
|
||||
|
||||
(using a little Flask app)
|
||||
|
||||
- Development will happen in local containers, plumbed with ngrok
|
||||
|
||||
- Then we will deploy to the cluster 🔥
|
||||
|
||||
---
|
||||
|
||||
## Running the webhook locally
|
||||
|
||||
- We prepared a Docker Compose file to start the whole stack
|
||||
|
||||
(the Node "echo" app, the Flask app, and one ngrok tunnel for each of them)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the webhook directory:
|
||||
```bash
|
||||
cd ~/container.training/webhooks/admission
|
||||
```
|
||||
|
||||
- Start the webhook in Docker containers:
|
||||
```bash
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
*Note the URL in `ngrok-echo_1` looking like `url=https://xxxx.ngrok.io`.*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's ngrok?
|
||||
|
||||
- Ngrok provides secure tunnels to access local services
|
||||
|
||||
- Example: run `ngrok http 1234`
|
||||
|
||||
- `ngrok` will display a publicly-available URL (e.g. https://xxxxyyyyzzzz.ngrok.io)
|
||||
|
||||
- Connections to https://xxxxyyyyzzzz.ngrok.io will terminate at `localhost:1234`
|
||||
|
||||
- Basic product is free; extra features (vanity domains, end-to-end TLS...) for $$$
|
||||
|
||||
- Perfect to develop our webhook!
|
||||
|
||||
- Probably not for production, though
|
||||
|
||||
(webhook requests and responses now pass through the ngrok platform)
|
||||
|
||||
---
|
||||
|
||||
## Update the webhook configuration
|
||||
|
||||
- We have a webhook configuration in `k8s/webhook-configuration.yaml`
|
||||
|
||||
- We need to update the configuration with the correct `url`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the webhook configuration manifest:
|
||||
```bash
|
||||
vim k8s/webhook-configuration.yaml
|
||||
```
|
||||
|
||||
- **Uncomment** the `url:` line
|
||||
|
||||
- **Update** the `.ngrok.io` URL with the URL shown by Compose
|
||||
|
||||
- Save and quit
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Register the webhook configuration
|
||||
|
||||
- Just after we register the webhook, it will be called for each matching request
|
||||
|
||||
(CREATE and UPDATE on Pods in all namespaces)
|
||||
|
||||
- The `failurePolicy` is `Ignore`
|
||||
|
||||
(so if the webhook server is down, we can still create pods)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Register the webhook:
|
||||
```bash
|
||||
kubectl apply -f k8s/webhook-configuration.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
It is strongly recommended to tail the logs of the API server while doing that.
|
||||
|
||||
---
|
||||
|
||||
## Create a pod
|
||||
|
||||
- Let's create a pod and try to set a `color` label
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a pod named `chroma`:
|
||||
```bash
|
||||
kubectl run --restart=Never chroma --image=nginx
|
||||
```
|
||||
|
||||
- Add a label `color` set to `pink`:
|
||||
```bash
|
||||
kubectl label pod chroma color=pink
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see the `AdmissionReview` objects in the Compose logs.
|
||||
|
||||
Note: the webhook doesn't do anything (other than printing the request payload).
|
||||
|
||||
---
|
||||
|
||||
## Use the "real" admission webhook
|
||||
|
||||
- We have a small Flask app implementing a particular policy on pod labels:
|
||||
|
||||
- if a pod sets a label `color`, it must be `blue`, `green`, `red`
|
||||
|
||||
- once that `color` label is set, it cannot be removed or changed
|
||||
|
||||
- That Flask app was started when we did `docker-compose up` earlier
|
||||
|
||||
- It is exposed through its own ngrok tunnel
|
||||
|
||||
- We are going to use that webhook instead of the other one
|
||||
|
||||
(by changing only the `url` field in the ValidatingWebhookConfiguration)
|
||||
|
||||
---
|
||||
|
||||
## Update the webhook configuration
|
||||
|
||||
.exercise[
|
||||
|
||||
- First, check the ngrok URL of the tunnel for the Flask app:
|
||||
```bash
|
||||
docker-compose logs ngrok-flask
|
||||
```
|
||||
|
||||
- Then, edit the webhook configuration:
|
||||
```bash
|
||||
kubectl edit validatingwebhookconfiguration admission.container.training
|
||||
```
|
||||
- Find the `url:` field with the `.ngrok.io` URL and update it
|
||||
|
||||
- Save and quit; the new configuration is applied immediately
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Verify the behavior of the webhook
|
||||
|
||||
- Try to create a few pods and/or change labels on existing pods
|
||||
|
||||
- What happens if we try to make changes to the earlier pod?
|
||||
|
||||
(the one that has the label `color=pink`)
|
||||
|
||||
---
|
||||
|
||||
## Deploying the webhook on the cluster
|
||||
|
||||
- Let's see what's needed to self-host the webhook server!
|
||||
|
||||
- The webhook needs to be reachable through a Service on our cluster
|
||||
|
||||
- The Service needs to accept TLS connections on port 443
|
||||
|
||||
- We need a proper TLS certificate:
|
||||
|
||||
- with the right `CN` and `subjectAltName` (`<servicename>.<namespace>.svc`)
|
||||
|
||||
- signed by a trusted CA
|
||||
|
||||
- We can either use a "real" CA, or use the `caBundle` option to specify the CA cert
|
||||
|
||||
(the latter makes it easy to use self-signed certs)
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We're going to generate a key pair and a self-signed certificate
|
||||
|
||||
- We will store them in a Secret
|
||||
|
||||
- We will run the webhook in a Deployment, exposed with a Service
|
||||
|
||||
- We will update the webhook configuration to use that Service
|
||||
|
||||
- The Service will be named `admission`, in Namespace `webhooks`
|
||||
|
||||
(keep in mind that the ValidatingWebhookConfiguration itself is at cluster scope)
|
||||
|
||||
---
|
||||
|
||||
## Let's get to work!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Make sure we're in the right directory:
|
||||
```bash
|
||||
cd ~/container.training/webhooks/admission
|
||||
```
|
||||
|
||||
- Create the namespace:
|
||||
```bash
|
||||
kubectl create namespace webhooks
|
||||
```
|
||||
|
||||
- Switch to the namespace:
|
||||
```bash
|
||||
kubectl config set-context --current --namespace=webhooks
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the webhook
|
||||
|
||||
- *Normally,* we would author an image for this
|
||||
|
||||
- Since our webhook is just *one* Python source file ...
|
||||
|
||||
... we'll store it in a ConfigMap, and install dependencies on the fly
|
||||
|
||||
.exercise[
|
||||
|
||||
- Load the webhook source in a ConfigMap:
|
||||
```bash
|
||||
kubectl create configmap admission --from-file=flask/webhook.py
|
||||
```
|
||||
|
||||
- Create the Deployment and Service:
|
||||
```bash
|
||||
kubectl apply -f k8s/webhook-server.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Generating the key pair and certificate
|
||||
|
||||
- Let's call OpenSSL to the rescue!
|
||||
|
||||
(of course, there are plenty of other options; e.g. `cfssl`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate a self-signed certificate:
|
||||
```bash
|
||||
NAMESPACE=webhooks
|
||||
SERVICE=admission
|
||||
CN=$SERVICE.$NAMESPACE.svc
|
||||
openssl req -x509 -newkey rsa:4096 -nodes -keyout key.pem -out cert.pem \
|
||||
-days 30 -subj /CN=$CN -addext subjectAltName=DNS:$CN
|
||||
```
|
||||
|
||||
- Load up the key and cert in a Secret:
|
||||
```bash
|
||||
kubectl create secret tls admission --cert=cert.pem --key=key.pem
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Update the webhook configuration
|
||||
|
||||
- Let's reconfigure the webhook to use our Service instead of ngrok
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the webhook configuration manifest:
|
||||
```bash
|
||||
vim k8s/webhook-configuration.yaml
|
||||
```
|
||||
|
||||
- Comment out the `url:` line
|
||||
|
||||
- Uncomment the `service:` section
|
||||
|
||||
- Save, quit
|
||||
|
||||
- Update the webhook configuration:
|
||||
```bash
|
||||
kubectl apply -f k8s/webhook-configuration.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Add our self-signed cert to the `caBundle`
|
||||
|
||||
- The API server won't accept our self-signed certificate
|
||||
|
||||
- We need to add it to the `caBundle` field in the webhook configuration
|
||||
|
||||
- The `caBundle` will be our `cert.pem` file, encoded in base64
|
||||
|
||||
---
|
||||
|
||||
Shell to the rescue!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Load up our cert and encode it in base64:
|
||||
```bash
|
||||
CA=$(base64 -w0 < cert.pem)
|
||||
```
|
||||
|
||||
- Define a patch operation to update the `caBundle`:
|
||||
```bash
|
||||
PATCH='[{
|
||||
"op": "replace",
|
||||
"path": "/webhooks/0/clientConfig/caBundle",
|
||||
"value":"'$CA'"
|
||||
}]'
|
||||
```
|
||||
|
||||
- Patch the webhook configuration:
|
||||
```bash
|
||||
kubectl patch validatingwebhookconfiguration \
|
||||
admission.webhook.container.training \
|
||||
--type='json' -p="$PATCH"
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Try it out!
|
||||
|
||||
- Keep an eye on the API server logs
|
||||
|
||||
- Tail the logs of the pod running the webhook server
|
||||
|
||||
- Create a few pods; we should see requests in the webhook server logs
|
||||
|
||||
- Check that the label `color` is enforced correctly
|
||||
|
||||
(it should only allow values of `red`, `green`, `blue`)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Dynamic admission control with webhooks
|
||||
:FR:- Contrôle d'admission dynamique (webhooks)
|
||||
394
slides/k8s/aggregation-layer.md
Normal file
@@ -0,0 +1,394 @@
|
||||
# The Aggregation Layer
|
||||
|
||||
- The aggregation layer is a way to extend the Kubernetes API
|
||||
|
||||
- It is similar to CRDs
|
||||
|
||||
- it lets us define new resource types
|
||||
|
||||
- these resources can then be used with `kubectl` and other clients
|
||||
|
||||
- The implementation is very different
|
||||
|
||||
- CRDs are handled within the API server
|
||||
|
||||
- the aggregation layer offloads requests to another process
|
||||
|
||||
- They are designed for very different use-cases
|
||||
|
||||
---
|
||||
|
||||
## CRDs vs aggregation layer
|
||||
|
||||
- The Kubernetes API is a REST-ish API with a hierarchical structure
|
||||
|
||||
- It can be extended with Custom Resource Definitions (CRDs)
|
||||
|
||||
- Custom resources are managed by the Kubernetes API server
|
||||
|
||||
- we don't need to write code
|
||||
|
||||
- the API server does all the heavy lifting
|
||||
|
||||
- these resources are persisted in Kubernetes' "standard" database
|
||||
<br/>
|
||||
(for most installations, that's `etcd`)
|
||||
|
||||
- We can also define resources that are *not* managed by the API server
|
||||
|
||||
(the API server merely proxies the requests to another server)
|
||||
|
||||
---
|
||||
|
||||
## Which one is best?
|
||||
|
||||
- For things that "map" well to objects stored in a traditional database:
|
||||
|
||||
*probably CRDs*
|
||||
|
||||
- For things that "exist" only in Kubernetes and don't represent external resources:
|
||||
|
||||
*probably CRDs*
|
||||
|
||||
- For things that are read-only, at least from Kubernetes' perspective:
|
||||
|
||||
*probably aggregation layer*
|
||||
|
||||
- For things that can't be stored in etcd because of size or access patterns:
|
||||
|
||||
*probably aggregation layer*
|
||||
|
||||
---
|
||||
|
||||
## How are resources organized?
|
||||
|
||||
- Let's have a look at the Kubernetes API hierarchical structure
|
||||
|
||||
- We'll ask `kubectl` to show us the exacts requests that it's making
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the URI for a cluster-scope, "core" resource, e.g. a Node:
|
||||
```bash
|
||||
kubectl -v6 get node node1
|
||||
```
|
||||
|
||||
- Check the URI for a cluster-scope, "non-core" resource, e.g. a ClusterRole:
|
||||
```bash
|
||||
kubectl -v6 get clusterrole view
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Core vs non-core
|
||||
|
||||
- This is the structure of the URIs that we just checked:
|
||||
|
||||
```
|
||||
/api/v1/nodes/node1
|
||||
↑ ↑ ↑
|
||||
`version` `kind` `name`
|
||||
|
||||
/apis/rbac.authorization.k8s.io/v1/clusterroles/view
|
||||
↑ ↑ ↑ ↑
|
||||
`group` `version` `kind` `name`
|
||||
```
|
||||
|
||||
- There is no group for "core" resources
|
||||
|
||||
- Or, we could say that the group, `core`, is implied
|
||||
|
||||
---
|
||||
|
||||
## Group-Version-Kind
|
||||
|
||||
- In the API server, the Group-Version-Kind triple maps to a Go type
|
||||
|
||||
(look for all the "GVK" occurrences in the source code!)
|
||||
|
||||
- In the API server URI router, the GVK is parsed "relatively early"
|
||||
|
||||
(so that the server can know which resource we're talking about)
|
||||
|
||||
- "Well, actually ..." Things are a bit more complicated, see next slides!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Namespaced resources
|
||||
|
||||
- What about namespaced resources?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the URI for a namespaced, "core" resource, e.g. a Service:
|
||||
```bash
|
||||
kubectl -v6 get service kubernetes --namespace default
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- Here are what namespaced resources URIs look like:
|
||||
|
||||
```
|
||||
/api/v1/namespaces/default/services/kubernetes
|
||||
↑ ↑ ↑ ↑
|
||||
`version` `namespace` `kind` `name`
|
||||
|
||||
/apis/apps/v1/namespaces/kube-system/daemonsets/kube-proxy
|
||||
↑ ↑ ↑ ↑ ↑
|
||||
`group` `version` `namespace` `kind` `name`
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Subresources
|
||||
|
||||
- Many resources have *subresources*, for instance:
|
||||
|
||||
- `/status` (decouples status updates from other updates)
|
||||
|
||||
- `/scale` (exposes a consistent interface for autoscalers)
|
||||
|
||||
- `/proxy` (allows access to HTTP resources)
|
||||
|
||||
- `/portforward` (used by `kubectl port-forward`)
|
||||
|
||||
- `/logs` (access pod logs)
|
||||
|
||||
- These are added at the end of the URI
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Accessing a subresource
|
||||
|
||||
.exercise[
|
||||
|
||||
- List `kube-proxy` pods:
|
||||
```bash
|
||||
kubectl get pods --namespace=kube-system --selector=k8s-app=kube-proxy
|
||||
PODNAME=$(
|
||||
kubectl get pods --namespace=kube-system --selector=k8s-app=kube-proxy \
|
||||
-o json | jq -r .items[0].metadata.name)
|
||||
```
|
||||
|
||||
- Execute a command in a pod, showing the API requests:
|
||||
```bash
|
||||
kubectl -v6 exec --namespace=kube-system $PODNAME -- echo hello world
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
The full request looks like:
|
||||
```
|
||||
POST https://.../api/v1/namespaces/kube-system/pods/kube-proxy-c7rlw/exec?
|
||||
command=echo&command=hello&command=world&container=kube-proxy&stderr=true&stdout=true
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Listing what's supported on the server
|
||||
|
||||
- There are at least three useful commands to introspect the API server
|
||||
|
||||
.exercise[
|
||||
|
||||
- List resources types, their group, kind, short names, and scope:
|
||||
```bash
|
||||
kubectl api-resources
|
||||
```
|
||||
|
||||
- List API groups + versions:
|
||||
```bash
|
||||
kubectl api-versions
|
||||
```
|
||||
|
||||
- List APIServices:
|
||||
```bash
|
||||
kubectl get apiservices
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
🤔 What's the difference between the last two?
|
||||
|
||||
---
|
||||
|
||||
## API registration
|
||||
|
||||
- `kubectl api-versions` shows all API groups, including `apiregistration.k8s.io`
|
||||
|
||||
- `kubectl get apiservices` shows the "routing table" for API requests
|
||||
|
||||
- The latter doesn't show `apiregistration.k8s.io`
|
||||
|
||||
(APIServices belong to `apiregistration.k8s.io`)
|
||||
|
||||
- Most API groups are `Local` (handled internally by the API server)
|
||||
|
||||
- If we're running the `metrics-server`, it should handle `metrics.k8s.io`
|
||||
|
||||
- This is an API group handled *outside* of the API server
|
||||
|
||||
- This is the *aggregation layer!*
|
||||
|
||||
---
|
||||
|
||||
## Finding resources
|
||||
|
||||
The following assumes that `metrics-server` is deployed on your cluster.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that the metrics.k8s.io is registered with `metrics-server`:
|
||||
```bash
|
||||
kubectl get apiservices | grep metrics.k8s.io
|
||||
```
|
||||
|
||||
- Check the resource kinds registered in the metrics.k8s.io group:
|
||||
```bash
|
||||
kubectl api-resources --api-group=metrics.k8s.io
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(If the output of either command is empty, install `metrics-server` first.)
|
||||
|
||||
---
|
||||
|
||||
## `nodes` vs `nodes`
|
||||
|
||||
- We can have multiple resources with the same name
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look for resources named `node`:
|
||||
```bash
|
||||
kubectl api-resources | grep -w nodes
|
||||
```
|
||||
|
||||
- Compare the output of both commands:
|
||||
```bash
|
||||
kubectl get nodes
|
||||
kubectl get nodes.metrics.k8s.io
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
🤔 What is the second kind of nodes? How can we see what's really in them?
|
||||
|
||||
---
|
||||
|
||||
## Node vs NodeMetrics
|
||||
|
||||
- `nodes.metrics.k8s.io` (aka NodeMetrics) don't have fancy *printer columns*
|
||||
|
||||
- But we can look at the raw data (with `-o json` or `-o yaml`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at NodeMetrics objects with one of these commands:
|
||||
```bash
|
||||
kubectl get -o yaml nodes.metrics.k8s.io
|
||||
kubectl get -o yaml NodeMetrics
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
💡 Alright, these are the live metrics (CPU, RAM) for our nodes.
|
||||
|
||||
---
|
||||
|
||||
## An easier way to consume metrics
|
||||
|
||||
- We might have seen these metrics before ... With an easier command!
|
||||
|
||||
--
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display node metrics:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
- Check which API requests happen behind the scenes:
|
||||
```bash
|
||||
kubectl top nodes -v6
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Aggregation layer in practice
|
||||
|
||||
- We can write an API server to handle a subset of the Kubernetes API
|
||||
|
||||
- Then we can register that server by creating an APIService resource
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the definition used for the `metrics-server`:
|
||||
```bash
|
||||
kubectl describe apiservices v1beta1.metrics.k8s.io
|
||||
```
|
||||
]
|
||||
|
||||
- Group priority is used when multiple API groups provide similar kinds
|
||||
|
||||
(e.g. `nodes` and `nodes.metrics.k8s.io` as seen earlier)
|
||||
|
||||
---
|
||||
|
||||
## Authentication flow
|
||||
|
||||
- We have two Kubernetes API servers:
|
||||
|
||||
- "aggregator" (the main one; clients connect to it)
|
||||
|
||||
- "aggregated" (the one providing the extra API; aggregator connects to it)
|
||||
|
||||
- Aggregator deals with client authentication
|
||||
|
||||
- Aggregator authenticates with aggregated using mutual TLS
|
||||
|
||||
- Aggregator passes (/forwards/proxies/...) requests to aggregated
|
||||
|
||||
- Aggregated performs authorization by calling back aggregator
|
||||
|
||||
("can subject X perform action Y on resource Z?")
|
||||
|
||||
[This doc page](https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#authentication-flow) has very nice swim lanes showing that flow.
|
||||
|
||||
---
|
||||
|
||||
## Discussion
|
||||
|
||||
- Aggregation layer is great for metrics
|
||||
|
||||
(fast-changing, ephemeral data, that would be outrageously bad for etcd)
|
||||
|
||||
- It *could* be a good fit to expose other REST APIs as a pass-thru
|
||||
|
||||
(but it's more common to see CRDs instead)
|
||||
|
||||
???
|
||||
|
||||
:EN:- The aggregation layer
|
||||
:FR:- Étendre l'API avec le *aggregation layer*
|
||||
179
slides/k8s/apiserver-deepdive.md
Normal file
@@ -0,0 +1,179 @@
|
||||
# API server internals
|
||||
|
||||
- Understanding the internals of the API server is useful.red[¹]:
|
||||
|
||||
- when extending the Kubernetes API server (CRDs, webhooks...)
|
||||
|
||||
- when running Kubernetes at scale
|
||||
|
||||
- Let's dive into a bit of code!
|
||||
|
||||
.footnote[.red[¹]And by *useful*, we mean *strongly recommended or else...*]
|
||||
|
||||
---
|
||||
|
||||
## The main handler
|
||||
|
||||
- The API server parses its configuration, and builds a `GenericAPIServer`
|
||||
|
||||
- ... which contains an `APIServerHandler` ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/handler.go#L37
|
||||
))
|
||||
|
||||
- ... which contains a couple of `http.Handler` fields
|
||||
|
||||
- Requests go through:
|
||||
|
||||
- `FullHandlerChain` (a series of HTTP filters, see next slide)
|
||||
|
||||
- `Director` (switches the request to `GoRestfulContainer` or `NonGoRestfulMux`)
|
||||
|
||||
- `GoRestfulContainer` is for "normal" APIs; integrates nicely with OpenAPI
|
||||
|
||||
- `NonGoRestfulMux` is for everything else (e.g. proxy, delegation)
|
||||
|
||||
---
|
||||
|
||||
## The chain of handlers
|
||||
|
||||
- API requests go through a complex chain of filters ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/config.go#L671))
|
||||
|
||||
(note when reading that code: requests start at the bottom and go up)
|
||||
|
||||
- This is where authentication, authorization, and admission happen
|
||||
|
||||
(as well as a few other things!)
|
||||
|
||||
- Let's review an arbitrary selection of some of these handlers!
|
||||
|
||||
*In the following slides, the handlers are in chronological order.*
|
||||
|
||||
*Note: handlers are nested; so they can act at the beginning and end of a request.*
|
||||
|
||||
---
|
||||
|
||||
## `WithPanicRecovery`
|
||||
|
||||
- Reminder about Go: there is no exception handling in Go; instead:
|
||||
|
||||
- functions typically return a composite `(SomeType, error)` type
|
||||
|
||||
- when things go really bad, the code can call `panic()`
|
||||
|
||||
- `panic()` can be caught with `recover()`
|
||||
<br/>
|
||||
(but this is almost never used like an exception handler!)
|
||||
|
||||
- The API server code is not supposed to `panic()`
|
||||
|
||||
- But just in case, we have that handler to prevent (some) crashes
|
||||
|
||||
---
|
||||
|
||||
## `WithRequestInfo` ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/request/requestinfo.go#L163))
|
||||
|
||||
|
||||
- Parse out essential information:
|
||||
|
||||
API group, version, namespace, resource, subresource, verb ...
|
||||
|
||||
- WithRequestInfo: parse out API group+version, Namespace, resource, subresource ...
|
||||
|
||||
- Maps HTTP verbs (GET, PUT, ...) to Kubernetes verbs (list, get, watch, ...)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## HTTP verb mapping
|
||||
|
||||
- POST → create
|
||||
|
||||
- PUT → update
|
||||
|
||||
- PATCH → patch
|
||||
|
||||
- DELETE
|
||||
<br/> → delete (if a resource name is specified)
|
||||
<br/> → deletecollection (otherwise)
|
||||
|
||||
- GET, HEAD
|
||||
<br/> → get (if a resource name is specified)
|
||||
<br/> → list (otherwise)
|
||||
<br/> → watch (if the `?watch=true` option is specified)
|
||||
|
||||
---
|
||||
|
||||
## `WithWaitGroup`
|
||||
|
||||
- When we shutdown, tells clients (with in-flight requests) to retry
|
||||
|
||||
- only for "short" requests
|
||||
|
||||
- for long running requests, the client needs to do more
|
||||
|
||||
- Long running requests include `watch` verb, `proxy` sub-resource
|
||||
|
||||
(See also `WithTimeoutForNonLongRunningRequests`)
|
||||
|
||||
---
|
||||
|
||||
## AuthN and AuthZ
|
||||
|
||||
- `WithAuthentication`:
|
||||
the request goes through a *chain* of authenticators
|
||||
([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/filters/authentication.go#L38))
|
||||
|
||||
- WithAudit
|
||||
|
||||
- WithImpersonation: used for e.g. `kubectl ... --as another.user`
|
||||
|
||||
- WithPriorityAndFairness or WithMaxInFlightLimit
|
||||
|
||||
(`system:masters` can bypass these)
|
||||
|
||||
- WithAuthorization
|
||||
|
||||
---
|
||||
|
||||
## After all these handlers ...
|
||||
|
||||
- We get to the "director" mentioned above
|
||||
|
||||
- Api Groups get installed into the "gorestfulhandler"
|
||||
([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/genericapiserver.go#L423))
|
||||
|
||||
- REST-ish resources are managed by various handlers
|
||||
(in [this directory](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/))
|
||||
|
||||
- These files show us the code path for each type of request
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Request code path
|
||||
|
||||
- [create.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/create.go):
|
||||
decode to HubGroupVersion; admission; mutating admission; store
|
||||
|
||||
- [delete.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/delete.go):
|
||||
validating admission only; deletion
|
||||
|
||||
- [get.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/get.go) (get, list):
|
||||
directly fetch from rest storage abstraction
|
||||
|
||||
- [patch.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/patch.go):
|
||||
admission; mutating admission; patch
|
||||
|
||||
- [update.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/update.go):
|
||||
decode to HubGroupVersion; admission; mutating admission; store
|
||||
|
||||
- [watch.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/watch.go):
|
||||
similar to get.go, but with watch logic
|
||||
|
||||
(HubGroupVersion = in-memory, "canonical" version.)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes API server internals
|
||||
:FR:- Fonctionnement interne du serveur API
|
||||
@@ -273,6 +273,26 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Group-Version-Kind, or GVK
|
||||
|
||||
- A particular type will be identified by the combination of:
|
||||
|
||||
- the API group it belongs to (core, `apps`, `metrics.k8s.io`, ...)
|
||||
|
||||
- the version of this API group (`v1`, `v1beta1`, ...)
|
||||
|
||||
- the "Kind" itself (Pod, Role, Job, ...)
|
||||
|
||||
- "GVK" appears a lot in the API machinery code
|
||||
|
||||
- Conversions are possible between different versions and even between API groups
|
||||
|
||||
(e.g. when Deployments moved from `extensions` to `apps`)
|
||||
|
||||
---
|
||||
|
||||
## Update
|
||||
|
||||
- Let's update our namespace object
|
||||
@@ -334,6 +354,34 @@ We demonstrated *update* and *watch* semantics.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Watch events
|
||||
|
||||
- `kubectl get --watch` shows changes
|
||||
|
||||
- If we add `--output-watch-events`, we can also see:
|
||||
|
||||
- the difference between ADDED and MODIFIED resources
|
||||
|
||||
- DELETED resources
|
||||
|
||||
.exercise[
|
||||
|
||||
- In one terminal, watch pods, displaying full events:
|
||||
```bash
|
||||
kubectl get pods --watch --output-watch-events
|
||||
```
|
||||
|
||||
- In another, run a short-lived pod:
|
||||
```bash
|
||||
kubectl run pause --image=alpine --rm -ti --restart=Never -- sleep 5
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
# Other control plane components
|
||||
|
||||
- API server ✔️
|
||||
|
||||
@@ -733,17 +733,19 @@ class: extra-details
|
||||
|
||||
## Figuring out who can do what
|
||||
|
||||
- For auditing purposes, sometimes we want to know who can perform an action
|
||||
- For auditing purposes, sometimes we want to know who can perform which actions
|
||||
|
||||
- There are a few tools to help us with that
|
||||
- There are a few tools to help us with that, available as `kubectl` plugins:
|
||||
|
||||
- [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
|
||||
- `kubectl who-can` / [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
|
||||
|
||||
- [Review Access (aka Rakkess)](https://github.com/corneliusweig/rakkess)
|
||||
- `kubectl access-matrix` / [Rakkess (Review Access)](https://github.com/corneliusweig/rakkess) by Cornelius Weig
|
||||
|
||||
- Both are available as standalone programs, or as plugins for `kubectl`
|
||||
- `kubectl rbac-lookup` / [RBAC Lookup](https://github.com/FairwindsOps/rbac-lookup) by FairwindsOps
|
||||
|
||||
(`kubectl` plugins can be installed and managed with `krew`)
|
||||
- `kubectl` plugins can be installed and managed with `krew`
|
||||
|
||||
- They can also be installed and executed as standalone programs
|
||||
|
||||
???
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Authoring YAML
|
||||
|
||||
- There are various ways to generate YAML with Kubernetes, e.g.:
|
||||
- We have already generated YAML implicitly, with e.g.:
|
||||
|
||||
- `kubectl run`
|
||||
|
||||
@@ -32,26 +32,63 @@
|
||||
|
||||
---
|
||||
|
||||
## We don't have to start from scratch
|
||||
## Various ways to write YAML
|
||||
|
||||
- Create a resource (e.g. Deployment)
|
||||
- Completely from scratch with our favorite editor
|
||||
|
||||
- Dump its YAML with `kubectl get -o yaml ...`
|
||||
(yeah, right)
|
||||
|
||||
- Edit the YAML
|
||||
- Dump an existing resource with `kubectl get -o yaml ...`
|
||||
|
||||
- Use `kubectl apply -f ...` with the YAML file to:
|
||||
(it is recommended to clean up the result)
|
||||
|
||||
- update the resource (if it's the same kind)
|
||||
- Ask `kubectl` to generate the YAML
|
||||
|
||||
- create a new resource (if it's a different kind)
|
||||
(with a `kubectl create --dry-run -o yaml`)
|
||||
|
||||
- Or: Use The Docs, Luke
|
||||
- Use The Docs, Luke
|
||||
|
||||
(the documentation almost always has YAML examples)
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML from scratch
|
||||
|
||||
- Start with a namespace:
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: hello
|
||||
```
|
||||
|
||||
- We can use `kubectl explain` to see resource definitions:
|
||||
```bash
|
||||
kubectl explain -r pod.spec
|
||||
```
|
||||
|
||||
- Not the easiest option!
|
||||
|
||||
---
|
||||
|
||||
## Dump the YAML for an existing resource
|
||||
|
||||
- `kubectl get -o yaml` works!
|
||||
|
||||
- A lot of fields in `metadata` are not necessary
|
||||
|
||||
(`managedFields`, `resourceVersion`, `uid`, `creationTimestamp` ...)
|
||||
|
||||
- Most objects will have a `status` field that is not necessary
|
||||
|
||||
- Default or empty values can also be removed for clarity
|
||||
|
||||
- This can be done manually or with the `kubectl-neat` plugin
|
||||
|
||||
`kubectl get -o yaml ... | kubectl neat`
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML without creating resources
|
||||
|
||||
- We can use the `--dry-run` option
|
||||
@@ -63,14 +100,18 @@
|
||||
kubectl create deployment web --image nginx --dry-run
|
||||
```
|
||||
|
||||
- Optionally clean it up with `kubectl neat`, too
|
||||
|
||||
]
|
||||
|
||||
- We can clean up that YAML even more if we want
|
||||
Note: in recent versions of Kubernetes, we should use `--dry-run=client`
|
||||
|
||||
(for instance, we can remove the `creationTimestamp` and empty dicts)
|
||||
(Or `--dry-run=server`; more on that later!)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using `--dry-run` with `kubectl apply`
|
||||
|
||||
- The `--dry-run` option can also be used with `kubectl apply`
|
||||
@@ -87,6 +128,8 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The limits of `kubectl apply --dry-run`
|
||||
|
||||
.exercise[
|
||||
@@ -112,6 +155,8 @@ The resulting YAML doesn't represent a valid DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Server-side dry run
|
||||
|
||||
- Since Kubernetes 1.13, we can use [server-side dry run and diffs](https://kubernetes.io/blog/2019/01/14/apiserver-dry-run-and-kubectl-diff/)
|
||||
@@ -124,7 +169,7 @@ The resulting YAML doesn't represent a valid DaemonSet.
|
||||
|
||||
- Try the same YAML file as earlier, with server-side dry run:
|
||||
```bash
|
||||
kubectl apply -f web.yaml --server-dry-run --validate=false -o yaml
|
||||
kubectl apply -f web.yaml --dry-run=server --validate=false -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -135,6 +180,8 @@ Instead, it has the fields expected in a DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Advantages of server-side dry run
|
||||
|
||||
- The YAML is verified much more extensively
|
||||
@@ -149,6 +196,8 @@ Instead, it has the fields expected in a DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl diff`
|
||||
|
||||
- Kubernetes 1.13 also introduced `kubectl diff`
|
||||
@@ -209,3 +258,8 @@ Note: we don't need to specify `--validate=false` here.
|
||||
- check that it still works!
|
||||
|
||||
- That YAML will be useful later when using e.g. Kustomize or Helm
|
||||
|
||||
???
|
||||
|
||||
:EN:- Techniques to write YAML manifests
|
||||
:FR:- Comment écrire des *manifests* YAML
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
- Jobs are great for "long" background work
|
||||
|
||||
("long" being at least minutes our hours)
|
||||
("long" being at least minutes or hours)
|
||||
|
||||
- CronJobs are great to schedule Jobs at regular intervals
|
||||
|
||||
@@ -148,6 +148,28 @@ class: extra-details
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Setting a time limit
|
||||
|
||||
- It is possible to set a time limit (or deadline) for a job
|
||||
|
||||
- This is done with the field `spec.activeDeadlineSeconds`
|
||||
|
||||
(by default, it is unlimited)
|
||||
|
||||
- When the job is older than this time limit, all its pods are terminated
|
||||
|
||||
- Note that there can also be a `spec.activeDeadlineSeconds` field in pods!
|
||||
|
||||
- They can be set independently, and have different effects:
|
||||
|
||||
- the deadline of the job will stop the entire job
|
||||
|
||||
- the deadline of the pod will only stop an individual pod
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What about `kubectl run` before v1.18?
|
||||
|
||||
- Creating a Deployment:
|
||||
|
||||
233
slides/k8s/cert-manager.md
Normal file
@@ -0,0 +1,233 @@
|
||||
# cert-manager
|
||||
|
||||
- cert-manager¹ facilitates certificate signing through the Kubernetes API:
|
||||
|
||||
- we create a Certificate object (that's a CRD)
|
||||
|
||||
- cert-manager creates a private key
|
||||
|
||||
- it signs that key ...
|
||||
|
||||
- ... or interacts with a certificate authority to obtain the signature
|
||||
|
||||
- it stores the resulting key+cert in a Secret resource
|
||||
|
||||
- These Secret resources can be used in many places (Ingress, mTLS, ...)
|
||||
|
||||
.footnote[.red[¹]Always lower case, words separated with a dash; see the [style guide](https://cert-manager.io/docs/faq/style/).]
|
||||
|
||||
---
|
||||
|
||||
## Getting signatures
|
||||
|
||||
- cert-manager can use multiple *Issuers* (another CRD), including:
|
||||
|
||||
- self-signed
|
||||
|
||||
- cert-manager acting as a CA
|
||||
|
||||
- the [ACME protocol](https://en.wikipedia.org/wiki/Automated_Certificate_Management_Environment) (notably used by Let's Encrypt)
|
||||
|
||||
- [HashiCorp Vault](https://www.vaultproject.io/)
|
||||
|
||||
- Multiple issuers can be configured simultaneously
|
||||
|
||||
- Issuers can be available in a single namespace, or in the whole cluster
|
||||
|
||||
(then we use the *ClusterIssuer* CRD)
|
||||
|
||||
---
|
||||
|
||||
## cert-manager in action
|
||||
|
||||
- We will install cert-manager
|
||||
|
||||
- We will create a ClusterIssuer to obtain certificates with Let's Encrypt
|
||||
|
||||
(this will involve setting up an Ingress Controller)
|
||||
|
||||
- We will create a Certificate request
|
||||
|
||||
- cert-manager will honor that request and create a TLS Secret
|
||||
|
||||
---
|
||||
|
||||
## Installing cert-manager
|
||||
|
||||
- It can be installed with a YAML manifest, or with Helm
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's install the cert-manager Helm chart with this one-liner:
|
||||
```bash
|
||||
helm install cert-manager cert-manager \
|
||||
--repo https://charts.jetstack.io \
|
||||
--create-namespace --namespace cert-manager \
|
||||
--set installCRDs=true
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- If you prefer to install with a single YAML file, that's fine too!
|
||||
|
||||
(see [the documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-regular-manifests) for instructions)
|
||||
|
||||
---
|
||||
|
||||
## ClusterIssuer manifest
|
||||
|
||||
```yaml
|
||||
@@INCLUDE[k8s/cm-clusterissuer.yaml]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Creating the ClusterIssuer
|
||||
|
||||
- Download the file @@LINK[k8s/cm-clusterissuer.yaml]
|
||||
|
||||
(or copy-paste from the previous slide)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the ClusterIssuer:
|
||||
```bash
|
||||
    kubectl apply -f cm-clusterissuer.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Certificate manifest
|
||||
|
||||
```yaml
|
||||
@@INCLUDE[k8s/cm-certificate.yaml]
|
||||
```
|
||||
|
||||
- The `name`, `secretName`, and `dnsNames` don't have to match
|
||||
|
||||
- There can be multiple `dnsNames`
|
||||
|
||||
- The `issuerRef` must match the ClusterIssuer that we created earlier
|
||||
|
||||
---
|
||||
|
||||
## Creating the Certificate
|
||||
|
||||
- Download the file @@LINK[k8s/cm-certificate.yaml]
|
||||
|
||||
(or copy-paste from the previous slide)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the Certificate to update the domain name
|
||||
|
||||
(make sure to replace A.B.C.D with the IP address of one of your nodes!)
|
||||
|
||||
- Create the Certificate:
|
||||
```bash
|
||||
kubectl apply -f cm-certificate.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's happening?
|
||||
|
||||
- cert-manager will create:
|
||||
|
||||
- the secret key
|
||||
|
||||
- a Pod, a Service, and an Ingress to complete the HTTP challenge
|
||||
|
||||
- then it waits for the challenge to complete
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the resources created by cert-manager:
|
||||
```bash
|
||||
kubectl get pods,services,ingresses \
|
||||
--selector=acme.cert-manager.io/http01-solver=true
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## HTTP challenge
|
||||
|
||||
- The CA (in this case, Let's Encrypt) will fetch a particular URL:
|
||||
|
||||
`http://<our-domain>/.well-known/acme-challenge/<token>`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the *path* of the Ingress in particular:
|
||||
```bash
|
||||
    kubectl describe ingress \
|
||||
--selector=acme.cert-manager.io/http01-solver=true
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## And then...
|
||||
|
||||
- A little bit later, we will have a `kubernetes.io/tls` Secret:
|
||||
```bash
|
||||
kubectl get secrets
|
||||
```
|
||||
|
||||
- Note that this might take a few minutes, because of the DNS integration!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using the secret
|
||||
|
||||
- For bonus points, try to use the secret in an Ingress!
|
||||
|
||||
- This is what the manifest would look like:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: xyz
|
||||
spec:
|
||||
tls:
|
||||
- secretName: xyz.A.B.C.D.nip.io
|
||||
hosts:
|
||||
- xyz.A.B.C.D.nip.io
|
||||
rules:
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Automatic TLS Ingress with annotations
|
||||
|
||||
- It is also possible to annotate Ingress resources for cert-manager
|
||||
|
||||
- If we annotate an Ingress resource with `cert-manager.io/cluster-issuer=xxx`:
|
||||
|
||||
- cert-manager will detect that annotation
|
||||
|
||||
- it will obtain a certificate using the specified ClusterIssuer (`xxx`)
|
||||
|
||||
- it will store the key and certificate in the specified Secret
|
||||
|
||||
- Note: the Ingress still needs the `tls` section with `secretName` and `hosts`
|
||||
|
||||
???
|
||||
|
||||
:EN:- Obtaining certificates with cert-manager
|
||||
:FR:- Obtenir des certificats avec cert-manager
|
||||
|
||||
:T: Obtaining TLS certificates with cert-manager
|
||||
@@ -338,9 +338,9 @@ docker run --rm --net host -v $PWD:/vol \
|
||||
|
||||
(e.g. [Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-snapshots/) can [create snapshots through annotations](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-snapshots/snaps-annotations/#taking-periodic-snapshots-on-a-running-pod))
|
||||
|
||||
- Option 3: [snapshots through Kubernetes API](https://kubernetes.io/blog/2018/10/09/introducing-volume-snapshot-alpha-for-kubernetes/)
|
||||
- Option 3: [snapshots through Kubernetes API](https://kubernetes.io/docs/concepts/storage/volume-snapshots/)
|
||||
|
||||
(now in alpha for a few storage providers: GCE, OpenSDS, Ceph, Portworx)
|
||||
  (Generally available since Kubernetes 1.20 for a number of [CSI](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/) volume plugins: GCE, OpenSDS, Ceph, Portworx, etc.)
|
||||
|
||||
---
|
||||
|
||||
|
||||
193
slides/k8s/cni-internals.md
Normal file
@@ -0,0 +1,193 @@
|
||||
# CNI internals
|
||||
|
||||
- Kubelet looks for a CNI configuration file
|
||||
|
||||
(by default, in `/etc/cni/net.d`)
|
||||
|
||||
- Note: if we have multiple files, the first one will be used
|
||||
|
||||
(in lexicographic order)
|
||||
|
||||
- If no configuration can be found, kubelet holds off on creating containers
|
||||
|
||||
(except if they are using `hostNetwork`)
|
||||
|
||||
- Let's see how exactly plugins are invoked!
|
||||
|
||||
---
|
||||
|
||||
## General principle
|
||||
|
||||
- A plugin is an executable program
|
||||
|
||||
- It is invoked by kubelet to set up / tear down networking for a container
|
||||
|
||||
- It doesn't take any command-line argument
|
||||
|
||||
- However, it uses environment variables to know what to do, which container, etc.
|
||||
|
||||
- It reads JSON on stdin, and writes back JSON on stdout
|
||||
|
||||
- There will generally be multiple plugins invoked in a row
|
||||
|
||||
(at least IPAM + network setup; possibly more)
|
||||
|
||||
---
|
||||
|
||||
## Environment variables
|
||||
|
||||
- `CNI_COMMAND`: `ADD`, `DEL`, `CHECK`, or `VERSION`
|
||||
|
||||
- `CNI_CONTAINERID`: opaque identifier
|
||||
|
||||
(container ID of the "sandbox", i.e. the container running the `pause` image)
|
||||
|
||||
- `CNI_NETNS`: path to network namespace pseudo-file
|
||||
|
||||
(e.g. `/var/run/netns/cni-0376f625-29b5-7a21-6c45-6a973b3224e5`)
|
||||
|
||||
- `CNI_IFNAME`: interface name, usually `eth0`
|
||||
|
||||
- `CNI_PATH`: path(s) with plugin executables (e.g. `/opt/cni/bin`)
|
||||
|
||||
- `CNI_ARGS`: "extra arguments" (see next slide)
|
||||
|
||||
---
|
||||
|
||||
## `CNI_ARGS`
|
||||
|
||||
- Extra key/value pair arguments passed by "the user"
|
||||
|
||||
- "The user", here, is "kubelet" (or in an abstract way, "Kubernetes")
|
||||
|
||||
- This is used to pass the pod name and namespace to the CNI plugin
|
||||
|
||||
- Example:
|
||||
```
|
||||
IgnoreUnknown=1
|
||||
K8S_POD_NAMESPACE=default
|
||||
K8S_POD_NAME=web-96d5df5c8-jcn72
|
||||
K8S_POD_INFRA_CONTAINER_ID=016493dbff152641d334d9828dab6136c1ff...
|
||||
```
|
||||
|
||||
Note that technically, it's a `;`-separated list, so it really looks like this:
|
||||
```
|
||||
CNI_ARGS=IgnoreUnknown=1;K8S_POD_NAMESPACE=default;K8S_POD_NAME=web-96d...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## JSON in, JSON out
|
||||
|
||||
- The plugin reads its configuration on stdin
|
||||
|
||||
- It writes back results in JSON
|
||||
|
||||
(e.g. allocated address, routes, DNS...)
|
||||
|
||||
⚠️ "Plugin configuration" is not always the same as "CNI configuration"!
|
||||
|
||||
---
|
||||
|
||||
## Conf vs Conflist
|
||||
|
||||
- The CNI configuration can be a single plugin configuration
|
||||
|
||||
- it will then contain a `type` field in the top-most structure
|
||||
|
||||
- it will be passed "as-is"
|
||||
|
||||
- It can also be a "conflist", containing a chain of plugins
|
||||
|
||||
(it will then contain a `plugins` field in the top-most structure)
|
||||
|
||||
- Plugins are then invoked in order (reverse order for `DEL` action)
|
||||
|
||||
- In that case, the input of the plugin is not the whole configuration
|
||||
|
||||
(see details on next slide)
|
||||
|
||||
---
|
||||
|
||||
## List of plugins
|
||||
|
||||
- When invoking a plugin in a list, the JSON input will be:
|
||||
|
||||
- the configuration of the plugin
|
||||
|
||||
- augmented with `name` (matching the conf list `name`)
|
||||
|
||||
- augmented with `prevResult` (which will be the output of the previous plugin)
|
||||
|
||||
- Conceptually, a plugin (generally the first one) will do the "main setup"
|
||||
|
||||
- Other plugins can do tuning / refinement (firewalling, traffic shaping...)
|
||||
|
||||
---
|
||||
|
||||
## Analyzing plugins
|
||||
|
||||
- Let's see what goes in and out of our CNI plugins!
|
||||
|
||||
- We will create a fake plugin that:
|
||||
|
||||
- saves its environment and input
|
||||
|
||||
- executes the real plugin with the saved input
|
||||
|
||||
- saves the plugin output
|
||||
|
||||
- passes the saved output
|
||||
|
||||
---
|
||||
|
||||
## Our fake plugin
|
||||
|
||||
```bash
|
||||
#!/bin/sh
|
||||
PLUGIN=$(basename $0)
|
||||
cat > /tmp/cni.$$.$PLUGIN.in
|
||||
env | sort > /tmp/cni.$$.$PLUGIN.env
|
||||
echo "PPID=$PPID, $(readlink /proc/$PPID/exe)" > /tmp/cni.$$.$PLUGIN.parent
|
||||
$0.real < /tmp/cni.$$.$PLUGIN.in > /tmp/cni.$$.$PLUGIN.out
|
||||
EXITSTATUS=$?
|
||||
cat /tmp/cni.$$.$PLUGIN.out
|
||||
exit $EXITSTATUS
|
||||
```
|
||||
|
||||
Save this script as `/opt/cni/bin/debug` and make it executable.
|
||||
|
||||
---
|
||||
|
||||
## Substituting the fake plugin
|
||||
|
||||
- For each plugin that we want to instrument:
|
||||
|
||||
- rename the plugin from e.g. `ptp` to `ptp.real`
|
||||
|
||||
- symlink `ptp` to our `debug` plugin
|
||||
|
||||
- There is no need to change the CNI configuration or restart kubelet
|
||||
|
||||
---
|
||||
|
||||
## Create some pods and look at the results
|
||||
|
||||
- Create a pod
|
||||
|
||||
- For each instrumented plugin, there will be files in `/tmp`:
|
||||
|
||||
`cni.PID.pluginname.in` (JSON input)
|
||||
|
||||
`cni.PID.pluginname.env` (environment variables)
|
||||
|
||||
`cni.PID.pluginname.parent` (parent process information)
|
||||
|
||||
`cni.PID.pluginname.out` (JSON output)
|
||||
|
||||
❓️ What is calling our plugins?
|
||||
|
||||
???
|
||||
|
||||
:EN:- Deep dive into CNI internals
|
||||
:FR:- La Container Network Interface (CNI) en détails
|
||||
@@ -220,6 +220,41 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## How many nodes should a cluster have?
|
||||
|
||||
@@ -60,21 +60,41 @@
|
||||
|
||||
## Command-line arguments
|
||||
|
||||
- Pass options to `args` array in the container specification
|
||||
- Indicate what should run in the container
|
||||
|
||||
- Example ([source](https://github.com/coreos/pods/blob/master/kubernetes.yaml#L29)):
|
||||
- Pass `command` and/or `args` in the container options in a Pod's template
|
||||
|
||||
- Both `command` and `args` are arrays
|
||||
|
||||
- Example ([source](https://github.com/jpetazzo/container.training/blob/main/k8s/consul-1.yaml#L70)):
|
||||
```yaml
|
||||
args:
|
||||
- "--data-dir=/var/lib/etcd"
|
||||
- "--advertise-client-urls=http://127.0.0.1:2379"
|
||||
- "--listen-client-urls=http://127.0.0.1:2379"
|
||||
- "--listen-peer-urls=http://127.0.0.1:2380"
|
||||
- "--name=etcd"
|
||||
args:
|
||||
- "agent"
|
||||
- "-bootstrap-expect=3"
|
||||
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NS)\""
|
||||
- "-client=0.0.0.0"
|
||||
- "-data-dir=/consul/data"
|
||||
- "-server"
|
||||
- "-ui"
|
||||
```
|
||||
|
||||
- The options can be passed directly to the program that we run ...
|
||||
---
|
||||
|
||||
... or to a wrapper script that will use them to e.g. generate a config file
|
||||
## `args` or `command`?
|
||||
|
||||
- Use `command` to override the `ENTRYPOINT` defined in the image
|
||||
|
||||
- Use `args` to keep the `ENTRYPOINT` defined in the image
|
||||
|
||||
(the parameters specified in `args` are added to the `ENTRYPOINT`)
|
||||
|
||||
- In doubt, use `command`
|
||||
|
||||
- It is also possible to use *both* `command` and `args`
|
||||
|
||||
(they will be strung together, just like `ENTRYPOINT` and `CMD`)
|
||||
|
||||
- See the [docs](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes) to see how they interact together
|
||||
|
||||
---
|
||||
|
||||
@@ -514,73 +534,12 @@ spec:
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Passwords, tokens, sensitive information
|
||||
|
||||
- For sensitive information, there is another special resource: *Secrets*
|
||||
|
||||
- Secrets and Configmaps work almost the same way
|
||||
|
||||
(we'll expose the differences on the next slide)
|
||||
|
||||
- The *intent* is different, though:
|
||||
|
||||
*"You should use secrets for things which are actually secret like API keys,
|
||||
credentials, etc., and use config map for not-secret configuration data."*
|
||||
|
||||
*"In the future there will likely be some differentiators for secrets like rotation or support for backing the secret API w/ HSMs, etc."*
|
||||
|
||||
(Source: [the author of both features](https://stackoverflow.com/a/36925553/580281
|
||||
))
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Differences between configmaps and secrets
|
||||
|
||||
- Secrets are base64-encoded when shown with `kubectl get secrets -o yaml`
|
||||
|
||||
- keep in mind that this is just *encoding*, not *encryption*
|
||||
|
||||
- it is very easy to [automatically extract and decode secrets](https://medium.com/@mveritym/decoding-kubernetes-secrets-60deed7a96a3)
|
||||
|
||||
- [Secrets can be encrypted at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/)
|
||||
|
||||
- With RBAC, we can authorize a user to access configmaps, but not secrets
|
||||
|
||||
(since they are two different kinds of resources)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Immutable ConfigMaps and Secrets
|
||||
|
||||
- Since Kubernetes 1.19, it is possible to mark a ConfigMap or Secret as *immutable*
|
||||
|
||||
```bash
|
||||
kubectl patch configmap xyz --patch='{"immutable": true}'
|
||||
```
|
||||
|
||||
- This brings performance improvements when using lots of ConfigMaps and Secrets
|
||||
|
||||
(lots = tens of thousands)
|
||||
|
||||
- Once a ConfigMap or Secret has been marked as immutable:
|
||||
|
||||
- its content cannot be changed anymore
|
||||
- the `immutable` field can't be changed back either
|
||||
- the only way to change it is to delete and re-create it
|
||||
- Pods using it will have to be re-created as well
|
||||
|
||||
???
|
||||
|
||||
:EN:- Managing application configuration
|
||||
:EN:- Exposing configuration with the downward API
|
||||
:EN:- Exposing configuration with Config Maps and Secrets
|
||||
:EN:- Exposing configuration with Config Maps
|
||||
|
||||
:FR:- Gérer la configuration des applications
|
||||
:FR:- Configuration au travers de la *downward API*
|
||||
:FR:- Configuration via les *Config Maps* et *Secrets*
|
||||
:FR:- Configurer les applications avec des *Config Maps*
|
||||
@@ -92,6 +92,29 @@
|
||||
|
||||
---
|
||||
|
||||
## etcd authorization
|
||||
|
||||
- etcd supports RBAC, but Kubernetes doesn't use it by default
|
||||
|
||||
(note: etcd RBAC is completely different from Kubernetes RBAC!)
|
||||
|
||||
- By default, etcd access is "all or nothing"
|
||||
|
||||
(if you have a valid certificate, you get in)
|
||||
|
||||
- Be very careful if you use the same root CA for etcd and other things
|
||||
|
||||
(if etcd trusts the root CA, then anyone with a valid cert gets full etcd access)
|
||||
|
||||
- For more details, check the following resources:
|
||||
|
||||
- [etcd documentation on authentication](https://etcd.io/docs/current/op-guide/authentication/)
|
||||
|
||||
- [PKI The Wrong Way](https://www.youtube.com/watch?v=gcOLDEzsVHI) at KubeCon NA 2020
|
||||
|
||||
---
|
||||
|
||||
|
||||
## API server clients
|
||||
|
||||
- The API server has a sophisticated authentication and authorization system
|
||||
@@ -190,6 +213,24 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## How are these permissions set up?
|
||||
|
||||
- A bunch of roles and bindings are defined as constants in the API server code:
|
||||
|
||||
[auth/authorizer/rbac/bootstrappolicy/policy.go](https://github.com/kubernetes/kubernetes/blob/release-1.19/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go#L188)
|
||||
|
||||
- They are created automatically when the API server starts:
|
||||
|
||||
[registry/rbac/rest/storage_rbac.go](https://github.com/kubernetes/kubernetes/blob/release-1.19/pkg/registry/rbac/rest/storage_rbac.go#L140)
|
||||
|
||||
- We must use the correct Common Names (`CN`) for the control plane certificates
|
||||
|
||||
(since the bindings defined above refer to these common names)
|
||||
|
||||
---
|
||||
|
||||
## Service account tokens
|
||||
|
||||
- Each time we create a service account, the controller manager generates a token
|
||||
|
||||
334
slides/k8s/crd.md
Normal file
@@ -0,0 +1,334 @@
|
||||
# Custom Resource Definitions
|
||||
|
||||
- CRDs are one of the (many) ways to extend the API
|
||||
|
||||
- CRDs can be defined dynamically
|
||||
|
||||
(no need to recompile or reload the API server)
|
||||
|
||||
- A CRD is defined with a CustomResourceDefinition resource
|
||||
|
||||
(CustomResourceDefinition is conceptually similar to a *metaclass*)
|
||||
|
||||
---
|
||||
|
||||
## A very simple CRD
|
||||
|
||||
The file @@LINK[k8s/coffee-1.yaml] describes a very simple CRD representing different kinds of coffee:
|
||||
|
||||
```yaml
|
||||
@@INCLUDE[k8s/coffee-1.yaml]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Creating a CRD
|
||||
|
||||
- Let's create the Custom Resource Definition for our Coffee resource
|
||||
|
||||
.exercise[
|
||||
|
||||
- Load the CRD:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffee-1.yaml
|
||||
```
|
||||
|
||||
- Confirm that it shows up:
|
||||
```bash
|
||||
kubectl get crds
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating custom resources
|
||||
|
||||
The YAML below defines a resource using the CRD that we just created:
|
||||
|
||||
```yaml
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: arabica
|
||||
spec:
|
||||
taste: strong
|
||||
```
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a few types of coffee beans:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffees.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Viewing custom resources
|
||||
|
||||
- By default, `kubectl get` only shows name and age of custom resources
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the coffee beans that we just created:
|
||||
```bash
|
||||
kubectl get coffees
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- We'll see in a bit how to improve that
|
||||
|
||||
---
|
||||
|
||||
## What can we do with CRDs?
|
||||
|
||||
There are many possibilities!
|
||||
|
||||
- *Operators* encapsulate complex sets of resources
|
||||
|
||||
(e.g.: a PostgreSQL replicated cluster; an etcd cluster...
|
||||
<br/>
|
||||
see [awesome operators](https://github.com/operator-framework/awesome-operators) and
|
||||
[OperatorHub](https://operatorhub.io/) to find more)
|
||||
|
||||
- Custom use-cases like [gitkube](https://gitkube.sh/)
|
||||
|
||||
- creates a new custom type, `Remote`, exposing a git+ssh server
|
||||
|
||||
- deploy by pushing YAML or Helm charts to that remote
|
||||
|
||||
- Replacing built-in types with CRDs
|
||||
|
||||
(see [this lightning talk by Tim Hockin](https://www.youtube.com/watch?v=ji0FWzFwNhA))
|
||||
|
||||
---
|
||||
|
||||
## What's next?
|
||||
|
||||
- Creating a basic CRD is quick and easy
|
||||
|
||||
- But there is a lot more that we can (and probably should) do:
|
||||
|
||||
- improve input with *data validation*
|
||||
|
||||
- improve output with *custom columns*
|
||||
|
||||
- And of course, we probably need a *controller* to go with our CRD!
|
||||
|
||||
(otherwise, we're just using the Kubernetes API as a fancy data store)
|
||||
|
||||
---
|
||||
|
||||
## Additional printer columns
|
||||
|
||||
- We can specify `additionalPrinterColumns` in the CRD
|
||||
|
||||
- This is similar to `-o custom-columns`
|
||||
|
||||
(map a column name to a path in the object, e.g. `.spec.taste`)
|
||||
|
||||
```yaml
|
||||
additionalPrinterColumns:
|
||||
- jsonPath: .spec.taste
|
||||
description: Subjective taste of that kind of coffee bean
|
||||
name: Taste
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using additional printer columns
|
||||
|
||||
- Let's update our CRD using @@LINK[k8s/coffee-3.yaml]
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update the CRD:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffee-3.yaml
|
||||
```
|
||||
|
||||
- Look at our Coffee resources:
|
||||
```bash
|
||||
kubectl get coffees
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: we can update a CRD without having to re-create the corresponding resources.
|
||||
|
||||
(Good news, right?)
|
||||
|
||||
---
|
||||
|
||||
## Data validation
|
||||
|
||||
- By default, CRDs are not *validated*
|
||||
|
||||
(we can put anything we want in the `spec`)
|
||||
|
||||
- When creating a CRD, we can pass an OpenAPI v3 schema
|
||||
|
||||
(which will then be used to validate resources)
|
||||
|
||||
- More advanced validation can also be done with admission webhooks, e.g.:
|
||||
|
||||
- consistency between parameters
|
||||
|
||||
- advanced integer filters (e.g. odd number of replicas)
|
||||
|
||||
- things that can change in one direction but not the other
|
||||
|
||||
---
|
||||
|
||||
## OpenAPI v3 schema example
|
||||
|
||||
This is what we have in @@LINK[k8s/coffee-3.yaml]:
|
||||
|
||||
```yaml
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
required: [ spec ]
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
taste:
|
||||
description: Subjective taste of that kind of coffee bean
|
||||
type: string
|
||||
required: [ taste ]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Validation *a posteriori*
|
||||
|
||||
- Some of the "coffees" that we defined earlier *do not* pass validation
|
||||
|
||||
- How is that possible?
|
||||
|
||||
--
|
||||
|
||||
- Validation happens at *admission*
|
||||
|
||||
(when resources get written into the database)
|
||||
|
||||
- Therefore, we can have "invalid" resources in etcd
|
||||
|
||||
(they are invalid from the CRD perspective, but the CRD can be changed)
|
||||
|
||||
🤔 How should we handle that?
|
||||
|
||||
---
|
||||
|
||||
## Versions
|
||||
|
||||
- If the data format changes, we can roll out a new version of the CRD
|
||||
|
||||
(e.g. go from `v1alpha1` to `v1alpha2`)
|
||||
|
||||
- In a CRD we can specify the versions that exist, that are *served*, and *stored*
|
||||
|
||||
- multiple versions can be *served*
|
||||
|
||||
- only one can be *stored*
|
||||
|
||||
- Kubernetes doesn't automatically migrate the content of the database
|
||||
|
||||
- However, it can convert between versions when resources are read/written
|
||||
|
||||
---
|
||||
|
||||
## Conversion
|
||||
|
||||
- When *creating* a new resource, the *stored* version is used
|
||||
|
||||
(if we create it with another version, it gets converted)
|
||||
|
||||
- When *getting* or *watching* resources, the *requested* version is used
|
||||
|
||||
(if it is stored with another version, it gets converted)
|
||||
|
||||
- By default, "conversion" only changes the `apiVersion` field
|
||||
|
||||
- ... But we can register *conversion webhooks*
|
||||
|
||||
(see [that doc page](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion) for details)
|
||||
|
||||
---
|
||||
|
||||
## Migrating database content
|
||||
|
||||
- We need to *serve* a version as long as we *store* objects in that version
|
||||
|
||||
(=as long as the database has at least one object with that version)
|
||||
|
||||
- If we want to "retire" a version, we need to migrate these objects first
|
||||
|
||||
- All we have to do is to read and re-write them
|
||||
|
||||
(the [kube-storage-version-migrator](https://github.com/kubernetes-sigs/kube-storage-version-migrator) tool can help)
|
||||
|
||||
---
|
||||
|
||||
## What's next?
|
||||
|
||||
- Generally, when creating a CRD, we also want to run a *controller*
|
||||
|
||||
(otherwise nothing will happen when we create resources of that type)
|
||||
|
||||
- The controller will typically *watch* our custom resources
|
||||
|
||||
(and take action when they are created/updated)
|
||||
|
||||
---
|
||||
|
||||
## CRDs in the wild
|
||||
|
||||
- [gitkube](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml)
|
||||
|
||||
- [A redis operator](https://github.com/amaizfinance/redis-operator/blob/master/deploy/crds/k8s_v1alpha1_redis_crd.yaml)
|
||||
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml)
|
||||
|
||||
*How big are these YAML files?*
|
||||
|
||||
*What's the size (e.g. in lines) of each resource?*
|
||||
|
||||
---
|
||||
|
||||
## CRDs in practice
|
||||
|
||||
- Production-grade CRDs can be extremely verbose
|
||||
|
||||
(because of the openAPI schema validation)
|
||||
|
||||
- This can (and usually will) be managed by a framework
|
||||
|
||||
---
|
||||
|
||||
## (Ab)using the API server
|
||||
|
||||
- If we need to store something "safely" (as in: in etcd), we can use CRDs
|
||||
|
||||
- This gives us primitives to read/write/list objects (and optionally validate them)
|
||||
|
||||
- The Kubernetes API server can run on its own
|
||||
|
||||
(without the scheduler, controller manager, and kubelets)
|
||||
|
||||
- By loading CRDs, we can have it manage totally different objects
|
||||
|
||||
(unrelated to containers, clusters, etc.)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Custom Resource Definitions (CRDs)
|
||||
:FR:- Les CRDs *(Custom Resource Definitions)*
|
||||
@@ -108,6 +108,26 @@ The CA (or anyone else) never needs to know my private key.
|
||||
|
||||
---
|
||||
|
||||
## Warning
|
||||
|
||||
- The CSR API isn't really suited to issue user certificates
|
||||
|
||||
- It is primarily intended to issue control plane certificates
|
||||
|
||||
(for instance, deal with kubelet certificates renewal)
|
||||
|
||||
- The API was expanded a bit in Kubernetes 1.19 to encompass broader usage
|
||||
|
||||
- There are still lots of gaps in the spec
|
||||
|
||||
(e.g. how to specify expiration in a standard way)
|
||||
|
||||
- ... And no other implementation to this date
|
||||
|
||||
(but [cert-manager](https://cert-manager.io/docs/faq/#kubernetes-has-a-builtin-certificatesigningrequest-api-why-not-use-that) might eventually get there!)
|
||||
|
||||
---
|
||||
|
||||
## General idea
|
||||
|
||||
- We will create a Namespace named "users"
|
||||
|
||||
@@ -431,15 +431,23 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Complex selectors
|
||||
## Selectors with multiple labels
|
||||
|
||||
- If a selector specifies multiple labels, they are understood as a logical *AND*
|
||||
|
||||
(In other words: the pods must match all the labels)
|
||||
(in other words: the pods must match all the labels)
|
||||
|
||||
- Kubernetes has support for advanced, set-based selectors
|
||||
- We cannot have a logical *OR*
|
||||
|
||||
(But these cannot be used with services, at least not yet!)
|
||||
(e.g. `app=api AND (release=prod OR release=preprod)`)
|
||||
|
||||
- We can, however, apply as many extra labels as we want to our pods:
|
||||
|
||||
- use selector `app=api AND prod-or-preprod=yes`
|
||||
|
||||
- add `prod-or-preprod=yes` to both sets of pods
|
||||
|
||||
- We will see later that in other places, we can use more advanced selectors
|
||||
|
||||
---
|
||||
|
||||
@@ -689,6 +697,95 @@ class: extra-details
|
||||
|
||||
- This gives us building blocks for canary and blue/green deployments
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Advanced label selectors
|
||||
|
||||
- As indicated earlier, service selectors are limited to an `AND`
|
||||
|
||||
- But in many other places in the Kubernetes API, we can use complex selectors
|
||||
|
||||
(e.g. Deployment, ReplicaSet, DaemonSet, NetworkPolicy ...)
|
||||
|
||||
- These allow extra operations; specifically:
|
||||
|
||||
- checking for presence (or absence) of a label
|
||||
|
||||
- checking if a label is (or is not) in a given set
|
||||
|
||||
- Relevant documentation:
|
||||
|
||||
[Service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#servicespec-v1-core),
|
||||
[LabelSelector spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#labelselector-v1-meta),
|
||||
[label selector doc](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Example of advanced selector
|
||||
|
||||
```yaml
|
||||
theSelector:
|
||||
matchLabels:
|
||||
app: portal
|
||||
component: api
|
||||
matchExpressions:
|
||||
- key: release
|
||||
operator: In
|
||||
values: [ production, preproduction ]
|
||||
- key: signed-off-by
|
||||
operator: Exists
|
||||
```
|
||||
|
||||
This selector matches pods that meet *all* the indicated conditions.
|
||||
|
||||
`operator` can be `In`, `NotIn`, `Exists`, `DoesNotExist`.
|
||||
|
||||
A `nil` selector matches *nothing*, a `{}` selector matches *everything*.
|
||||
<br/>
|
||||
(Because that means "match all pods that meet at least zero condition".)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Services and Endpoints
|
||||
|
||||
- Each Service has a corresponding Endpoints resource
|
||||
|
||||
(see `kubectl get endpoints` or `kubectl get ep`)
|
||||
|
||||
- That Endpoints resource is used by various controllers
|
||||
|
||||
(e.g. `kube-proxy` when setting up `iptables` rules for ClusterIP services)
|
||||
|
||||
- These Endpoints are populated (and updated) with the Service selector
|
||||
|
||||
- We can update the Endpoints manually, but our changes will get overwritten
|
||||
|
||||
- ... Except if the Service selector is empty!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Empty Service selector
|
||||
|
||||
- If a service selector is empty, Endpoints don't get updated automatically
|
||||
|
||||
(but we can still set them manually)
|
||||
|
||||
- This lets us create Services pointing to arbitrary destinations
|
||||
|
||||
(potentially outside the cluster; or things that are not in pods)
|
||||
|
||||
- Another use-case: the `kubernetes` service in the `default` namespace
|
||||
|
||||
(its Endpoints are maintained automatically by the API server)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Scaling with Daemon Sets
|
||||
|
||||
@@ -2,47 +2,65 @@
|
||||
|
||||
- Kubernetes resources can also be viewed with a web dashboard
|
||||
|
||||
- That dashboard is usually exposed over HTTPS
|
||||
|
||||
(this requires obtaining a proper TLS certificate)
|
||||
|
||||
- Dashboard users need to authenticate
|
||||
|
||||
- We are going to take a *dangerous* shortcut
|
||||
(typically with a token)
|
||||
|
||||
- The dashboard should be exposed over HTTPS
|
||||
|
||||
(to prevent interception of the aforementioned token)
|
||||
|
||||
- Ideally, this requires obtaining a proper TLS certificate
|
||||
|
||||
(for instance, with Let's Encrypt)
|
||||
|
||||
---
|
||||
|
||||
## The insecure method
|
||||
## Three ways to install the dashboard
|
||||
|
||||
- We could (and should) use [Let's Encrypt](https://letsencrypt.org/) ...
|
||||
- Our `k8s` directory has no less than three manifests!
|
||||
|
||||
- ... but we don't want to deal with TLS certificates
|
||||
- `dashboard-recommended.yaml`
|
||||
|
||||
- We could (and should) learn how authentication and authorization work ...
|
||||
(purely internal dashboard; user must be created manually)
|
||||
|
||||
- ... but we will use a guest account with admin access instead
|
||||
- `dashboard-with-token.yaml`
|
||||
|
||||
.footnote[.warning[Yes, this will open our cluster to all kinds of shenanigans. Don't do this at home.]]
|
||||
(dashboard exposed with NodePort; creates an admin user for us)
|
||||
|
||||
- `dashboard-insecure.yaml` aka *YOLO*
|
||||
|
||||
(dashboard exposed over HTTP; gives root access to anonymous users)
|
||||
|
||||
---
|
||||
|
||||
## Running a very insecure dashboard
|
||||
## `dashboard-insecure.yaml`
|
||||
|
||||
- We are going to deploy that dashboard with *one single command*
|
||||
- This will allow anyone to deploy anything on your cluster
|
||||
|
||||
- This command will create all the necessary resources
|
||||
(without any authentication whatsoever)
|
||||
|
||||
(the dashboard itself, the HTTP wrapper, the admin/guest account)
|
||||
- **Do not** use this, except maybe on a local cluster
|
||||
|
||||
- All these resources are defined in a YAML file
|
||||
(or a cluster that you will destroy a few minutes later)
|
||||
|
||||
- All we have to do is load that YAML file with `kubectl apply -f`
|
||||
- On "normal" clusters, use `dashboard-with-token.yaml` instead!
|
||||
|
||||
---
|
||||
|
||||
## What's in the manifest?
|
||||
|
||||
- The dashboard itself
|
||||
|
||||
- An HTTP/HTTPS unwrapper (using `socat`)
|
||||
|
||||
- The guest/admin account
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create all the dashboard resources, with the following command:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/insecure-dashboard.yaml
|
||||
kubectl apply -f ~/container.training/k8s/dashboard-insecure.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -89,11 +107,26 @@ The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
--
|
||||
|
||||
.warning[By the way, we just added a backdoor to our Kubernetes cluster!]
|
||||
.warning[Remember, we just added a backdoor to our Kubernetes cluster!]
|
||||
|
||||
---
|
||||
|
||||
## Running the Kubernetes dashboard securely
|
||||
## Closing the backdoor
|
||||
|
||||
- Seriously, don't leave that thing running!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Remove what we just created:
|
||||
```bash
|
||||
kubectl delete -f ~/container.training/k8s/dashboard-insecure.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## The risks
|
||||
|
||||
- The steps that we just showed you are *for educational purposes only!*
|
||||
|
||||
@@ -105,6 +138,99 @@ The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
---
|
||||
|
||||
## `dashboard-with-token.yaml`
|
||||
|
||||
- This is a less risky way to deploy the dashboard
|
||||
|
||||
- It's not completely secure, either:
|
||||
|
||||
- we're using a self-signed certificate
|
||||
|
||||
- this is subject to eavesdropping attacks
|
||||
|
||||
- Using `kubectl port-forward` or `kubectl proxy` is even better
|
||||
|
||||
---
|
||||
|
||||
## What's in the manifest?
|
||||
|
||||
- The dashboard itself (but exposed with a `NodePort`)
|
||||
|
||||
- A ServiceAccount with `cluster-admin` privileges
|
||||
|
||||
(named `kubernetes-dashboard:cluster-admin`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create all the dashboard resources, with the following command:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/dashboard-with-token.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Obtaining the token
|
||||
|
||||
- The manifest creates a ServiceAccount
|
||||
|
||||
- Kubernetes will automatically generate a token for that ServiceAccount
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display the token:
|
||||
```bash
|
||||
kubectl --namespace=kubernetes-dashboard \
|
||||
describe secret cluster-admin-token
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The token should start with `eyJ...` (it's a JSON Web Token).
|
||||
|
||||
Note that the secret name will actually be `cluster-admin-token-xxxxx`.
|
||||
<br/>
|
||||
(But `kubectl` prefix matches are great!)
|
||||
|
||||
---
|
||||
|
||||
## Connecting to the dashboard
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check which port the dashboard is on:
|
||||
```bash
|
||||
kubectl get svc --namespace=kubernetes-dashboard
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
You'll want the `3xxxx` port.
|
||||
|
||||
|
||||
.exercise[
|
||||
|
||||
- Connect to http://oneofournodes:3xxxx/
|
||||
|
||||
<!-- ```open http://node1:3xxxx/``` -->
|
||||
|
||||
]
|
||||
|
||||
The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
---
|
||||
|
||||
## Dashboard authentication
|
||||
|
||||
- Select "token" authentication
|
||||
|
||||
- Copy paste the token (starting with `eyJ...`) obtained earlier
|
||||
|
||||
- We're logged in!
|
||||
|
||||
---
|
||||
|
||||
## Other dashboards
|
||||
|
||||
- [Kube Web View](https://codeberg.org/hjacobs/kube-web-view)
|
||||
@@ -115,7 +241,7 @@ The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
- see [vision and goals](https://kube-web-view.readthedocs.io/en/latest/vision.html#vision) for details
|
||||
|
||||
- [Kube Ops View](https://github.com/hjacobs/kube-ops-view)
|
||||
- [Kube Ops View](https://codeberg.org/hjacobs/kube-ops-view)
|
||||
|
||||
- "provides a common operational picture for multiple Kubernetes clusters"
|
||||
|
||||
|
||||
455
slides/k8s/eck.md
Normal file
@@ -0,0 +1,455 @@
|
||||
# An ElasticSearch Operator
|
||||
|
||||
- We will install [Elastic Cloud on Kubernetes](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html), an ElasticSearch operator
|
||||
|
||||
- This operator requires PersistentVolumes
|
||||
|
||||
- We will install Rancher's [local path storage provisioner](https://github.com/rancher/local-path-provisioner) to automatically create these
|
||||
|
||||
- Then, we will create an ElasticSearch resource
|
||||
|
||||
- The operator will detect that resource and provision the cluster
|
||||
|
||||
- We will integrate that ElasticSearch cluster with other resources
|
||||
|
||||
(Kibana, Filebeat, Cerebro ...)
|
||||
|
||||
---
|
||||
|
||||
## Installing a Persistent Volume provisioner
|
||||
|
||||
(This step can be skipped if you already have a dynamic volume provisioner.)
|
||||
|
||||
- This provisioner creates Persistent Volumes backed by `hostPath`
|
||||
|
||||
(local directories on our nodes)
|
||||
|
||||
- It doesn't require anything special ...
|
||||
|
||||
- ... But losing a node = losing the volumes on that node!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the local path storage provisioner:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/local-path-storage.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Making sure we have a default StorageClass
|
||||
|
||||
- The ElasticSearch operator will create StatefulSets
|
||||
|
||||
- These StatefulSets will instantiate PersistentVolumeClaims
|
||||
|
||||
- These PVCs need to be explicitly associated with a StorageClass
|
||||
|
||||
- Or we need to tag a StorageClass to be used as the default one
|
||||
|
||||
.exercise[
|
||||
|
||||
- List StorageClasses:
|
||||
```bash
|
||||
kubectl get storageclasses
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see the `local-path` StorageClass.
|
||||
|
||||
---
|
||||
|
||||
## Setting a default StorageClass
|
||||
|
||||
- This is done by adding an annotation to the StorageClass:
|
||||
|
||||
`storageclass.kubernetes.io/is-default-class: true`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tag the StorageClass so that it's the default one:
|
||||
```bash
|
||||
kubectl annotate storageclass local-path \
|
||||
storageclass.kubernetes.io/is-default-class=true
|
||||
```
|
||||
|
||||
- Check the result:
|
||||
```bash
|
||||
kubectl get storageclasses
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Now, the StorageClass should have `(default)` next to its name.
|
||||
|
||||
---
|
||||
|
||||
## Install the ElasticSearch operator
|
||||
|
||||
- The operator provides:
|
||||
|
||||
- a few CustomResourceDefinitions
|
||||
- a Namespace for its other resources
|
||||
- a ValidatingWebhookConfiguration for type checking
|
||||
- a StatefulSet for its controller and webhook code
|
||||
- a ServiceAccount, ClusterRole, ClusterRoleBinding for permissions
|
||||
|
||||
- All these resources are grouped in a convenient YAML file
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the operator:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/eck-operator.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Check our new custom resources
|
||||
|
||||
- Let's see which CRDs were created
|
||||
|
||||
.exercise[
|
||||
|
||||
- List all CRDs:
|
||||
```bash
|
||||
kubectl get crds
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
This operator supports ElasticSearch, but also Kibana and APM. Cool!
|
||||
|
||||
---
|
||||
|
||||
## Create the `eck-demo` namespace
|
||||
|
||||
- For clarity, we will create everything in a new namespace, `eck-demo`
|
||||
|
||||
- This namespace is hard-coded in the YAML files that we are going to use
|
||||
|
||||
- We need to create that namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the `eck-demo` namespace:
|
||||
```bash
|
||||
kubectl create namespace eck-demo
|
||||
```
|
||||
|
||||
- Switch to that namespace:
|
||||
```bash
|
||||
kns eck-demo
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Can we use a different namespace?
|
||||
|
||||
Yes, but then we need to update all the YAML manifests that we
|
||||
are going to apply in the next slides.
|
||||
|
||||
The `eck-demo` namespace is hard-coded in these YAML manifests.
|
||||
|
||||
Why?
|
||||
|
||||
Because when defining a ClusterRoleBinding that references a
|
||||
ServiceAccount, we have to indicate in which namespace the
|
||||
ServiceAccount is located.
|
||||
|
||||
---
|
||||
|
||||
## Create an ElasticSearch resource
|
||||
|
||||
- We can now create a resource with `kind: ElasticSearch`
|
||||
|
||||
- The YAML for that resource will specify all the desired parameters:
|
||||
|
||||
- how many nodes we want
|
||||
- image to use
|
||||
- add-ons (kibana, cerebro, ...)
|
||||
- whether to use TLS or not
|
||||
- etc.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create our ElasticSearch cluster:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/eck-elasticsearch.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Operator in action
|
||||
|
||||
- Over the next minutes, the operator will create our ES cluster
|
||||
|
||||
- It will report our cluster status through the CRD
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the logs of the operator:
|
||||
```bash
|
||||
stern --namespace=elastic-system operator
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait elastic-operator-0```
|
||||
```tmux split-pane -v```
|
||||
-->
|
||||
|
||||
- Watch the status of the cluster through the CRD:
|
||||
```bash
|
||||
kubectl get es -w
|
||||
```
|
||||
|
||||
<!--
|
||||
```longwait green```
|
||||
```key ^C```
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Connecting to our cluster
|
||||
|
||||
- It's not easy to use the ElasticSearch API from the shell
|
||||
|
||||
- But let's check at least if ElasticSearch is up!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Get the ClusterIP of our ES instance:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
|
||||
- Issue a request with `curl`:
|
||||
```bash
|
||||
curl http://`CLUSTERIP`:9200
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We get an authentication error. Our cluster is protected!
|
||||
|
||||
---
|
||||
|
||||
## Obtaining the credentials
|
||||
|
||||
- The operator creates a user named `elastic`
|
||||
|
||||
- It generates a random password and stores it in a Secret
|
||||
|
||||
.exercise[
|
||||
|
||||
- Extract the password:
|
||||
```bash
|
||||
kubectl get secret demo-es-elastic-user \
|
||||
-o go-template="{{ .data.elastic | base64decode }} "
|
||||
```
|
||||
|
||||
- Use it to connect to the API:
|
||||
```bash
|
||||
curl -u elastic:`PASSWORD` http://`CLUSTERIP`:9200
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see a JSON payload with the `"You Know, for Search"` tagline.
|
||||
|
||||
---
|
||||
|
||||
## Sending data to the cluster
|
||||
|
||||
- Let's send some data to our brand new ElasticSearch cluster!
|
||||
|
||||
- We'll deploy a filebeat DaemonSet to collect node logs
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy filebeat:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/eck-filebeat.yaml
|
||||
```
|
||||
|
||||
- Wait until some pods are up:
|
||||
```bash
|
||||
watch kubectl get pods -l k8s-app=filebeat
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Running```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
- Check that a filebeat index was created:
|
||||
```bash
|
||||
curl -u elastic:`PASSWORD` http://`CLUSTERIP`:9200/_cat/indices
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying an instance of Kibana
|
||||
|
||||
- Kibana can visualize the logs injected by filebeat
|
||||
|
||||
- The ECK operator can also manage Kibana
|
||||
|
||||
- Let's give it a try!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy a Kibana instance:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/eck-kibana.yaml
|
||||
```
|
||||
|
||||
- Wait for it to be ready:
|
||||
```bash
|
||||
kubectl get kibana -w
|
||||
```
|
||||
|
||||
<!--
|
||||
```longwait green```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Connecting to Kibana
|
||||
|
||||
- Kibana is automatically set up to connect to ElasticSearch
|
||||
|
||||
(this is arranged by the YAML that we're using)
|
||||
|
||||
- However, it will ask for authentication
|
||||
|
||||
- It's using the same user/password as ElasticSearch
|
||||
|
||||
.exercise[
|
||||
|
||||
- Get the NodePort allocated to Kibana:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
|
||||
- Connect to it with a web browser
|
||||
|
||||
- Use the same user/password as before
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Setting up Kibana
|
||||
|
||||
After the Kibana UI loads, we need to click around a bit
|
||||
|
||||
.exercise[
|
||||
|
||||
- Pick "explore on my own"
|
||||
|
||||
- Click on "Use Elasticsearch data / Connect to your Elasticsearch index"
|
||||
|
||||
- Enter `filebeat-*` for the index pattern and click "Next step"
|
||||
|
||||
- Select `@timestamp` as time filter field name
|
||||
|
||||
- Click on "discover" (the small icon looking like a compass on the left bar)
|
||||
|
||||
- Play around!
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Scaling up the cluster
|
||||
|
||||
- At this point, we have only one node
|
||||
|
||||
- We are going to scale up
|
||||
|
||||
- But first, we'll deploy Cerebro, a UI for ElasticSearch
|
||||
|
||||
- This will let us see the state of the cluster, how indexes are sharded, etc.
|
||||
|
||||
---
|
||||
|
||||
## Deploying Cerebro
|
||||
|
||||
- Cerebro is stateless, so it's fairly easy to deploy
|
||||
|
||||
(one Deployment + one Service)
|
||||
|
||||
- However, it needs the address and credentials for ElasticSearch
|
||||
|
||||
- We prepared yet another manifest for that!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy Cerebro:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/eck-cerebro.yaml
|
||||
```
|
||||
|
||||
- Lookup the NodePort number and connect to it:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Scaling up the cluster
|
||||
|
||||
- We can see on Cerebro that the cluster is "yellow"
|
||||
|
||||
(because our index is not replicated)
|
||||
|
||||
- Let's change that!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the ElasticSearch cluster manifest:
|
||||
```bash
|
||||
kubectl edit es demo
|
||||
```
|
||||
|
||||
- Find the field `count: 1` and change it to 3
|
||||
|
||||
- Save and quit
|
||||
|
||||
<!--
|
||||
```wait Please edit```
|
||||
```keys /count:```
|
||||
```key ^J```
|
||||
```keys $r3:x```
|
||||
```key ^J```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Deploying ElasticSearch with ECK
|
||||
:FR:- Déployer ElasticSearch avec ECK
|
||||
152
slides/k8s/events.md
Normal file
@@ -0,0 +1,152 @@
|
||||
# Events
|
||||
|
||||
- Kubernetes has an internal structured log of *events*
|
||||
|
||||
- These events are ordinary resources:
|
||||
|
||||
- we can view them with `kubectl get events`
|
||||
|
||||
- they can be viewed and created through the Kubernetes API
|
||||
|
||||
- they are stored in Kubernetes default database (e.g. etcd)
|
||||
|
||||
- Most components will generate events to let us know what's going on
|
||||
|
||||
- Events can be *related* to other resources
|
||||
|
||||
---
|
||||
|
||||
## Reading events
|
||||
|
||||
- `kubectl get events` (or `kubectl get ev`)
|
||||
|
||||
- Can use `--watch`
|
||||
|
||||
⚠️ Looks like `tail -f`, but events aren't necessarily sorted!
|
||||
|
||||
- Can use `--all-namespaces`
|
||||
|
||||
- Cluster events (e.g. related to nodes) are in the `default` namespace
|
||||
|
||||
- Viewing all "non-normal" events:
|
||||
```bash
|
||||
kubectl get ev -A --field-selector=type!=Normal
|
||||
```
|
||||
|
||||
(as of Kubernetes 1.19, `type` can be either `Normal` or `Warning`)
|
||||
|
||||
---
|
||||
|
||||
## Reading events (take 2)
|
||||
|
||||
- When we use `kubectl describe` on an object, `kubectl` retrieves the associated events
|
||||
|
||||
.exercise[
|
||||
|
||||
- See the API requests happening when we use `kubectl describe`:
|
||||
```bash
|
||||
kubectl describe service kubernetes --namespace=default -v6 >/dev/null
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Generating events
|
||||
|
||||
- This is rarely (if ever) done manually
|
||||
|
||||
(i.e. by crafting some YAML)
|
||||
|
||||
- But controllers (e.g. operators) need this!
|
||||
|
||||
- It's not mandatory, but it helps with *operability*
|
||||
|
||||
(e.g. when we `kubectl describe` a CRD, we will see associated events)
|
||||
|
||||
---
|
||||
|
||||
## ⚠️ Work in progress
|
||||
|
||||
- "Events" can be:
|
||||
|
||||
- "old-style" events (in core API group, aka `v1`)
|
||||
|
||||
- "new-style" events (in API group `events.k8s.io`)
|
||||
|
||||
- See [KEP 383](https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/383-new-event-api-ga-graduation/README.md) in particular this [comparison between old and new APIs](https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/383-new-event-api-ga-graduation/README.md#comparison-between-old-and-new-apis)
|
||||
|
||||
---
|
||||
|
||||
## Experimenting with events
|
||||
|
||||
- Let's create an event related to a Node, based on @@LINK[k8s/event-node.yaml]
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `k8s/event-node.yaml`
|
||||
|
||||
- Update the `name` and `uid` of the `involvedObject`
|
||||
|
||||
- Create the event with `kubectl create -f`
|
||||
|
||||
- Look at the Node with `kubectl describe`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Experimenting with events
|
||||
|
||||
- Let's create an event related to a Pod, based on @@LINK[k8s/event-pod.yaml]
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a pod
|
||||
|
||||
- Edit `k8s/event-pod.yaml`
|
||||
|
||||
- Edit the `involvedObject` section (don't forget the `uid`)
|
||||
|
||||
- Create the event with `kubectl create -f`
|
||||
|
||||
- Look at the Pod with `kubectl describe`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Generating events in practice
|
||||
|
||||
- In Go, use an `EventRecorder` provided by the `kubernetes/client-go` library
|
||||
|
||||
- [EventRecorder interface](https://github.com/kubernetes/client-go/blob/release-1.19/tools/record/event.go#L87)
|
||||
|
||||
- [kubebuilder book example](https://book-v1.book.kubebuilder.io/beyond_basics/creating_events.html)
|
||||
|
||||
- It will take care of formatting / aggregating events
|
||||
|
||||
- To get an idea of what to put in the `reason` field, check [kubelet events](
|
||||
https://github.com/kubernetes/kubernetes/blob/release-1.19/pkg/kubelet/events/event.go)
|
||||
|
||||
---
|
||||
|
||||
## Cluster operator perspective
|
||||
|
||||
- Events are kept 1 hour by default
|
||||
|
||||
- This can be changed with the `--event-ttl` flag on the API server
|
||||
|
||||
- On very busy clusters, events can be kept on a separate etcd cluster
|
||||
|
||||
- This is done with the `--etcd-servers-overrides` flag on the API server
|
||||
|
||||
- Example:
|
||||
```
|
||||
--etcd-servers-overrides=/events#http://127.0.0.1:12379
|
||||
```
|
||||
|
||||
???
|
||||
|
||||
:EN:- Consuming and generating cluster events
|
||||
:FR:- Suivre l'activité du cluster avec les *events*
|
||||
@@ -10,7 +10,7 @@ Level 2: make it so that the number of replicas can be set with `--set replicas=
|
||||
|
||||
Level 3: change the colors of the lego bricks.
|
||||
|
||||
(For level 3, fork the repository and use ctr.run to build images.)
|
||||
(For level 3, you'll have to build/push your own images.)
|
||||
|
||||
See next slide if you need hints!
|
||||
|
||||
@@ -44,20 +44,12 @@ Also add `replicas: 5` to `values.yaml` to provide a default value.
|
||||
|
||||
## Changing the color
|
||||
|
||||
- Fork the repository
|
||||
- Create an account on e.g. Docker Hub (e.g. `janedoe`)
|
||||
|
||||
- Make sure that your fork has valid Dockerfiles
|
||||
|
||||
(or identify a branch that has valid Dockerfiles)
|
||||
|
||||
- Use the following images:
|
||||
|
||||
ctr.run/yourgithubusername/wordsmith/db:branchname
|
||||
|
||||
(replace db with web and words for the other components)
|
||||
- Create an image repository (e.g. `janedoe/web`)
|
||||
|
||||
- Change the images and/or CSS in `web/static`
|
||||
|
||||
- Commit, push, trigger a rolling update
|
||||
- Build and push
|
||||
|
||||
(`imagePullPolicy` should be `Always`, which is the default)
|
||||
- Trigger a rolling update using the image you just pushed
|
||||
|
||||
@@ -4,224 +4,133 @@ There are multiple ways to extend the Kubernetes API.
|
||||
|
||||
We are going to cover:
|
||||
|
||||
- Controllers
|
||||
|
||||
- Dynamic Admission Webhooks
|
||||
|
||||
- Custom Resource Definitions (CRDs)
|
||||
|
||||
- Admission Webhooks
|
||||
|
||||
- The Aggregation Layer
|
||||
|
||||
But first, let's re(re)visit the API server ...
|
||||
|
||||
---
|
||||
|
||||
## Revisiting the API server
|
||||
|
||||
- The Kubernetes API server is a central point of the control plane
|
||||
|
||||
(everything connects to it: controller manager, scheduler, kubelets)
|
||||
- Everything connects to the API server:
|
||||
|
||||
- Almost everything in Kubernetes is materialized by a resource
|
||||
- users (that's us, but also automation like CI/CD)
|
||||
|
||||
- Resources have a type (or "kind")
|
||||
- kubelets
|
||||
|
||||
(similar to strongly typed languages)
|
||||
- network components (e.g. `kube-proxy`, pod network, NPC)
|
||||
|
||||
- We can see existing types with `kubectl api-resources`
|
||||
|
||||
- We can list resources of a given type with `kubectl get <type>`
|
||||
- controllers; lots of controllers
|
||||
|
||||
---
|
||||
|
||||
## Creating new types
|
||||
## Some controllers
|
||||
|
||||
- We can create new types with Custom Resource Definitions (CRDs)
|
||||
- `kube-controller-manager` runs built-in controllers
|
||||
|
||||
- CRDs are created dynamically
|
||||
(watching Deployments, Nodes, ReplicaSets, and much more)
|
||||
|
||||
(without recompiling or restarting the API server)
|
||||
- `kube-scheduler` runs the scheduler
|
||||
|
||||
- CRDs themselves are resources:
|
||||
(it's conceptually not different from another controller)
|
||||
|
||||
- we can create a new type with `kubectl create` and some YAML
|
||||
- `cloud-controller-manager` takes care of "cloud stuff"
|
||||
|
||||
- we can see all our custom types with `kubectl get crds`
|
||||
(e.g. provisioning load balancers, persistent volumes...)
|
||||
|
||||
- After we create a CRD, the new type works just like built-in types
|
||||
- Some components mentioned above are also controllers
|
||||
|
||||
(e.g. Network Policy Controller)
|
||||
|
||||
---
|
||||
|
||||
## A very simple CRD
|
||||
## More controllers
|
||||
|
||||
The YAML below describes a very simple CRD representing different kinds of coffee:
|
||||
- Cloud resources can also be managed by additional controllers
|
||||
|
||||
```yaml
|
||||
apiVersion: apiextensions.k8s.io/v1alpha1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: coffees.container.training
|
||||
spec:
|
||||
group: container.training
|
||||
version: v1alpha1
|
||||
scope: Namespaced
|
||||
names:
|
||||
plural: coffees
|
||||
singular: coffee
|
||||
kind: Coffee
|
||||
shortNames:
|
||||
- cof
|
||||
```
|
||||
(e.g. the [AWS Load Balancer Controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller))
|
||||
|
||||
- Leveraging Ingress resources requires an Ingress Controller
|
||||
|
||||
(many options available here; we can even install multiple ones!)
|
||||
|
||||
- Many add-ons (including CRDs and operators) have controllers as well
|
||||
|
||||
🤔 *What's even a controller ?!?*
|
||||
|
||||
---
|
||||
|
||||
## Creating a CRD
|
||||
## What's a controller?
|
||||
|
||||
- Let's create the Custom Resource Definition for our Coffee resource
|
||||
According to the [documentation](https://kubernetes.io/docs/concepts/architecture/controller/):
|
||||
|
||||
.exercise[
|
||||
*Controllers are **control loops** that<br/>
|
||||
**watch** the state of your cluster,<br/>
|
||||
then make or request changes where needed.*
|
||||
|
||||
- Load the CRD:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffee-1.yaml
|
||||
```
|
||||
|
||||
- Confirm that it shows up:
|
||||
```bash
|
||||
kubectl get crds
|
||||
```
|
||||
|
||||
]
|
||||
*Each controller tries to move the current cluster state closer to the desired state.*
|
||||
|
||||
---
|
||||
|
||||
## Creating custom resources
|
||||
## What controllers do
|
||||
|
||||
The YAML below defines a resource using the CRD that we just created:
|
||||
- Watch resources
|
||||
|
||||
```yaml
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: arabica
|
||||
spec:
|
||||
taste: strong
|
||||
```
|
||||
- Make changes:
|
||||
|
||||
.exercise[
|
||||
- purely at the API level (e.g. Deployment, ReplicaSet controllers)
|
||||
|
||||
- Create a few types of coffee beans:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/coffees.yaml
|
||||
```
|
||||
- and/or configure resources (e.g. `kube-proxy`)
|
||||
|
||||
]
|
||||
- and/or provision resources (e.g. load balancer controller)
|
||||
|
||||
---
|
||||
|
||||
## Viewing custom resources
|
||||
## Extending Kubernetes with controllers
|
||||
|
||||
- By default, `kubectl get` only shows name and age of custom resources
|
||||
- Random example:
|
||||
|
||||
.exercise[
|
||||
- watch resources like Deployments, Services ...
|
||||
|
||||
- View the coffee beans that we just created:
|
||||
```bash
|
||||
kubectl get coffees
|
||||
```
|
||||
- read annotations to configure monitoring
|
||||
|
||||
]
|
||||
- Technically, this is not extending the API
|
||||
|
||||
- We can improve that, but it's outside the scope of this section!
|
||||
(but it can still be very useful!)
|
||||
|
||||
---
|
||||
|
||||
## What can we do with CRDs?
|
||||
## Other ways to extend Kubernetes
|
||||
|
||||
There are many possibilities!
|
||||
- Prevent or alter API requests before resources are committed to storage:
|
||||
|
||||
- *Operators* encapsulate complex sets of resources
|
||||
*Admission Control*
|
||||
|
||||
(e.g.: a PostgreSQL replicated cluster; an etcd cluster...
|
||||
<br/>
|
||||
see [awesome operators](https://github.com/operator-framework/awesome-operators) and
|
||||
[OperatorHub](https://operatorhub.io/) to find more)
|
||||
- Create new resource types leveraging Kubernetes storage facilities:
|
||||
|
||||
- Custom use-cases like [gitkube](https://gitkube.sh/)
|
||||
*Custom Resource Definitions*
|
||||
|
||||
- creates a new custom type, `Remote`, exposing a git+ssh server
|
||||
- Create new resource types with different storage or different semantics:
|
||||
|
||||
- deploy by pushing YAML or Helm charts to that remote
|
||||
*Aggregation Layer*
|
||||
|
||||
- Replacing built-in types with CRDs
|
||||
- Spoiler alert: often, we will combine multiple techniques
|
||||
|
||||
(see [this lightning talk by Tim Hockin](https://www.youtube.com/watch?v=ji0FWzFwNhA))
|
||||
|
||||
---
|
||||
|
||||
## Little details
|
||||
|
||||
- By default, CRDs are not *validated*
|
||||
|
||||
(we can put anything we want in the `spec`)
|
||||
|
||||
- When creating a CRD, we can pass an OpenAPI v3 schema (BETA!)
|
||||
|
||||
(which will then be used to validate resources)
|
||||
|
||||
- Generally, when creating a CRD, we also want to run a *controller*
|
||||
|
||||
(otherwise nothing will happen when we create resources of that type)
|
||||
|
||||
- The controller will typically *watch* our custom resources
|
||||
|
||||
(and take action when they are created/updated)
|
||||
|
||||
*
|
||||
Examples:
|
||||
[YAML to install the gitkube CRD](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml),
|
||||
[YAML to install a redis operator CRD](https://github.com/amaizfinance/redis-operator/blob/master/deploy/crds/k8s_v1alpha1_redis_crd.yaml)
|
||||
*
|
||||
|
||||
---
|
||||
|
||||
## (Ab)using the API server
|
||||
|
||||
- If we need to store something "safely" (as in: in etcd), we can use CRDs
|
||||
|
||||
- This gives us primitives to read/write/list objects (and optionally validate them)
|
||||
|
||||
- The Kubernetes API server can run on its own
|
||||
|
||||
(without the scheduler, controller manager, and kubelets)
|
||||
|
||||
- By loading CRDs, we can have it manage totally different objects
|
||||
|
||||
(unrelated to containers, clusters, etc.)
|
||||
|
||||
---
|
||||
|
||||
## Service catalog
|
||||
|
||||
- *Service catalog* is another extension mechanism
|
||||
|
||||
- It's not extending the Kubernetes API strictly speaking
|
||||
|
||||
(but it still provides new features!)
|
||||
|
||||
- It doesn't create new types; it uses:
|
||||
|
||||
- ClusterServiceBroker
|
||||
- ClusterServiceClass
|
||||
- ClusterServicePlan
|
||||
- ServiceInstance
|
||||
- ServiceBinding
|
||||
|
||||
- It uses the Open service broker API
|
||||
(and involve controllers as well!)
|
||||
|
||||
---
|
||||
|
||||
## Admission controllers
|
||||
|
||||
- Admission controllers are another way to extend the Kubernetes API
|
||||
|
||||
- Instead of creating new types, admission controllers can transform or vet API requests
|
||||
- Admission controllers can vet or transform API requests
|
||||
|
||||
- The diagram on the next slide shows the path of an API request
|
||||
|
||||
@@ -273,9 +182,9 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Admission Webhooks
|
||||
## Dynamic Admission Control
|
||||
|
||||
- We can setup *admission webhooks* to extend the behavior of the API server
|
||||
- We can set up *admission webhooks* to extend the behavior of the API server
|
||||
|
||||
- The API server will submit incoming API requests to these webhooks
|
||||
|
||||
@@ -307,6 +216,77 @@ class: extra-details
|
||||
|
||||
(to avoid e.g. triggering webhooks when setting up webhooks)
|
||||
|
||||
- The webhook server can be hosted in or out of the cluster
|
||||
|
||||
---
|
||||
|
||||
## Dynamic Admission Examples
|
||||
|
||||
- Policy control
|
||||
|
||||
([Kyverno](https://kyverno.io/),
|
||||
[Open Policy Agent](https://www.openpolicyagent.org/docs/latest/))
|
||||
|
||||
- Sidecar injection
|
||||
|
||||
(Used by some service meshes)
|
||||
|
||||
- Type validation
|
||||
|
||||
(More on this later, in the CRD section)
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes API types
|
||||
|
||||
- Almost everything in Kubernetes is materialized by a resource
|
||||
|
||||
- Resources have a type (or "kind")
|
||||
|
||||
(similar to strongly typed languages)
|
||||
|
||||
- We can see existing types with `kubectl api-resources`
|
||||
|
||||
- We can list resources of a given type with `kubectl get <type>`
|
||||
|
||||
---
|
||||
|
||||
## Creating new types
|
||||
|
||||
- We can create new types with Custom Resource Definitions (CRDs)
|
||||
|
||||
- CRDs are created dynamically
|
||||
|
||||
(without recompiling or restarting the API server)
|
||||
|
||||
- CRDs themselves are resources:
|
||||
|
||||
- we can create a new type with `kubectl create` and some YAML
|
||||
|
||||
- we can see all our custom types with `kubectl get crds`
|
||||
|
||||
- After we create a CRD, the new type works just like built-in types
|
||||
|
||||
---
|
||||
|
||||
## Examples
|
||||
|
||||
- Representing composite resources
|
||||
|
||||
(e.g. clusters like databases, messages queues ...)
|
||||
|
||||
- Representing external resources
|
||||
|
||||
(e.g. virtual machines, object store buckets, domain names ...)
|
||||
|
||||
- Representing configuration for controllers and operators
|
||||
|
||||
(e.g. custom Ingress resources, certificate issuers, backups ...)
|
||||
|
||||
- Alternate representations of other objects; services and service instances
|
||||
|
||||
(e.g. encrypted secret, git endpoints ...)
|
||||
|
||||
---
|
||||
|
||||
## The aggregation layer
|
||||
@@ -325,9 +305,57 @@ class: extra-details
|
||||
|
||||
- Example: `metrics-server`
|
||||
|
||||
(storing live metrics in etcd would be extremely inefficient)
|
||||
---
|
||||
|
||||
- Requires significantly more work than CRDs!
|
||||
## Why?
|
||||
|
||||
- Using a CRD for live metrics would be extremely inefficient
|
||||
|
||||
(etcd **is not** a metrics store; write performance is way too slow)
|
||||
|
||||
- Instead, `metrics-server`:
|
||||
|
||||
- collects metrics from kubelets
|
||||
|
||||
- stores them in memory
|
||||
|
||||
- exposes them as PodMetrics and NodeMetrics (in API group metrics.k8s.io)
|
||||
|
||||
- is registered as an APIService
|
||||
|
||||
---
|
||||
|
||||
## Drawbacks
|
||||
|
||||
- Requires a server
|
||||
|
||||
- ... that implements a non-trivial API (aka the Kubernetes API semantics)
|
||||
|
||||
- If we need REST semantics, CRDs are probably way simpler
|
||||
|
||||
- *Sometimes* synchronizing external state with CRDs might do the trick
|
||||
|
||||
(unless we want the external state to be our single source of truth)
|
||||
|
||||
---
|
||||
|
||||
## Service catalog
|
||||
|
||||
- *Service catalog* is another extension mechanism
|
||||
|
||||
- It's not extending the Kubernetes API strictly speaking
|
||||
|
||||
(but it still provides new features!)
|
||||
|
||||
- It doesn't create new types; it uses:
|
||||
|
||||
- ClusterServiceBroker
|
||||
- ClusterServiceClass
|
||||
- ClusterServicePlan
|
||||
- ServiceInstance
|
||||
- ServiceBinding
|
||||
|
||||
- It uses the Open service broker API
|
||||
|
||||
---
|
||||
|
||||
@@ -347,11 +375,5 @@ class: extra-details
|
||||
|
||||
???
|
||||
|
||||
:EN:- Extending the Kubernetes API
|
||||
:EN:- Custom Resource Definitions (CRDs)
|
||||
:EN:- The aggregation layer
|
||||
:EN:- Admission control and webhooks
|
||||
|
||||
:EN:- Overview of Kubernetes API extensions
|
||||
:FR:- Comment étendre l'API Kubernetes
|
||||
:FR:- Les CRDs *(Custom Resource Definitions)*
|
||||
:FR:- Extension via *aggregation layer*, *admission control*, *webhooks*
|
||||
|
||||
230
slides/k8s/finalizers.md
Normal file
@@ -0,0 +1,230 @@
|
||||
# Finalizers
|
||||
|
||||
- Sometimes, we.red[¹] want to prevent a resource from being deleted:
|
||||
|
||||
- perhaps it's "precious" (holds important data)
|
||||
|
||||
- perhaps other resources depend on it (and should be deleted first)
|
||||
|
||||
- perhaps we need to perform some clean up before it's deleted
|
||||
|
||||
- *Finalizers* are a way to do that!
|
||||
|
||||
.footnote[.red[¹]The "we" in that sentence generally stands for a controller.
|
||||
<br/>(We can also use finalizers directly ourselves, but it's not very common.)]
|
||||
|
||||
---
|
||||
|
||||
## Examples
|
||||
|
||||
- Prevent deletion of a PersistentVolumeClaim which is used by a Pod
|
||||
|
||||
- Prevent deletion of a PersistentVolume which is bound to a PersistentVolumeClaim
|
||||
|
||||
- Prevent deletion of a Namespace that still contains objects
|
||||
|
||||
- When a LoadBalancer Service is deleted, make sure that the corresponding external resource (e.g. NLB, GLB, etc.) gets deleted.red[¹]
|
||||
|
||||
- When a CRD gets deleted, make sure that all the associated resources get deleted.red[²]
|
||||
|
||||
.footnote[.red[¹²]Finalizers are not the only solution for these use-cases.]
|
||||
|
||||
---
|
||||
|
||||
## How do they work?
|
||||
|
||||
- Each resource can have a list of `finalizers` in its `metadata`, e.g.:
|
||||
|
||||
```yaml
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: my-pvc
|
||||
annotations: ...
|
||||
finalizers:
|
||||
- kubernetes.io/pvc-protection
|
||||
```
|
||||
|
||||
- If we try to delete a resource that has at least one finalizer:
|
||||
|
||||
- the resource is *not* deleted
|
||||
|
||||
- instead, its `deletionTimestamp` is set to the current time
|
||||
|
||||
- we are merely *marking the resource for deletion*
|
||||
|
||||
---
|
||||
|
||||
## What happens next?
|
||||
|
||||
- The controller that added the finalizer is supposed to:
|
||||
|
||||
- watch for resources with a `deletionTimestamp`
|
||||
|
||||
- execute necessary clean-up actions
|
||||
|
||||
- then remove the finalizer
|
||||
|
||||
- The resource is deleted once all the finalizers have been removed
|
||||
|
||||
(there is no timeout, so this could take forever)
|
||||
|
||||
- Until then, the resource can be used normally
|
||||
|
||||
(but no further finalizer can be *added* to the resource)
|
||||
|
||||
---
|
||||
|
||||
## Finalizers in review
|
||||
|
||||
Let's review the examples mentioned earlier.
|
||||
|
||||
For each of them, we'll see if there are other (perhaps better) options.
|
||||
|
||||
---
|
||||
|
||||
## Volume finalizer
|
||||
|
||||
- Kubernetes applies the following finalizers:
|
||||
|
||||
- `kubernetes.io/pvc-protection` on PersistentVolumeClaims
|
||||
|
||||
- `kubernetes.io/pv-protection` on PersistentVolumes
|
||||
|
||||
- This prevents removing them when they are in use
|
||||
|
||||
- Implementation detail: the finalizer is present *even when the resource is not in use*
|
||||
|
||||
- When the resource is ~~deleted~~ marked for deletion, the controller will check if the finalizer can be removed
|
||||
|
||||
(Perhaps to avoid race conditions?)
|
||||
|
||||
---
|
||||
|
||||
## Namespace finalizer
|
||||
|
||||
- Kubernetes applies a finalizer named `kubernetes`
|
||||
|
||||
- It prevents removing the namespace if it still contains objects
|
||||
|
||||
- *Can we remove the namespace anyway?*
|
||||
|
||||
- remove the finalizer
|
||||
|
||||
- delete the namespace
|
||||
|
||||
- force deletion
|
||||
|
||||
- It *seems to work* but, in fact, the objects in the namespace still exist
|
||||
|
||||
(and they will re-appear if we re-create the namespace)
|
||||
|
||||
See [this blog post](https://www.openshift.com/blog/the-hidden-dangers-of-terminating-namespaces) for more details about this.
|
||||
|
||||
---
|
||||
|
||||
## LoadBalancer finalizer
|
||||
|
||||
- Scenario:
|
||||
|
||||
We run a custom controller to implement provisioning of LoadBalancer Services.
|
||||
|
||||
When a Service with type=LoadBalancer is deleted, we want to make sure
|
||||
that the corresponding external resources are properly deleted.
|
||||
|
||||
- Rationale for using a finalizer:
|
||||
|
||||
Normally, we would watch and observe the deletion of the Service;
|
||||
but if the Service is deleted while our controller is down,
|
||||
we could "miss" the deletion and forget to clean up the external resource.
|
||||
|
||||
The finalizer ensures that we will "see" the deletion
|
||||
and clean up the external resource.
|
||||
|
||||
---
|
||||
|
||||
## Counterpoint
|
||||
|
||||
- We could also:
|
||||
|
||||
- Tag the external resources
|
||||
<br/>(to indicate which Kubernetes Service they correspond to)
|
||||
|
||||
- Periodically reconcile them against Kubernetes resources
|
||||
|
||||
  - If a Kubernetes resource no longer exists, delete the external resource
|
||||
|
||||
- This doesn't have to be a *pre-delete* hook
|
||||
|
||||
(unless we store important information in the Service, e.g. as annotations)
|
||||
|
||||
---
|
||||
|
||||
## CRD finalizer
|
||||
|
||||
- Scenario:
|
||||
|
||||
We have a CRD that represents a PostgreSQL cluster.
|
||||
|
||||
It provisions StatefulSets, Deployments, Services, Secrets, ConfigMaps.
|
||||
|
||||
When the CRD is deleted, we want to delete all these resources.
|
||||
|
||||
- Rationale for using a finalizer:
|
||||
|
||||
Same as previously; we could observe the CRD, but if it is deleted
|
||||
while the controller isn't running, we would miss the deletion,
|
||||
and the other resources would keep running.
|
||||
|
||||
---
|
||||
|
||||
## Counterpoint
|
||||
|
||||
- We could use the same technique as described before
|
||||
|
||||
(tag the resources with e.g. annotations, to associate them with the CRD)
|
||||
|
||||
- Even better: we could use `ownerReferences`
|
||||
|
||||
(this feature is *specifically* designed for that use-case!)
|
||||
|
||||
---
|
||||
|
||||
## CRD finalizer (take two)
|
||||
|
||||
- Scenario:
|
||||
|
||||
We have a CRD that represents a PostgreSQL cluster.
|
||||
|
||||
It provisions StatefulSets, Deployments, Services, Secrets, ConfigMaps.
|
||||
|
||||
When the CRD is deleted, we want to delete all these resources.
|
||||
|
||||
We also want to store a final backup of the database.
|
||||
|
||||
We also want to update final usage metrics (e.g. for billing purposes).
|
||||
|
||||
- Rationale for using a finalizer:
|
||||
|
||||
We need to take some actions *before* the resources get deleted, not *after*.
|
||||
|
||||
---
|
||||
|
||||
## Wrapping up
|
||||
|
||||
- Finalizers are a great way to:
|
||||
|
||||
- prevent deletion of a resource that is still in use
|
||||
|
||||
- have a "guaranteed" pre-delete hook
|
||||
|
||||
- They can also be (ab)used for other purposes
|
||||
|
||||
- Code spelunking exercise:
|
||||
|
||||
*check where finalizers are used in the Kubernetes code base and why!*
|
||||
|
||||
???
|
||||
|
||||
:EN:- Using "finalizers" to manage resource lifecycle
|
||||
:FR:- Gérer le cycle de vie des ressources avec les *finalizers*
|
||||
766
slides/k8s/gitlab.md
Normal file
@@ -0,0 +1,766 @@
|
||||
# CI/CD with GitLab
|
||||
|
||||
- In this section, we will see how to set up a CI/CD pipeline with GitLab
|
||||
|
||||
(using a "self-hosted" GitLab; i.e. running on our Kubernetes cluster)
|
||||
|
||||
- The big picture:
|
||||
|
||||
- each time we push code to GitLab, it will be deployed in a staging environment
|
||||
|
||||
- each time we push the `production` tag, it will be deployed in production
|
||||
|
||||
---
|
||||
|
||||
## Disclaimers
|
||||
|
||||
- We'll use GitLab here as an example, but there are many other options
|
||||
|
||||
(e.g. some combination of Argo, Harbor, Tekton ...)
|
||||
|
||||
- There are also hosted options
|
||||
|
||||
(e.g. GitHub Actions and many others)
|
||||
|
||||
- We'll use a specific pipeline and workflow, but it's purely arbitrary
|
||||
|
||||
(treat it as a source of inspiration, not a model to be copied!)
|
||||
|
||||
---
|
||||
|
||||
## Workflow overview
|
||||
|
||||
- Push code to GitLab's git server
|
||||
|
||||
- GitLab notices the `.gitlab-ci.yml` file, which defines our pipeline
|
||||
|
||||
- Our pipeline can have multiple *stages* executed sequentially
|
||||
|
||||
(e.g. lint, build, test, deploy ...)
|
||||
|
||||
- Each stage can have multiple *jobs* executed in parallel
|
||||
|
||||
(e.g. build images in parallel)
|
||||
|
||||
- Each job will be executed in an independent *runner* pod
|
||||
|
||||
---
|
||||
|
||||
## Pipeline overview
|
||||
|
||||
- Our repository holds source code, Dockerfiles, and a Helm chart
|
||||
|
||||
- *Lint* stage will check the Helm chart validity
|
||||
|
||||
- *Build* stage will build container images
|
||||
|
||||
(and push them to GitLab's integrated registry)
|
||||
|
||||
- *Deploy* stage will deploy the Helm chart, using these images
|
||||
|
||||
- Pushes to `production` will deploy to "the" production namespace
|
||||
|
||||
- Pushes to other tags/branches will deploy to a namespace created on the fly
|
||||
|
||||
- We will discuss shortcomings and alternatives at the end of this chapter!
|
||||
|
||||
---
|
||||
|
||||
## Lots of requirements
|
||||
|
||||
- We need *a lot* of components to pull this off:
|
||||
|
||||
- a domain name
|
||||
|
||||
- a storage class
|
||||
|
||||
- a TLS-capable ingress controller
|
||||
|
||||
- the cert-manager operator
|
||||
|
||||
- GitLab itself
|
||||
|
||||
- the GitLab pipeline
|
||||
|
||||
- Wow, why?!?
|
||||
|
||||
---
|
||||
|
||||
## I find your lack of TLS disturbing
|
||||
|
||||
- We need a container registry (obviously!)
|
||||
|
||||
- Docker (and other container engines) *require* TLS on the registry
|
||||
|
||||
(with valid certificates)
|
||||
|
||||
- A few options:
|
||||
|
||||
- use a "real" TLS certificate (e.g. obtained with Let's Encrypt)
|
||||
|
||||
- use a self-signed TLS certificate
|
||||
|
||||
- communicate with the registry over localhost (TLS isn't required then)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Why not self-signed certs?
|
||||
|
||||
- When using self-signed certs, we need to either:
|
||||
|
||||
- add the cert (or CA) to trusted certs
|
||||
|
||||
- disable cert validation
|
||||
|
||||
- This needs to be done on *every client* connecting to the registry:
|
||||
|
||||
- CI/CD pipeline (building and pushing images)
|
||||
|
||||
- container engine (deploying the images)
|
||||
|
||||
- other tools (e.g. container security scanner)
|
||||
|
||||
- It's doable, but it's a lot of hacks (especially when adding more tools!)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Why not localhost?
|
||||
|
||||
- TLS is usually not required when the registry is on localhost
|
||||
|
||||
- We could expose the registry e.g. on a `NodePort`
|
||||
|
||||
- ... And then tweak the CI/CD pipeline to use that instead
|
||||
|
||||
- This is great when obtaining valid certs is difficult:
|
||||
|
||||
- air-gapped or internal environments (that can't use Let's Encrypt)
|
||||
|
||||
- no domain name available
|
||||
|
||||
- Downside: the registry isn't easily or safely available from outside
|
||||
|
||||
(the `NodePort` essentially defeats TLS)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Can we use `nip.io`?
|
||||
|
||||
- We will use Let's Encrypt
|
||||
|
||||
- Let's Encrypt has a quota of certificates per domain
|
||||
|
||||
(in 2020, that was [50 certificates per week per domain](https://letsencrypt.org/docs/rate-limits/))
|
||||
|
||||
- So if we all use `nip.io`, we will probably run into that limit
|
||||
|
||||
- But you can try and see if it works!
|
||||
|
||||
---
|
||||
|
||||
## Install GitLab itself
|
||||
|
||||
- We will deploy GitLab with its official Helm chart
|
||||
|
||||
- It will still require a bunch of parameters and customization
|
||||
|
||||
- Brace!
|
||||
|
||||
---
|
||||
|
||||
## Installing the GitLab chart
|
||||
|
||||
```bash
|
||||
helm repo add gitlab https://charts.gitlab.io/
|
||||
DOMAIN=`cloudnative.party`
|
||||
ISSUER=letsencrypt-production
|
||||
helm upgrade --install gitlab gitlab/gitlab \
|
||||
--create-namespace --namespace gitlab \
|
||||
--set global.hosts.domain=$DOMAIN \
|
||||
--set certmanager.install=false \
|
||||
--set nginx-ingress.enabled=false \
|
||||
--set global.ingress.class=traefik \
|
||||
--set global.ingress.provider=traefik \
|
||||
--set global.ingress.configureCertmanager=false \
|
||||
--set global.ingress.annotations."cert-manager\.io/cluster-issuer"=$ISSUER \
|
||||
--set gitlab.webservice.ingress.tls.secretName=gitlab-gitlab-tls \
|
||||
--set registry.ingress.tls.secretName=gitlab-registry-tls \
|
||||
--set minio.ingress.tls.secretName=gitlab-minio-tls
|
||||
```
|
||||
|
||||
😰 Can we talk about all these parameters?
|
||||
|
||||
---
|
||||
|
||||
## Breaking down all these parameters
|
||||
|
||||
- `certmanager.install=false`
|
||||
|
||||
do not install cert-manager, we already have it
|
||||
|
||||
- `nginx-ingress.enabled=false`
|
||||
|
||||
do not install the NGINX ingress controller, we already have Traefik
|
||||
|
||||
- `global.ingress.class=traefik`, `global.ingress.provider=traefik`
|
||||
|
||||
these merely enable creation of Ingress resources
|
||||
|
||||
- `global.ingress.configureCertmanager=false`
|
||||
|
||||
do not create a cert-manager Issuer or ClusterIssuer, we have ours
|
||||
|
||||
---
|
||||
|
||||
## More parameters
|
||||
|
||||
- `global.ingress.annotations."cert-manager\.io/cluster-issuer"=$ISSUER`
|
||||
|
||||
this annotation tells cert-manager to automatically issue certs
|
||||
|
||||
- `gitlab.webservice.ingress.tls.secretName=gitlab-gitlab-tls`,
|
||||
<br/>
|
||||
`registry.ingress.tls.secretName=gitlab-registry-tls`,
|
||||
<br/>
|
||||
`minio.ingress.tls.secretName=gitlab-minio-tls`
|
||||
|
||||
these annotations enable TLS in the Ingress controller
|
||||
|
||||
---
|
||||
|
||||
## Wait for GitLab to come up
|
||||
|
||||
- Let's watch what's happening in the GitLab namespace:
|
||||
```bash
|
||||
watch kubectl get all --namespace gitlab
|
||||
```
|
||||
|
||||
- We want to wait for all the Pods to be "Running" or "Completed"
|
||||
|
||||
- This will take a few minutes (10-15 minutes for me)
|
||||
|
||||
- Don't worry if you see Pods crashing and restarting
|
||||
|
||||
(it happens when they are waiting on a dependency which isn't up yet)
|
||||
|
||||
---
|
||||
|
||||
## Things that could go wrong
|
||||
|
||||
- Symptom: Pods remain "Pending" or "ContainerCreating" for a while
|
||||
|
||||
- Investigate these pods (with `kubectl describe pod ...`)
|
||||
|
||||
- Also look at events:
|
||||
```bash
|
||||
kubectl get events \
|
||||
--field-selector=type=Warning --sort-by=metadata.creationTimestamp
|
||||
```
|
||||
|
||||
- Make sure your cluster is big enough
|
||||
|
||||
(I use 3 `g6-standard-4` nodes)
|
||||
|
||||
---
|
||||
|
||||
## Log into GitLab
|
||||
|
||||
- First, let's check that we can connect to GitLab (with TLS):
|
||||
|
||||
`https://gitlab.$DOMAIN`
|
||||
|
||||
- It's asking us for a login and password!
|
||||
|
||||
- The login is `root`, and the password is stored in a Secret:
|
||||
```bash
|
||||
kubectl get secrets --namespace=gitlab gitlab-gitlab-initial-root-password \
|
||||
-o jsonpath={.data.password} | base64 -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Configure GitLab
|
||||
|
||||
- For simplicity, we're going to use that "root" user
|
||||
|
||||
(but later, you can create multiple users, teams, etc.)
|
||||
|
||||
- First, let's add our SSH key
|
||||
|
||||
(top-right user menu → settings, then SSH keys on the left)
|
||||
|
||||
- Then, create a project
|
||||
|
||||
(using the + menu next to the search bar on top)
|
||||
|
||||
- Let's call it `kubecoin`
|
||||
|
||||
(you can change it, but you'll have to adjust Git paths later on)
|
||||
|
||||
---
|
||||
|
||||
## Try to push our repository
|
||||
|
||||
- This is the repository that we're going to use:
|
||||
|
||||
https://github.com/jpetazzo/kubecoin
|
||||
|
||||
- Let's clone that repository locally first:
|
||||
```bash
|
||||
git clone https://github.com/jpetazzo/kubecoin
|
||||
```
|
||||
|
||||
- Add our GitLab instance as a remote:
|
||||
```bash
|
||||
git remote add gitlab git@gitlab.$DOMAIN:root/kubecoin.git
|
||||
```
|
||||
|
||||
- Try to push:
|
||||
```bash
|
||||
git push -u gitlab
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Connection refused?
|
||||
|
||||
- Normally, we get the following error:
|
||||
|
||||
`port 22: Connection refused`
|
||||
|
||||
- Why? 🤔
|
||||
|
||||
--
|
||||
|
||||
- What does `gitlab.$DOMAIN` point to?
|
||||
|
||||
--
|
||||
|
||||
- Our Ingress Controller! (i.e. Traefik) 💡
|
||||
|
||||
- Our Ingress Controller has nothing to do with port 22
|
||||
|
||||
- So how do we solve this?
|
||||
|
||||
---
|
||||
|
||||
## Routing port 22
|
||||
|
||||
- Whatever is on `gitlab.$DOMAIN` needs to have the following "routing":
|
||||
|
||||
- port 80 → GitLab web service
|
||||
|
||||
- port 443 → GitLab web service, with TLS
|
||||
|
||||
- port 22 → GitLab shell service
|
||||
|
||||
- Currently, Traefik is managing `gitlab.$DOMAIN`
|
||||
|
||||
- We are going to tell Traefik to:
|
||||
|
||||
- accept connections on port 22
|
||||
|
||||
- send them to GitLab
|
||||
|
||||
---
|
||||
|
||||
## TCP routing
|
||||
|
||||
- The technique that we are going to use is specific to Traefik
|
||||
|
||||
- Other Ingress Controllers may or may not have similar features
|
||||
|
||||
- When they have similar features, they will be enabled very differently
|
||||
|
||||
---
|
||||
|
||||
## Telling Traefik to open port 22
|
||||
|
||||
- Let's reconfigure Traefik:
|
||||
```bash
|
||||
helm upgrade --install traefik traefik/traefik \
|
||||
--create-namespace --namespace traefik \
|
||||
--set "ports.websecure.tls.enabled=true" \
|
||||
--set "providers.kubernetesIngress.publishedService.enabled=true" \
|
||||
--set "ports.ssh.port=2222" \
|
||||
--set "ports.ssh.exposedPort=22" \
|
||||
--set "ports.ssh.expose=true" \
|
||||
--set "ports.ssh.protocol=TCP"
|
||||
```
|
||||
|
||||
- This creates a new "port" on Traefik, called "ssh", listening on port 22
|
||||
|
||||
- Internally, Traefik listens on port 2222 (for permission reasons)
|
||||
|
||||
- Note: Traefik docs also call these ports "entrypoints"
|
||||
|
||||
(these entrypoints are totally unrelated to the `ENTRYPOINT` in Dockerfiles)
|
||||
|
||||
---
|
||||
|
||||
## Knocking on port 22
|
||||
|
||||
- What happens if we try to connect to that port 22 right now?
|
||||
```bash
|
||||
curl gitlab.$DOMAIN:22
|
||||
```
|
||||
|
||||
- We hit GitLab's web service!
|
||||
|
||||
- We need to tell Traefik what to do with connections to that port 22
|
||||
|
||||
- For that, we will create a "TCP route"
|
||||
|
||||
---
|
||||
|
||||
## Traefik TCP route
|
||||
|
||||
The following custom resource tells Traefik to route the `ssh` port that we
|
||||
created earlier, to the `gitlab-gitlab-shell` service belonging to GitLab.
|
||||
|
||||
```yaml
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRouteTCP
|
||||
metadata:
|
||||
name: gitlab-shell
|
||||
namespace: gitlab
|
||||
spec:
|
||||
entryPoints:
|
||||
- ssh
|
||||
routes:
|
||||
- match: HostSNI(\`*\`)
|
||||
services:
|
||||
- name: gitlab-gitlab-shell
|
||||
port: 22
|
||||
```
|
||||
|
||||
The `HostSNI` wildcard is the magic option to define a "default route".
|
||||
|
||||
---
|
||||
|
||||
## Creating the TCP route
|
||||
|
||||
Since our manifest has backticks, we must pay attention to quoting:
|
||||
|
||||
```bash
|
||||
kubectl apply -f- << "EOF"
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRouteTCP
|
||||
metadata:
|
||||
name: gitlab-shell
|
||||
namespace: gitlab
|
||||
spec:
|
||||
entryPoints:
|
||||
- ssh
|
||||
routes:
|
||||
- match: HostSNI(\`*\`)
|
||||
services:
|
||||
- name: gitlab-gitlab-shell
|
||||
port: 22
|
||||
EOF
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Knocking on port 22, again
|
||||
|
||||
- Let's see what happens if we try port 22 now:
|
||||
```bash
|
||||
curl gitlab.$DOMAIN:22
|
||||
```
|
||||
|
||||
- This should tell us something like `Received HTTP/0.9 when not allowed`
|
||||
|
||||
(because we're no longer talking to an HTTP server, but to SSH!)
|
||||
|
||||
- Try with SSH:
|
||||
```bash
|
||||
ssh git@gitlab.$DOMAIN
|
||||
```
|
||||
|
||||
- After accepting the key fingerprint, we should see `Welcome to GitLab, @root!`
|
||||
|
||||
---
|
||||
|
||||
## Pushing again
|
||||
|
||||
- Now we can try to push our repository again:
|
||||
```bash
|
||||
git push -u gitlab
|
||||
```
|
||||
|
||||
- Reload the project page in GitLab
|
||||
|
||||
- We should see our repository!
|
||||
|
||||
---
|
||||
|
||||
## CI/CD
|
||||
|
||||
- Click on the CI/CD tab on the left
|
||||
|
||||
(the one with the shuttle / space rocket icon)
|
||||
|
||||
- Our pipeline was detected...
|
||||
|
||||
- But it failed 😕
|
||||
|
||||
- Let's click on one of the failed jobs
|
||||
|
||||
- This is a permission issue!
|
||||
|
||||
---
|
||||
|
||||
## Fixing permissions
|
||||
|
||||
- GitLab needs to do a few things in our cluster:
|
||||
|
||||
- create Pods to build our container images with BuildKit
|
||||
|
||||
- create Namespaces to deploy staging and production versions of our app
|
||||
|
||||
- create and update resources in these Namespaces
|
||||
|
||||
- For the time being, we're going to grant broad permissions
|
||||
|
||||
(and we will revisit and discuss what to do later)
|
||||
|
||||
---
|
||||
|
||||
## Granting permissions
|
||||
|
||||
- Let's give `cluster-admin` permissions to the GitLab ServiceAccount:
|
||||
```bash
|
||||
kubectl create clusterrolebinding gitlab \
|
||||
--clusterrole=cluster-admin --serviceaccount=gitlab:default
|
||||
```
|
||||
|
||||
- Then retry the CI/CD pipeline
|
||||
|
||||
- The build steps will now succeed; but the deploy steps will fail
|
||||
|
||||
- We need to set the `REGISTRY_USER` and `REGISTRY_PASSWORD` variables
|
||||
|
||||
- Let's explain what this is about!
|
||||
|
||||
---
|
||||
|
||||
## GitLab container registry access
|
||||
|
||||
- A registry access token is created for the duration of the CI/CD pipeline
|
||||
|
||||
(it is exposed through the `$CI_JOB_TOKEN` environment variable)
|
||||
|
||||
- This token gives access only to a specific repository in the registry
|
||||
|
||||
- It is valid only during the execution of the CI/CD pipeline
|
||||
|
||||
- We can (and we do!) use it to *push* images to the registry
|
||||
|
||||
- We cannot use it to *pull* images when running in staging or production
|
||||
|
||||
(because Kubernetes might need to pull images *after* the token expires)
|
||||
|
||||
- We need to create a separate read-only registry access token
|
||||
|
||||
---
|
||||
|
||||
## Creating the registry access token
|
||||
|
||||
- Let's go to "Settings" (the cog wheel on the left) / "Access Tokens"
|
||||
|
||||
- Create a token with `read_registry` permission
|
||||
|
||||
- Save the token name and the token value
|
||||
|
||||
- Then go to "Settings" / "CI/CD"
|
||||
|
||||
- In the "Variables" section, add two variables:
|
||||
|
||||
- `REGISTRY_USER` → token name
|
||||
- `REGISTRY_PASSWORD` → token value
|
||||
|
||||
- Make sure that they are **not** protected!
|
||||
|
||||
(otherwise, they won't be available in non-default tags and branches)
|
||||
|
||||
---
|
||||
|
||||
## Trying again
|
||||
|
||||
- Go back to the CI/CD pipeline view, and hit "Retry"
|
||||
|
||||
- The deploy stage should now work correctly! 🎉
|
||||
|
||||
---
|
||||
|
||||
## Our CI/CD pipeline
|
||||
|
||||
- Let's have a look at the [.gitlab-ci.yml](https://github.com/jpetazzo/kubecoin/blob/107dac5066087c52747e557babc97e57f42dd71d/.gitlab-ci.yml) file
|
||||
|
||||
- We have multiple *stages*:
|
||||
|
||||
- lint (currently doesn't do much, it's mostly as an example)
|
||||
|
||||
- build (currently uses BuildKit)
|
||||
|
||||
- deploy
|
||||
|
||||
- "Deploy" behaves differently in staging and production
|
||||
|
||||
- Let's investigate that!
|
||||
|
||||
---
|
||||
|
||||
## Staging vs production
|
||||
|
||||
- In our pipeline, "production" means "a tag or branch named `production`"
|
||||
|
||||
(see the `except:` and `only:` sections)
|
||||
|
||||
- Everything else is "staging"
|
||||
|
||||
- In "staging":
|
||||
|
||||
- we build and push images
|
||||
- we create a staging Namespace and deploy a copy of the app there
|
||||
|
||||
- In "production":
|
||||
|
||||
- we do not build anything
|
||||
- we deploy (or update) a copy of the app in the production Namespace
|
||||
|
||||
---
|
||||
|
||||
## Namespace naming
|
||||
|
||||
- GitLab will create Namespaces named `gl-<user>-<project>-<hash>`
|
||||
|
||||
- At the end of the deployment, the web UI will be available at:
|
||||
|
||||
`http://<user>-<project>-<githash>-gitlab.<domain>`
|
||||
|
||||
- The "production" Namespace will be `<user>-<project>`
|
||||
|
||||
- And it will be available on its own domain as well:
|
||||
|
||||
`http://<user>-<project>-gitlab.<domain>`
|
||||
|
||||
---
|
||||
|
||||
## Production
|
||||
|
||||
- `git tag -f production && git push -f --tags`
|
||||
|
||||
- Our CI/CD pipeline will deploy on the production URL
|
||||
|
||||
(`http://<user>-<project>-gitlab.<domain>`)
|
||||
|
||||
- It will do it *only* if that same git commit was pushed to staging first
|
||||
|
||||
(because the "production" pipeline skips the build phase)
|
||||
|
||||
---
|
||||
|
||||
## Let's talk about build
|
||||
|
||||
- There are many ways to build container images on Kubernetes
|
||||
|
||||
- ~~And they all suck~~ Many of them have inconveniencing issues
|
||||
|
||||
- Let's do a quick review!
|
||||
|
||||
---
|
||||
|
||||
## Docker-based approaches
|
||||
|
||||
- Bind-mount the Docker socket
|
||||
|
||||
- very easy, but requires Docker Engine
|
||||
- build resource usage "evades" Kubernetes scheduler
|
||||
- insecure
|
||||
|
||||
- Docker-in-Docker in a pod
|
||||
|
||||
- requires privileged pod
|
||||
- insecure
|
||||
- approaches like rootless or sysbox might help in the future
|
||||
|
||||
- External build host
|
||||
|
||||
- more secure
|
||||
- requires resources outside of the Kubernetes cluster
|
||||
|
||||
---
|
||||
|
||||
## Non-privileged builders
|
||||
|
||||
- Kaniko
|
||||
|
||||
- each build runs in its own container or pod
|
||||
- no caching by default
|
||||
- registry-based caching is possible
|
||||
|
||||
- BuildKit / `docker buildx`
|
||||
|
||||
- can leverage Docker Engine or long-running Kubernetes worker pod
|
||||
- supports distributed, multi-arch build farms
|
||||
- basic caching out of the box
|
||||
- can also leverage registry-based caching
|
||||
|
||||
---
|
||||
|
||||
## Other approaches
|
||||
|
||||
- Ditch the Dockerfile!
|
||||
|
||||
- bazel
|
||||
|
||||
- jib
|
||||
|
||||
- ko
|
||||
|
||||
- etc.
|
||||
|
||||
---
|
||||
|
||||
## Discussion
|
||||
|
||||
- Our CI/CD workflow is just *one* of the many possibilities
|
||||
|
||||
- It would be nice to add some actual unit or e2e tests
|
||||
|
||||
- Map the production namespace to a "real" domain name
|
||||
|
||||
- Automatically remove older staging environments
|
||||
|
||||
(see e.g. [kube-janitor](https://codeberg.org/hjacobs/kube-janitor))
|
||||
|
||||
- Deploy production to a separate cluster
|
||||
|
||||
- Better segregate permissions
|
||||
|
||||
(don't give `cluster-admin` to the GitLab pipeline)
|
||||
|
||||
---
|
||||
|
||||
## Why not use GitLab's Kubernetes integration?
|
||||
|
||||
- "All-in-one" approach
|
||||
|
||||
(deploys its own Ingress, cert-manager, Prometheus, and much more)
|
||||
|
||||
- I wanted to show you something flexible and customizable instead
|
||||
|
||||
- But feel free to explore it now that we have shown the basics!
|
||||
|
||||
???
|
||||
|
||||
:EN:- CI/CD with GitLab
|
||||
:FR:- CI/CD avec GitLab
|
||||
@@ -40,7 +40,22 @@
|
||||
|
||||
- a `Chart.yaml` file, containing metadata (name, version, description ...)
|
||||
|
||||
- Let's look at a simple chart, `stable/tomcat`
|
||||
- Let's look at a simple chart for a basic demo app
|
||||
|
||||
---
|
||||
|
||||
## Adding the repo
|
||||
|
||||
- If you haven't done it before, you need to add the repo for that chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the repo that holds the chart for the OWASP Juice Shop:
|
||||
```bash
|
||||
helm repo add juice https://charts.securecodebox.io
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -50,17 +65,17 @@
|
||||
|
||||
.exercise[
|
||||
|
||||
- Download the tarball for `stable/tomcat`:
|
||||
- Download the tarball for `juice/juice-shop`:
|
||||
```bash
|
||||
helm pull stable/tomcat
|
||||
helm pull juice/juice-shop
|
||||
```
|
||||
(This will create a file named `tomcat-X.Y.Z.tgz`.)
|
||||
(This will create a file named `juice-shop-X.Y.Z.tgz`.)
|
||||
|
||||
- Or, download + untar `stable/tomcat`:
|
||||
- Or, download + untar `juice/juice-shop`:
|
||||
```bash
|
||||
helm pull stable/tomcat --untar
|
||||
helm pull juice/juice-shop --untar
|
||||
```
|
||||
(This will create a directory named `tomcat`.)
|
||||
(This will create a directory named `juice-shop`.)
|
||||
|
||||
]
|
||||
|
||||
@@ -68,13 +83,13 @@
|
||||
|
||||
## Looking at the chart's content
|
||||
|
||||
- Let's look at the files and directories in the `tomcat` chart
|
||||
- Let's look at the files and directories in the `juice-shop` chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display the tree structure of the chart we just downloaded:
|
||||
```bash
|
||||
tree tomcat
|
||||
tree juice-shop
|
||||
```
|
||||
|
||||
]
|
||||
@@ -93,12 +108,11 @@ We see the components mentioned above: `Chart.yaml`, `templates/`, `values.yaml`
|
||||
|
||||
(using the standard Go template library)
|
||||
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the template file for the tomcat Service resource:
|
||||
- Look at the template file for the Service resource:
|
||||
```bash
|
||||
cat tomcat/templates/appsrv-svc.yaml
|
||||
cat juice-shop/templates/service.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -190,7 +204,7 @@ We see the components mentioned above: `Chart.yaml`, `templates/`, `values.yaml`
|
||||
|
||||
- At the top-level of the chart, it's a good idea to have a README
|
||||
|
||||
- It will be viewable with e.g. `helm show readme stable/tomcat`
|
||||
- It will be viewable with e.g. `helm show readme juice/juice-shop`
|
||||
|
||||
- In the `templates/` directory, we can also have a `NOTES.txt` file
|
||||
|
||||
|
||||
338
slides/k8s/helm-dependencies.md
Normal file
@@ -0,0 +1,338 @@
|
||||
# Charts using other charts
|
||||
|
||||
- Helm charts can have *dependencies* on other charts
|
||||
|
||||
- These dependencies will help us to share or reuse components
|
||||
|
||||
(so that we write and maintain less manifests, less templates, less code!)
|
||||
|
||||
- As an example, we will use a community chart for Redis
|
||||
|
||||
- This will help people who write charts, and people who use them
|
||||
|
||||
- ... And potentially remove a lot of code! ✌️
|
||||
|
||||
---
|
||||
|
||||
## Redis in DockerCoins
|
||||
|
||||
- In the DockerCoins demo app, we have 5 components:
|
||||
|
||||
- 2 internal webservices
|
||||
- 1 worker
|
||||
- 1 public web UI
|
||||
- 1 Redis data store
|
||||
|
||||
- Every component is running some custom code, except Redis
|
||||
|
||||
- Every component is using a custom image, except Redis
|
||||
|
||||
(which is using the official `redis` image)
|
||||
|
||||
- Could we use a standard chart for Redis?
|
||||
|
||||
- Yes! Dependencies to the rescue!
|
||||
|
||||
---
|
||||
|
||||
## Adding our dependency
|
||||
|
||||
- First, we will add the dependency to the `Chart.yaml` file
|
||||
|
||||
- Then, we will ask Helm to download that dependency
|
||||
|
||||
- We will also *lock* the dependency
|
||||
|
||||
(lock it to a specific version, to ensure reproducibility)
|
||||
|
||||
---
|
||||
|
||||
## Declaring the dependency
|
||||
|
||||
- First, let's edit `Chart.yaml`
|
||||
|
||||
.exercise[
|
||||
|
||||
- In `Chart.yaml`, fill the `dependencies` section:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: 11.0.5
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Where do that `repository` and `version` come from?
|
||||
|
||||
We're assuming here that we did our research,
|
||||
or that our resident Helm expert advised us to
|
||||
use Bitnami's Redis chart.
|
||||
|
||||
---
|
||||
|
||||
## Conditions
|
||||
|
||||
- The `condition` field gives us a way to enable/disable the dependency:
|
||||
```yaml
|
||||
condition: redis.enabled
|
||||
```
|
||||
|
||||
- Here, we can disable Redis with the Helm flag `--set redis.enabled=false`
|
||||
|
||||
(or set that value in a `values.yaml` file)
|
||||
|
||||
- Of course, this is mostly useful for *optional* dependencies
|
||||
|
||||
(otherwise, the app ends up being broken since it'll miss a component)
|
||||
|
||||
---
|
||||
|
||||
## Lock & Load!
|
||||
|
||||
- After adding the dependency, we ask Helm to pin and download it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Ask Helm:
|
||||
```bash
|
||||
helm dependency update
|
||||
```
|
||||
|
||||
(Or `helm dep up`)
|
||||
|
||||
]
|
||||
|
||||
- This will create `Chart.lock` and fetch the dependency
|
||||
|
||||
---
|
||||
|
||||
## What's `Chart.lock`?
|
||||
|
||||
- This is a common pattern with dependencies
|
||||
|
||||
(see also: `Gemfile.lock`, `package.json.lock`, and many others)
|
||||
|
||||
- This lets us define loose dependencies in `Chart.yaml`
|
||||
|
||||
(e.g. "version 11.whatever, but below 12")
|
||||
|
||||
- But have the exact version used in `Chart.lock`
|
||||
|
||||
- This ensures reproducible deployments
|
||||
|
||||
- `Chart.lock` can (should!) be added to our source tree
|
||||
|
||||
- `Chart.lock` can (should!) regularly be updated
|
||||
|
||||
---
|
||||
|
||||
## Loose dependencies
|
||||
|
||||
- Here is an example of loose version requirement:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: ">=11 <12"
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
- This makes sure that we have the most recent version in the 11.x train
|
||||
|
||||
- ... But without upgrading to version 12.x
|
||||
|
||||
(because it might be incompatible)
|
||||
|
||||
---
|
||||
|
||||
## `build` vs `update`
|
||||
|
||||
- Helm actually offers two commands to manage dependencies:
|
||||
|
||||
`helm dependency build` = fetch dependencies listed in `Chart.lock`
|
||||
|
||||
`helm dependency update` = update `Chart.lock` (and run `build`)
|
||||
|
||||
- When the dependency gets updated, we can/should:
|
||||
|
||||
- `helm dep up` (update `Chart.lock` and fetch new chart)
|
||||
|
||||
- test!
|
||||
|
||||
- if everything is fine, `git add Chart.lock` and commit
|
||||
|
||||
---
|
||||
|
||||
## Where are my dependencies?
|
||||
|
||||
- Dependencies are downloaded to the `charts/` subdirectory
|
||||
|
||||
- When they're downloaded, they stay in compressed format (`.tgz`)
|
||||
|
||||
- Should we commit them to our code repository?
|
||||
|
||||
- Pros:
|
||||
|
||||
- more resilient to internet/mirror failures/decommissioning
|
||||
|
||||
- Cons:
|
||||
|
||||
- can add a lot of weight to the repo if charts are big or change often
|
||||
|
||||
- this can be solved by extra tools like git-lfs
|
||||
|
||||
---
|
||||
|
||||
## Dependency tuning
|
||||
|
||||
- DockerCoins expects the `redis` Service to be named `redis`
|
||||
|
||||
- Our Redis chart uses a different Service name by default
|
||||
|
||||
- Service name is `{{ template "redis.fullname" . }}-master`
|
||||
|
||||
- `redis.fullname` looks like this:
|
||||
```
|
||||
{{- define "redis.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
[...]
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
```
|
||||
|
||||
- How do we fix this?
|
||||
|
||||
---
|
||||
|
||||
## Setting dependency variables
|
||||
|
||||
- If we set `fullnameOverride` to `redis`:
|
||||
|
||||
- the `{{ template ... }}` block will output `redis`
|
||||
|
||||
- the Service name will be `redis-master`
|
||||
|
||||
- A parent chart can set values for its dependencies
|
||||
|
||||
- For example, in the parent's `values.yaml`:
|
||||
|
||||
```yaml
|
||||
redis: # Name of the dependency
|
||||
fullnameOverride: redis # Value passed to redis
|
||||
cluster: # Other values passed to redis
|
||||
enabled: false
|
||||
```
|
||||
|
||||
- Users can also set variables with `--set=` or with `--values=`
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Passing templates
|
||||
|
||||
- We can even pass template `{{ include "template.name" }}`, but warning:
|
||||
|
||||
- need to be evaluated with the `tpl` function, on the child side
|
||||
|
||||
- evaluated in the context of the child, with no access to parent variables
|
||||
|
||||
<!-- FIXME this probably deserves an example, but I can't imagine one right now 😅 -->
|
||||
|
||||
---
|
||||
|
||||
## Getting rid of the `-master`
|
||||
|
||||
- Even if we set that `fullnameOverride`, the Service name will be `redis-master`
|
||||
|
||||
- To remove the `-master` suffix, we need to edit the chart itself
|
||||
|
||||
- To edit the Redis chart, we need to *embed* it in our own chart
|
||||
|
||||
- We need to:
|
||||
|
||||
- decompress the chart
|
||||
|
||||
- adjust `Chart.yaml` accordingly
|
||||
|
||||
---
|
||||
|
||||
## Embedding a dependency
|
||||
|
||||
.exercise[
|
||||
|
||||
- Decompress the chart:
|
||||
```yaml
|
||||
cd charts
|
||||
tar zxf redis-*.tgz
|
||||
cd ..
|
||||
```
|
||||
|
||||
- Edit `Chart.yaml` and update the `dependencies` section:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: '*' # No need to constrain the version; it comes from local files
|
||||
```
|
||||
|
||||
- Run `helm dep update`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating the dependency
|
||||
|
||||
- Now we can edit the Service name
|
||||
|
||||
(it should be in `charts/redis/templates/redis-master-svc.yaml`)
|
||||
|
||||
- Then try to deploy the whole chart!
|
||||
|
||||
---
|
||||
|
||||
## Embedding a dependency multiple times
|
||||
|
||||
- What if we need multiple copies of the same subchart?
|
||||
|
||||
(for instance, if we need two completely different Redis servers)
|
||||
|
||||
- We can declare a dependency multiple times, and specify an `alias`:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: '*'
|
||||
alias: querycache
|
||||
- name: redis
|
||||
version: '*'
|
||||
alias: celeryqueue
|
||||
```
|
||||
|
||||
- `.Chart.Name` will be set to the `alias`
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Compatibility with Helm 2
|
||||
|
||||
- Chart `apiVersion: v1` is the only version supported by Helm 2
|
||||
|
||||
- Chart v1 is also supported by Helm 3
|
||||
|
||||
- Use v1 if you want to be compatible with Helm 2
|
||||
|
||||
- Instead of `Chart.yaml`, dependencies are defined in `requirements.yaml`
|
||||
|
||||
(and we should commit `requirements.lock` instead of `Chart.lock`)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Depending on other charts
|
||||
:EN:- Charts within charts
|
||||
|
||||
:FR:- Dépendances entre charts
|
||||
:FR:- Un chart peut en cacher un autre
|
||||
@@ -1,20 +1,84 @@
|
||||
# Managing stacks with Helm
|
||||
|
||||
- We created our first resources with `kubectl run`, `kubectl expose` ...
|
||||
- Helm is a (kind of!) package manager for Kubernetes
|
||||
|
||||
- We have also created resources by loading YAML files with `kubectl apply -f`
|
||||
- We can use it to:
|
||||
|
||||
- For larger stacks, managing thousands of lines of YAML is unreasonable
|
||||
- find existing packages (called "charts") created by other folks
|
||||
|
||||
- These YAML bundles need to be customized with variable parameters
|
||||
- install these packages, configuring them for our particular setup
|
||||
|
||||
(E.g.: number of replicas, image version to use ...)
|
||||
- package our own things (for distribution or for internal use)
|
||||
|
||||
- It would be nice to have an organized, versioned collection of bundles
|
||||
- manage the lifecycle of these installs (rollback to previous version etc.)
|
||||
|
||||
- It would be nice to be able to upgrade/rollback these bundles carefully
|
||||
- It's a "CNCF graduate project", indicating a certain level of maturity
|
||||
|
||||
- [Helm](https://helm.sh/) is an open source project offering all these things!
|
||||
(more on that later)
|
||||
|
||||
---
|
||||
|
||||
## From `kubectl run` to YAML
|
||||
|
||||
- We can create resources with one-line commands
|
||||
|
||||
  (`kubectl run`, `kubectl create deployment`, `kubectl expose`...)
|
||||
|
||||
- We can also create resources by loading YAML files
|
||||
|
||||
(with `kubectl apply -f`, `kubectl create -f`...)
|
||||
|
||||
- There can be multiple resources in a single YAML files
|
||||
|
||||
(making them convenient to deploy entire stacks)
|
||||
|
||||
- However, these YAML bundles often need to be customized
|
||||
|
||||
(e.g.: number of replicas, image version to use, features to enable...)
|
||||
|
||||
---
|
||||
|
||||
## Beyond YAML
|
||||
|
||||
- Very often, after putting together our first `app.yaml`, we end up with:
|
||||
|
||||
- `app-prod.yaml`
|
||||
|
||||
- `app-staging.yaml`
|
||||
|
||||
- `app-dev.yaml`
|
||||
|
||||
- instructions indicating to users "please tweak this and that in the YAML"
|
||||
|
||||
- That's where using something like
|
||||
[CUE](https://github.com/cuelang/cue/blob/v0.3.2/doc/tutorial/kubernetes/README.md),
|
||||
[Kustomize](https://kustomize.io/),
|
||||
or [Helm](https://helm.sh/) can help!
|
||||
|
||||
- Now we can do something like this:
|
||||
```bash
|
||||
helm install app ... --set this.parameter=that.value
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Other features of Helm
|
||||
|
||||
- With Helm, we create "charts"
|
||||
|
||||
- These charts can be used internally or distributed publicly
|
||||
|
||||
- Public charts can be indexed through the [Artifact Hub](https://artifacthub.io/)
|
||||
|
||||
- This gives us a way to find and install other folks' charts
|
||||
|
||||
- Helm also gives us ways to manage the lifecycle of what we install:
|
||||
|
||||
- keep track of what we have installed
|
||||
|
||||
- upgrade versions, change parameters, roll back, uninstall
|
||||
|
||||
- Furthermore, even if it's not "the" standard, it's definitely "a" standard!
|
||||
|
||||
---
|
||||
|
||||
@@ -229,55 +293,95 @@ fine for personal and development clusters.)
|
||||
|
||||
---
|
||||
|
||||
## Managing repositories
|
||||
class: extra-details
|
||||
|
||||
- Let's check what repositories we have, and add the `stable` repo
|
||||
## How to find charts, the old way
|
||||
|
||||
(the `stable` repo contains a set of official-ish charts)
|
||||
- Helm 2 came with one pre-configured repo, the "stable" repo
|
||||
|
||||
.exercise[
|
||||
(located at https://charts.helm.sh/stable)
|
||||
|
||||
- List our repos:
|
||||
```bash
|
||||
helm repo list
|
||||
```
|
||||
- Helm 3 doesn't have any pre-configured repo
|
||||
|
||||
- Add the `stable` repo:
|
||||
```bash
|
||||
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
|
||||
```
|
||||
- The "stable" repo mentioned above is now being deprecated
|
||||
|
||||
]
|
||||
- The new approach is to have fully decentralized repos
|
||||
|
||||
Adding a repo can take a few seconds (it downloads the list of charts from the repo).
|
||||
- Repos can be indexed in the Artifact Hub
|
||||
|
||||
It's OK to add a repo that already exists (it will merely update it).
|
||||
(which supersedes the Helm Hub)
|
||||
|
||||
---
|
||||
|
||||
## Search available charts
|
||||
## How to find charts, the new way
|
||||
|
||||
- We can search available charts with `helm search`
|
||||
- Go to the [Artifact Hub](https://artifacthub.io/packages/search?kind=0) (https://artifacthub.io)
|
||||
|
||||
- We need to specify where to search (only our repos, or Helm Hub)
|
||||
- Or use `helm search hub ...` from the CLI
|
||||
|
||||
- Let's search for all charts mentioning tomcat!
|
||||
- Let's try to find a Helm chart for something called "OWASP Juice Shop"!
|
||||
|
||||
(it is a famous demo app used in security challenges)
|
||||
|
||||
---
|
||||
|
||||
## Finding charts from the CLI
|
||||
|
||||
- We can use `helm search hub <keyword>`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Search for tomcat in the repo that we added earlier:
|
||||
- Look for the OWASP Juice Shop app:
|
||||
```bash
|
||||
helm search repo tomcat
|
||||
helm search hub owasp juice
|
||||
```
|
||||
|
||||
- Search for tomcat on the Helm Hub:
|
||||
- Since the URLs are truncated, try with the YAML output:
|
||||
```bash
|
||||
helm search hub tomcat
|
||||
helm search hub owasp juice -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
[Helm Hub](https://hub.helm.sh/) indexes many repos, using the [Monocular](https://github.com/helm/monocular) server.
|
||||
Then go to → https://artifacthub.io/packages/helm/securecodebox/juice-shop
|
||||
|
||||
---
|
||||
|
||||
## Finding charts on the web
|
||||
|
||||
- We can also use the Artifact Hub search feature
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to https://artifacthub.io/
|
||||
|
||||
- In the search box on top, enter "owasp juice"
|
||||
|
||||
- Click on the "juice-shop" result (not "multi-juicer" or "juicy-ctf")
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Installing the chart
|
||||
|
||||
- Click on the "Install" button, it will show instructions
|
||||
|
||||
.exercise[
|
||||
|
||||
- First, add the repository for that chart:
|
||||
```bash
|
||||
helm repo add juice https://charts.securecodebox.io
|
||||
```
|
||||
|
||||
- Then, install the chart:
|
||||
```bash
|
||||
helm install my-juice-shop juice/juice-shop
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it is also possible to install directly a chart, with `--repo https://...`
|
||||
|
||||
---
|
||||
|
||||
@@ -285,22 +389,22 @@ It's OK to add a repo that already exists (it will merely update it).
|
||||
|
||||
- "Installing a chart" means creating a *release*
|
||||
|
||||
- We need to name that release
|
||||
- In the previous example, the release was named "my-juice-shop"
|
||||
|
||||
(or use the `--generate-name` to get Helm to generate one for us)
|
||||
- We can also use `--generate-name` to ask Helm to generate a name for us
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the tomcat chart that we found earlier:
|
||||
```bash
|
||||
helm install java4ever stable/tomcat
|
||||
```
|
||||
|
||||
- List the releases:
|
||||
```bash
|
||||
helm list
|
||||
```
|
||||
|
||||
- Check that we have a `my-juice-shop-...` Pod up and running:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
@@ -313,13 +417,13 @@ class: extra-details
|
||||
|
||||
- The `helm search` command only takes a search string argument
|
||||
|
||||
(e.g. `helm search tomcat`)
|
||||
(e.g. `helm search juice-shop`)
|
||||
|
||||
- With Helm 2, the name is optional:
|
||||
|
||||
`helm install stable/tomcat` will automatically generate a name
|
||||
`helm install juice/juice-shop` will automatically generate a name
|
||||
|
||||
`helm install --name java4ever stable/tomcat` will specify a name
|
||||
`helm install --name my-juice-shop juice/juice-shop` will specify a name
|
||||
|
||||
---
|
||||
|
||||
@@ -333,12 +437,12 @@ class: extra-details
|
||||
|
||||
- List all the resources created by this release:
|
||||
```bash
|
||||
kubectl get all --selector=release=java4ever
|
||||
kubectl get all --selector=app.kubernetes.io/instance=my-juice-shop
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: this `release` label wasn't added automatically by Helm.
|
||||
Note: this label wasn't added automatically by Helm.
|
||||
<br/>
|
||||
It is defined in that chart. In other words, not all charts will provide this label.
|
||||
|
||||
@@ -346,11 +450,11 @@ It is defined in that chart. In other words, not all charts will provide this la
|
||||
|
||||
## Configuring a release
|
||||
|
||||
- By default, `stable/tomcat` creates a service of type `LoadBalancer`
|
||||
- By default, `juice/juice-shop` creates a service of type `ClusterIP`
|
||||
|
||||
- We would like to change that to a `NodePort`
|
||||
|
||||
- We could use `kubectl edit service java4ever-tomcat`, but ...
|
||||
- We could use `kubectl edit service my-juice-shop`, but ...
|
||||
|
||||
... our changes would get overwritten next time we update that chart!
|
||||
|
||||
@@ -370,14 +474,14 @@ It is defined in that chart. In other words, not all charts will provide this la
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the README for tomcat:
|
||||
- Look at the README for the app:
|
||||
```bash
|
||||
helm show readme stable/tomcat
|
||||
helm show readme juice/juice-shop
|
||||
```
|
||||
|
||||
- Look at the values and their defaults:
|
||||
```bash
|
||||
helm show values stable/tomcat
|
||||
helm show values juice/juice-shop
|
||||
```
|
||||
|
||||
]
|
||||
@@ -394,18 +498,18 @@ The `readme` may or may not have (accurate) explanations for the values.
|
||||
|
||||
- Values can be set when installing a chart, or when upgrading it
|
||||
|
||||
- We are going to update `java4ever` to change the type of the service
|
||||
- We are going to update `my-juice-shop` to change the type of the service
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update `java4ever`:
|
||||
- Update `my-juice-shop`:
|
||||
```bash
|
||||
helm upgrade java4ever stable/tomcat --set service.type=NodePort
|
||||
helm upgrade my-juice-shop juice/juice-shop --set service.type=NodePort
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note that we have to specify the chart that we use (`stable/tomcat`),
|
||||
Note that we have to specify the chart that we use (`juice/juice-shop`),
|
||||
even if we just want to update some values.
|
||||
|
||||
We can set multiple values. If we want to set many values, we can use `-f`/`--values` and pass a YAML file with all the values.
|
||||
@@ -414,25 +518,21 @@ All unspecified values will take the default values defined in the chart.
|
||||
|
||||
---
|
||||
|
||||
## Connecting to tomcat
|
||||
## Connecting to the Juice Shop
|
||||
|
||||
- Let's check the tomcat server that we just installed
|
||||
|
||||
- Note: its readiness probe has a 60s delay
|
||||
|
||||
(so it will take 60s after the initial deployment before the service works)
|
||||
- Let's check the app that we just installed
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the node port allocated to the service:
|
||||
```bash
|
||||
kubectl get service java4ever-tomcat
|
||||
PORT=$(kubectl get service java4ever-tomcat -o jsonpath={..nodePort})
|
||||
kubectl get service my-juice-shop
|
||||
PORT=$(kubectl get service my-juice-shop -o jsonpath={..nodePort})
|
||||
```
|
||||
|
||||
- Connect to it, checking the demo app on `/sample/`:
|
||||
- Connect to it:
|
||||
```bash
|
||||
curl localhost:$PORT/sample/
|
||||
curl localhost:$PORT/
|
||||
```
|
||||
|
||||
]
|
||||
@@ -446,3 +546,17 @@ All unspecified values will take the default values defined in the chart.
|
||||
:FR:- Fonctionnement général de Helm
|
||||
:FR:- Installer des composants via Helm
|
||||
:FR:- Helm 2, Helm 3, et le *Helm Hub*
|
||||
|
||||
:T: Getting started with Helm and its concepts
|
||||
|
||||
:Q: Which comparison is the most adequate?
|
||||
:A: Helm is a firewall, charts are access lists
|
||||
:A: ✔️Helm is a package manager, charts are packages
|
||||
:A: Helm is an artefact repository, charts are artefacts
|
||||
:A: Helm is a CI/CD platform, charts are CI/CD pipelines
|
||||
|
||||
:Q: What's required to distribute a Helm chart?
|
||||
:A: A Helm commercial license
|
||||
:A: A Docker registry
|
||||
:A: An account on the Helm Hub
|
||||
:A: ✔️An HTTP server
|
||||
|
||||
@@ -12,22 +12,37 @@
|
||||
|
||||
---
|
||||
|
||||
## Adding the repo
|
||||
|
||||
- If you haven't done it before, you need to add the repo for that chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the repo that holds the chart for the OWASP Juice Shop:
|
||||
```bash
|
||||
helm repo add juice https://charts.securecodebox.io
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## We need a release
|
||||
|
||||
- We need to install something with Helm
|
||||
|
||||
- Let's use the `stable/tomcat` chart as an example
|
||||
- Let's use the `juice/juice-shop` chart as an example
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install a release called `tomcat` with the chart `stable/tomcat`:
|
||||
- Install a release called `orange` with the chart `juice/juice-shop`:
|
||||
```bash
|
||||
helm upgrade tomcat stable/tomcat --install
|
||||
helm upgrade orange juice/juice-shop --install
|
||||
```
|
||||
|
||||
- Let's upgrade that release, and change a value:
|
||||
```bash
|
||||
helm upgrade tomcat stable/tomcat --set ingress.enabled=true
|
||||
helm upgrade orange juice/juice-shop --set ingress.enabled=true
|
||||
```
|
||||
|
||||
]
|
||||
@@ -42,7 +57,7 @@
|
||||
|
||||
- View the history for that release:
|
||||
```bash
|
||||
helm history tomcat
|
||||
helm history orange
|
||||
```
|
||||
|
||||
]
|
||||
@@ -82,11 +97,11 @@ We should see a number of secrets with TYPE `helm.sh/release.v1`.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Examine the secret corresponding to the second release of `tomcat`:
|
||||
- Examine the secret corresponding to the second release of `orange`:
|
||||
```bash
|
||||
kubectl describe secret sh.helm.release.v1.tomcat.v2
|
||||
kubectl describe secret sh.helm.release.v1.orange.v2
|
||||
```
|
||||
(`v1` is the secret format; `v2` means revision 2 of the `tomcat` release)
|
||||
(`v1` is the secret format; `v2` means revision 2 of the `orange` release)
|
||||
|
||||
]
|
||||
|
||||
@@ -102,7 +117,7 @@ There is a key named `release`.
|
||||
|
||||
- Dump the secret:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release }}'
|
||||
```
|
||||
|
||||
@@ -120,7 +135,7 @@ Secrets are encoded in base64. We need to decode that!
|
||||
|
||||
- Decode the secret:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release | base64decode }}'
|
||||
```
|
||||
|
||||
@@ -144,7 +159,7 @@ Let's try one more round of decoding!
|
||||
|
||||
- Decode it twice:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}'
|
||||
```
|
||||
|
||||
@@ -164,7 +179,7 @@ Let's try one more round of decoding!
|
||||
|
||||
- Pipe the decoded release through `file -`:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}' \
|
||||
| file -
|
||||
```
|
||||
@@ -185,7 +200,7 @@ Gzipped data! It can be decoded with `gunzip -c`.
|
||||
|
||||
- Rerun the previous command, but with `| gunzip -c > release-info` :
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}' \
|
||||
| gunzip -c > release-info
|
||||
```
|
||||
@@ -211,7 +226,7 @@ If we inspect that JSON (e.g. with `jq keys release-info`), we see:
|
||||
- `config` (contains the values that we've set)
|
||||
- `info` (date of deployment, status messages)
|
||||
- `manifest` (YAML generated from the templates)
|
||||
- `name` (name of the release, so `tomcat`)
|
||||
- `name` (name of the release, so `orange`)
|
||||
- `namespace` (namespace where we deployed the release)
|
||||
- `version` (revision number within that release; starts at 1)
|
||||
|
||||
|
||||
191
slides/k8s/helm-values-schema-validation.md
Normal file
@@ -0,0 +1,191 @@
|
||||
# Helm and invalid values
|
||||
|
||||
- A lot of Helm charts let us specify an image tag like this:
|
||||
```bash
|
||||
helm install ... --set image.tag=v1.0
|
||||
```
|
||||
|
||||
- What happens if we make a small mistake, like this:
|
||||
```bash
|
||||
helm install ... --set imagetag=v1.0
|
||||
```
|
||||
|
||||
- Or even, like this:
|
||||
```bash
|
||||
helm install ... --set image=v1.0
|
||||
```
|
||||
|
||||
🤔
|
||||
|
||||
---
|
||||
|
||||
## Making mistakes
|
||||
|
||||
- In the first case:
|
||||
|
||||
- we set `imagetag=v1.0` instead of `image.tag=v1.0`
|
||||
|
||||
- Helm will ignore that value (if it's not used anywhere in templates)
|
||||
|
||||
- the chart is deployed with the default value instead
|
||||
|
||||
- In the second case:
|
||||
|
||||
- we set `image=v1.0` instead of `image.tag=v1.0`
|
||||
|
||||
- `image` will be a string instead of an object
|
||||
|
||||
- Helm will *probably* fail when trying to evaluate `image.tag`
|
||||
|
||||
---
|
||||
|
||||
## Preventing mistakes
|
||||
|
||||
- To prevent the first mistake, we need to tell Helm:
|
||||
|
||||
  *"let me know if any additional (unknown) value was set!"*
|
||||
|
||||
- To prevent the second mistake, we need to tell Helm:
|
||||
|
||||
*"`image` should be an object, and `image.tag` should be a string!"*
|
||||
|
||||
- We can do this with *values schema validation*
|
||||
|
||||
---
|
||||
|
||||
## Helm values schema validation
|
||||
|
||||
- We can write a spec representing the possible values accepted by the chart
|
||||
|
||||
- Helm will check the validity of the values before trying to install/upgrade
|
||||
|
||||
- If it finds problems, it will stop immediately
|
||||
|
||||
- The spec uses [JSON Schema](https://json-schema.org/):
|
||||
|
||||
*JSON Schema is a vocabulary that allows you to annotate and validate JSON documents.*
|
||||
|
||||
- JSON Schema is designed for JSON, but can easily work with YAML too
|
||||
|
||||
(or any language with `map|dict|associativearray` and `list|array|sequence|tuple`)
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We need to put the JSON Schema spec in a file called `values.schema.json`
|
||||
|
||||
(at the root of our chart; right next to `values.yaml` etc.)
|
||||
|
||||
- The file is optional
|
||||
|
||||
- We don't need to register or declare it in `Chart.yaml` or anywhere
|
||||
|
||||
- Let's write a schema that will verify that ...
|
||||
|
||||
- `image.repository` is an official image (string without slashes or dots)
|
||||
|
||||
- `image.pullPolicy` can only be `Always`, `Never`, `IfNotPresent`
|
||||
|
||||
---
|
||||
|
||||
## `values.schema.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "http://json-schema.org/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"image": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"repository": {
|
||||
"type": "string",
|
||||
"pattern": "^[a-z0-9-_]+$"
|
||||
},
|
||||
"pullPolicy": {
|
||||
"type": "string",
|
||||
"pattern": "^(Always|Never|IfNotPresent)$"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing our schema
|
||||
|
||||
- Let's try to install a couple releases with that schema!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try an invalid `pullPolicy`:
|
||||
```bash
|
||||
helm install broken --set image.pullPolicy=ShallNotPass
|
||||
```
|
||||
|
||||
- Try an invalid value:
|
||||
```bash
|
||||
helm install should-break --set ImAgeTAg=toto
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- The first one fails, but the second one still passes ...
|
||||
|
||||
- Why?
|
||||
|
||||
---
|
||||
|
||||
## Bailing out on unknown properties
|
||||
|
||||
- We told Helm what properties (values) were valid
|
||||
|
||||
- We didn't say what to do about additional (unknown) properties!
|
||||
|
||||
- We can fix that with `"additionalProperties": false`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `values.schema.json` to add `"additionalProperties": false`
|
||||
```json
|
||||
{
|
||||
"$schema": "http://json-schema.org/schema#",
|
||||
"type": "object",
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
...
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing with unknown properties
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to pass an extra property:
|
||||
```bash
|
||||
helm install should-break --set ImAgeTAg=toto
|
||||
```
|
||||
|
||||
- Try to pass an extra nested property:
|
||||
```bash
|
||||
helm install does-it-work --set image.hello=world
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The first command should break.
|
||||
|
||||
The second will not.
|
||||
|
||||
`"additionalProperties": false` needs to be specified at each level.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Helm schema validation
|
||||
:FR:- Validation de schema Helm
|
||||
659
slides/k8s/hpa-v2.md
Normal file
@@ -0,0 +1,659 @@
|
||||
# Scaling with custom metrics
|
||||
|
||||
- The HorizontalPodAutoscaler v1 can only scale on Pod CPU usage
|
||||
|
||||
- Sometimes, we need to scale using other metrics:
|
||||
|
||||
- memory
|
||||
|
||||
- requests per second
|
||||
|
||||
- latency
|
||||
|
||||
- active sessions
|
||||
|
||||
- items in a work queue
|
||||
|
||||
- ...
|
||||
|
||||
- The HorizontalPodAutoscaler v2 can do it!
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
⚠️ Autoscaling on custom metrics is fairly complex!
|
||||
|
||||
- We need some metrics system
|
||||
|
||||
(Prometheus is a popular option, but others are possible too)
|
||||
|
||||
- We need our metrics (latency, traffic...) to be fed in the system
|
||||
|
||||
(with Prometheus, this might require a custom exporter)
|
||||
|
||||
- We need to expose these metrics to Kubernetes
|
||||
|
||||
(Kubernetes doesn't "speak" the Prometheus API)
|
||||
|
||||
- Then we can set up autoscaling!
|
||||
|
||||
---
|
||||
|
||||
## The plan
|
||||
|
||||
- We will deploy the DockerCoins demo app
|
||||
|
||||
(one of its components has a bottleneck; its latency will increase under load)
|
||||
|
||||
- We will use Prometheus to collect and store metrics
|
||||
|
||||
- We will deploy a tiny HTTP latency monitor (a Prometheus *exporter*)
|
||||
|
||||
- We will deploy the "Prometheus adapter"
|
||||
|
||||
(mapping Prometheus metrics to Kubernetes-compatible metrics)
|
||||
|
||||
- We will create an HorizontalPodAutoscaler 🎉
|
||||
|
||||
---
|
||||
|
||||
## Deploying DockerCoins
|
||||
|
||||
- That's the easy part!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace and switch to it:
|
||||
```bash
|
||||
kubectl create namespace customscaling
|
||||
kns customscaling
|
||||
```
|
||||
|
||||
- Deploy DockerCoins, and scale up the `worker` Deployment:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/dockercoins.yaml
|
||||
kubectl scale deployment worker --replicas=10
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Current state of affairs
|
||||
|
||||
- The `rng` service is a bottleneck
|
||||
|
||||
(it cannot handle more than 10 requests/second)
|
||||
|
||||
- With enough traffic, its latency increases
|
||||
|
||||
(by about 100ms per `worker` Pod after the 3rd worker)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the `webui` port and open it in your browser:
|
||||
```bash
|
||||
kubectl get service webui
|
||||
```
|
||||
|
||||
- Check the `rng` ClusterIP and test it with e.g. `httping`:
|
||||
```bash
|
||||
kubectl get service rng
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Measuring latency
|
||||
|
||||
- We will use a tiny custom Prometheus exporter, [httplat](https://github.com/jpetazzo/httplat)
|
||||
|
||||
- `httplat` exposes Prometheus metrics on port 9080 (by default)
|
||||
|
||||
- It monitors exactly one URL, that must be passed as a command-line argument
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy `httplat`:
|
||||
```bash
|
||||
kubectl create deployment httplat --image=jpetazzo/httplat -- httplat http://rng/
|
||||
```
|
||||
|
||||
- Expose it:
|
||||
```bash
|
||||
kubectl expose deployment httplat --port=9080
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Measuring latency in the real world
|
||||
|
||||
- We are using this tiny custom exporter for simplicity
|
||||
|
||||
- A more common method to collect latency is to use a service mesh
|
||||
|
||||
- A service mesh can usually collect latency for *all* services automatically
|
||||
|
||||
---
|
||||
|
||||
## Install Prometheus
|
||||
|
||||
- We will use the Prometheus community Helm chart
|
||||
|
||||
(because we can configure it dynamically with annotations)
|
||||
|
||||
.exercise[
|
||||
|
||||
- If it's not installed yet on the cluster, install Prometheus:
|
||||
```bash
|
||||
helm repo add prometheus-community
|
||||
https://prometheus-community.github.io/helm-charts
|
||||
helm upgrade prometheus prometheus-community/prometheus \
|
||||
--install \
|
||||
--namespace kube-system \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
--set server.persistentVolume.enabled=false \
|
||||
--set alertmanager.enabled=false
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Configure Prometheus
|
||||
|
||||
- We can use annotations to tell Prometheus to collect the metrics
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tell Prometheus to "scrape" our latency exporter:
|
||||
```bash
|
||||
kubectl annotate service httplat \
|
||||
prometheus.io/scrape=true \
|
||||
prometheus.io/port=9080 \
|
||||
prometheus.io/path=/metrics
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If you deployed Prometheus differently, you might have to configure it manually.
|
||||
|
||||
You'll need to instruct it to scrape http://httplat.customscaling.svc:9080/metrics.
|
||||
|
||||
---
|
||||
|
||||
## Make sure that metrics get collected
|
||||
|
||||
- Before moving on, confirm that Prometheus has our metrics
|
||||
|
||||
.exercise[
|
||||
|
||||
- Connect to Prometheus
|
||||
|
||||
(if you installed it like instructed above, it is exposed as a NodePort on port 30090)
|
||||
|
||||
- Check that `httplat` metrics are available
|
||||
|
||||
- You can try to graph the following PromQL expression:
|
||||
```
|
||||
rate(httplat_latency_seconds_sum[2m])/rate(httplat_latency_seconds_count[2m])
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- Make sure that the exporter works:
|
||||
|
||||
- get the ClusterIP of the exporter with `kubectl get svc httplat`
|
||||
|
||||
- `curl http://<ClusterIP>:9080/metrics`
|
||||
|
||||
- check that the result includes the `httplat` histogram
|
||||
|
||||
- Make sure that Prometheus is scraping the exporter:
|
||||
|
||||
- go to `Status` / `Targets` in Prometheus
|
||||
|
||||
- make sure that `httplat` shows up in there
|
||||
|
||||
---
|
||||
|
||||
## Creating the autoscaling policy
|
||||
|
||||
- We need custom YAML (we can't use the `kubectl autoscale` command)
|
||||
|
||||
- It must specify `scaleTargetRef`, the resource to scale
|
||||
|
||||
- any resource with a `scale` sub-resource will do
|
||||
|
||||
- this includes Deployment, ReplicaSet, StatefulSet...
|
||||
|
||||
- It must specify one or more `metrics` to look at
|
||||
|
||||
- if multiple metrics are given, the autoscaler will "do the math" for each one
|
||||
|
||||
- it will then keep the largest result
|
||||
|
||||
---
|
||||
|
||||
## Details about the `metrics` list
|
||||
|
||||
- Each item will look like this:
|
||||
```yaml
|
||||
- type: <TYPE-OF-METRIC>
|
||||
<TYPE-OF-METRIC>:
|
||||
metric:
|
||||
name: <NAME-OF-METRIC>
|
||||
<...optional selector (mandatory for External metrics)...>
|
||||
target:
|
||||
type: <TYPE-OF-TARGET>
|
||||
<TYPE-OF-TARGET>: <VALUE>
|
||||
<describedObject field, for Object metrics>
|
||||
```
|
||||
|
||||
`<TYPE-OF-METRIC>` can be `Resource`, `Pods`, `Object`, or `External`.
|
||||
|
||||
`<TYPE-OF-TARGET>` can be `Utilization`, `Value`, or `AverageValue`.
|
||||
|
||||
Let's explain the 4 different `<TYPE-OF-METRIC>` values!
|
||||
|
||||
---
|
||||
|
||||
## `Resource`
|
||||
|
||||
Use "classic" metrics served by `metrics-server` (`cpu` and `memory`).
|
||||
|
||||
```yaml
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: 50
|
||||
```
|
||||
|
||||
Compute average *utilization* (usage/requests) across pods.
|
||||
|
||||
It's also possible to specify `Value` or `AverageValue` instead of `Utilization`.
|
||||
|
||||
(To scale according to "raw" CPU or memory usage.)
|
||||
|
||||
---
|
||||
|
||||
## `Pods`
|
||||
|
||||
Use custom metrics. These are still "per-Pod" metrics.
|
||||
|
||||
```yaml
|
||||
- type: Pods
|
||||
pods:
|
||||
metric:
|
||||
name: packets-per-second
|
||||
target:
|
||||
type: AverageValue
|
||||
averageValue: 1k
|
||||
```
|
||||
|
||||
`type:` *must* be `AverageValue`.
|
||||
|
||||
(It cannot be `Utilization`, since these can't be used in Pod `requests`.)
|
||||
|
||||
---
|
||||
|
||||
## `Object`
|
||||
|
||||
Use custom metrics. These metrics are "linked" to any arbitrary resource.
|
||||
|
||||
(E.g. a Deployment, Service, Ingress, ...)
|
||||
|
||||
```yaml
|
||||
- type: Object
|
||||
object:
|
||||
metric:
|
||||
name: requests-per-second
|
||||
describedObject:
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
name: main-route
|
||||
target:
|
||||
type: AverageValue
|
||||
      averageValue: 100
|
||||
```
|
||||
|
||||
`type:` can be `Value` or `AverageValue` (see next slide for details).
|
||||
|
||||
---
|
||||
|
||||
## `Value` vs `AverageValue`
|
||||
|
||||
- `Value`
|
||||
|
||||
- use the value as-is
|
||||
|
||||
- useful to pace a client or producer
|
||||
|
||||
- "target a specific total load on a specific endpoint or queue"
|
||||
|
||||
- `AverageValue`
|
||||
|
||||
- divide the value by the number of pods
|
||||
|
||||
- useful to scale a server or consumer
|
||||
|
||||
- "scale our systems to meet a given SLA/SLO"
|
||||
|
||||
---
|
||||
|
||||
## `External`
|
||||
|
||||
Use arbitrary metrics. The series to use is specified with a label selector.
|
||||
|
||||
```yaml
|
||||
- type: External
|
||||
external:
|
||||
metric:
|
||||
name: queue_messages_ready
|
||||
selector: "queue=worker_tasks"
|
||||
target:
|
||||
type: AverageValue
|
||||
averageValue: 30
|
||||
```
|
||||
|
||||
The `selector` will be passed along when querying the metrics API.
|
||||
|
||||
Its meaning is implementation-dependent.
|
||||
|
||||
It may or may not correspond to Kubernetes labels.
|
||||
|
||||
---
|
||||
|
||||
## One more thing ...
|
||||
|
||||
- We can give a `behavior` set of options
|
||||
|
||||
- Indicates:
|
||||
|
||||
- how much to scale up/down in a single step
|
||||
|
||||
- a *stabilization window* to avoid hysteresis effects
|
||||
|
||||
- The default stabilization window is 15 seconds for `scaleUp`
|
||||
|
||||
(we might want to change that!)
|
||||
|
||||
---
|
||||
|
||||
Putting together @@LINK[k8s/hpa-v2-pa-httplat.yaml]:
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
@@INCLUDE[k8s/hpa-v2-pa-httplat.yaml]
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating the autoscaling policy
|
||||
|
||||
- We will register the policy
|
||||
|
||||
- Of course, it won't quite work yet (we're missing the *Prometheus adapter*)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the HorizontalPodAutoscaler:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/hpa-v2-pa-httplat.yaml
|
||||
```
|
||||
|
||||
- Check the logs of the `controller-manager`:
|
||||
```bash
|
||||
stern --namespace=kube-system --tail=10 controller-manager
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
After a little while we should see messages like this:
|
||||
```
|
||||
no custom metrics API (custom.metrics.k8s.io) registered
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## `custom.metrics.k8s.io`
|
||||
|
||||
- The HorizontalPodAutoscaler will get the metrics *from the Kubernetes API itself*
|
||||
|
||||
- In our specific case, it will access a resource like this one:
|
||||
.small[
|
||||
```
|
||||
/apis/custom.metrics.k8s.io/v1beta1/namespaces/customscaling/services/httplat/httplat_latency_seconds
|
||||
```
|
||||
]
|
||||
|
||||
- By default, the Kubernetes API server doesn't implement `custom.metrics.k8s.io`
|
||||
|
||||
(we can have a look at `kubectl get apiservices`)
|
||||
|
||||
- We need to:
|
||||
|
||||
- start an API service implementing this API group
|
||||
|
||||
- register it with our API server
|
||||
|
||||
---
|
||||
|
||||
## The Prometheus adapter
|
||||
|
||||
- The Prometheus adapter is an open source project:
|
||||
|
||||
https://github.com/DirectXMan12/k8s-prometheus-adapter
|
||||
|
||||
- It's a Kubernetes API service implementing API group `custom.metrics.k8s.io`
|
||||
|
||||
- It maps the requests it receives to Prometheus metrics
|
||||
|
||||
- Exactly what we need!
|
||||
|
||||
---
|
||||
|
||||
## Deploying the Prometheus adapter
|
||||
|
||||
- There is ~~an app~~ a Helm chart for that
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the Prometheus adapter:
|
||||
```bash
|
||||
helm upgrade prometheus-adapter prometheus-community/prometheus-adapter \
|
||||
--install --namespace=kube-system \
|
||||
--set prometheus.url=http://prometheus-server.kube-system.svc \
|
||||
--set prometheus.port=80
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- It comes with some default mappings
|
||||
|
||||
- But we will need to add `httplat` to these mappings
|
||||
|
||||
---
|
||||
|
||||
## Configuring the Prometheus adapter
|
||||
|
||||
- The Prometheus adapter can be configured/customized through a ConfigMap
|
||||
|
||||
- We are going to edit that ConfigMap, then restart the adapter
|
||||
|
||||
- We need to add a rule that will say:
|
||||
|
||||
- all the metrics series named `httplat_latency_seconds_sum` ...
|
||||
|
||||
- ... belong to *Services* ...
|
||||
|
||||
- ... the name of the Service and its Namespace are indicated by the `kubernetes_name` and `kubernetes_namespace` Prometheus tags respectively ...
|
||||
|
||||
- ... and the exact value to use should be the following PromQL expression
|
||||
|
||||
---
|
||||
|
||||
## The mapping rule
|
||||
|
||||
Here is the rule that we need to add to the configuration:
|
||||
|
||||
```yaml
|
||||
- seriesQuery: |
|
||||
httplat_latency_seconds_sum{kubernetes_namespace!="",kubernetes_name!=""}
|
||||
resources:
|
||||
overrides:
|
||||
kubernetes_namespace:
|
||||
resource: namespace
|
||||
kubernetes_name:
|
||||
resource: service
|
||||
name:
|
||||
matches: "httplat_latency_seconds_sum"
|
||||
as: "httplat_latency_seconds"
|
||||
metricsQuery: |
|
||||
rate(httplat_latency_seconds_sum{<<.LabelMatchers>>}[2m])
|
||||
/rate(httplat_latency_seconds_count{<<.LabelMatchers>>}[2m])
|
||||
```
|
||||
|
||||
(I built it following the [walkthrough](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config-walkthrough.md
|
||||
) in the Prometheus adapter documentation.)
|
||||
|
||||
---
|
||||
|
||||
## Editing the adapter's configuration
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the adapter's ConfigMap:
|
||||
```bash
|
||||
kubectl edit configmap prometheus-adapter --namespace=kube-system
|
||||
```
|
||||
|
||||
- Add the new rule in the `rules` section, at the end of the configuration file
|
||||
|
||||
- Save, quit
|
||||
|
||||
- Restart the Prometheus adapter:
|
||||
```bash
|
||||
kubectl rollout restart deployment --namespace=kube-system prometheus-adapter
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Witness the marvel of custom autoscaling
|
||||
|
||||
(Sort of)
|
||||
|
||||
- After a short while, the `rng` Deployment will scale up
|
||||
|
||||
- It should scale up until the latency drops below 100ms
|
||||
|
||||
(and continue to scale up a little bit more after that)
|
||||
|
||||
- Then, since the latency will be well below 100ms, it will scale down
|
||||
|
||||
- ... and back up again, etc.
|
||||
|
||||
(See pictures on next slides!)
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## What's going on?
|
||||
|
||||
- The autoscaler's information is slightly out of date
|
||||
|
||||
  (not by much; probably between 1 and 2 minutes)
|
||||
|
||||
- It's enough to cause the oscillations to happen
|
||||
|
||||
- One possible fix is to tell the autoscaler to wait a bit after each action
|
||||
|
||||
- It will reduce oscillations, but will also slow down its reaction time
|
||||
|
||||
(and therefore, how fast it reacts to a peak of traffic)
|
||||
|
||||
---
|
||||
|
||||
## What's going on? Take 2
|
||||
|
||||
- As soon as the measured latency is *significantly* below our target (100ms) ...
|
||||
|
||||
the autoscaler tries to scale down
|
||||
|
||||
- If the latency is measured at 20ms ...
|
||||
|
||||
the autoscaler will try to *divide the number of pods by five!*
|
||||
|
||||
- One possible solution: apply a formula to the measured latency,
|
||||
so that values between e.g. 10 and 100ms get very close to 100ms.
|
||||
|
||||
- Another solution: instead of targeting a specific latency,
|
||||
target a 95th percentile latency or something similar, using
|
||||
a more advanced PromQL expression (and leveraging the fact that
|
||||
we have histograms instead of raw values).
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Check that the adapter registered itself correctly:
|
||||
```bash
|
||||
kubectl get apiservices | grep metrics
|
||||
```
|
||||
|
||||
Check that the adapter correctly serves metrics:
|
||||
```bash
|
||||
kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1
|
||||
```
|
||||
|
||||
Check that our `httplat` metrics are available:
|
||||
```bash
|
||||
kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1\
|
||||
/namespaces/customscaling/services/httplat/httplat_latency_seconds
|
||||
```
|
||||
|
||||
Also check the logs of the `prometheus-adapter` and the `kube-controller-manager`.
|
||||
|
||||
---
|
||||
|
||||
## Useful links
|
||||
|
||||
- [Horizontal Pod Autoscaler walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/) in the Kubernetes documentation
|
||||
|
||||
- [Autoscaling design proposal](https://github.com/kubernetes/community/tree/master/contributors/design-proposals/autoscaling)
|
||||
|
||||
- [Kubernetes custom metrics API alternative implementations](https://github.com/kubernetes/metrics/blob/master/IMPLEMENTATIONS.md)
|
||||
|
||||
- [Prometheus adapter configuration walkthrough](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config-walkthrough.md)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Autoscaling with custom metrics
|
||||
:FR:- Suivi de charge avancé (HPAv2)
|
||||
@@ -586,7 +586,7 @@ class: extra-details
|
||||
- Example 3: canary for shipping physical goods
|
||||
|
||||
- 1% of orders are shipped with the canary process
|
||||
- the reamining 99% are shipped with the normal process
|
||||
- the remaining 99% are shipped with the normal process
|
||||
|
||||
- We're going to implement example 1 (per-request routing)
|
||||
|
||||
@@ -638,7 +638,7 @@ spec:
|
||||
servicePort: 80
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: wensledale
|
||||
serviceName: wensleydale
|
||||
servicePort: 80
|
||||
- path: /
|
||||
backend:
|
||||
|
||||