Compare commits
197 Commits
gitpod
...
2021-03-nr
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
703a5e137c | ||
|
|
b9036024dc | ||
|
|
56fe63cf4e | ||
|
|
4071b18a39 | ||
|
|
b816d075d4 | ||
|
|
6303b67b86 | ||
|
|
4f3bb9beb2 | ||
|
|
1f34da55b3 | ||
|
|
f30792027f | ||
|
|
74679ab77e | ||
|
|
71ce2eb31a | ||
|
|
eb96dd21bb | ||
|
|
e82d2812aa | ||
|
|
9c8c3ef537 | ||
|
|
2f2948142a | ||
|
|
2516b2d32b | ||
|
|
42f4b65c87 | ||
|
|
989a62b5ff | ||
|
|
b5eb59ab80 | ||
|
|
10920509c3 | ||
|
|
955149e019 | ||
|
|
111ff30c38 | ||
|
|
6c038a5d33 | ||
|
|
6737a20840 | ||
|
|
1d1060a319 | ||
|
|
93e9a60634 | ||
|
|
de2c0e72c3 | ||
|
|
41204c948b | ||
|
|
553b1f7871 | ||
|
|
bd168f7676 | ||
|
|
3a527649d1 | ||
|
|
ecbbcf8b51 | ||
|
|
29edb1aefe | ||
|
|
bd3c91f342 | ||
|
|
fa709f0cb4 | ||
|
|
543b44fb29 | ||
|
|
536a9cc44b | ||
|
|
2ff3d88bab | ||
|
|
295ee9b6b4 | ||
|
|
17c5f6de01 | ||
|
|
556dbb965c | ||
|
|
32250f8053 | ||
|
|
bdede6de07 | ||
|
|
eefdc21488 | ||
|
|
e145428910 | ||
|
|
76789b6113 | ||
|
|
f9660ba9dc | ||
|
|
c2497508f8 | ||
|
|
b5d3b213b1 | ||
|
|
b4c76ad11d | ||
|
|
b251ff3812 | ||
|
|
ede4ea0dd5 | ||
|
|
2ab06c6dfd | ||
|
|
3a01deb039 | ||
|
|
b88f63e1f7 | ||
|
|
918311ac51 | ||
|
|
73e8110f09 | ||
|
|
ecb5106d59 | ||
|
|
e4d8cd4952 | ||
|
|
c4aedbd327 | ||
|
|
2fb3584b1b | ||
|
|
cb90cc9a1e | ||
|
|
bf28dff816 | ||
|
|
b5cb871c69 | ||
|
|
aa8f538574 | ||
|
|
ebf2e23785 | ||
|
|
0553a1ba8b | ||
|
|
9d47177028 | ||
|
|
9d4a035497 | ||
|
|
6fe74cb35c | ||
|
|
43aa41ed51 | ||
|
|
f6e810f648 | ||
|
|
4c710d6826 | ||
|
|
410c98399e | ||
|
|
19c9843a81 | ||
|
|
69d084e04a | ||
|
|
1300d76890 | ||
|
|
0040313371 | ||
|
|
c9e04b906d | ||
|
|
41f66f4144 | ||
|
|
aced587fd0 | ||
|
|
749b3d1648 | ||
|
|
c40cc71bbc | ||
|
|
69b775ef27 | ||
|
|
3bfc14c5f7 | ||
|
|
97984af8a2 | ||
|
|
9b31c45899 | ||
|
|
c0db28d439 | ||
|
|
0e49bfa837 | ||
|
|
fc9c0a6285 | ||
|
|
d4914fa168 | ||
|
|
e4edd9445c | ||
|
|
ba7deefce5 | ||
|
|
be104f1b44 | ||
|
|
5c329b0b79 | ||
|
|
78ffd22499 | ||
|
|
33174a1682 | ||
|
|
d402a2ea93 | ||
|
|
1fc3abcffd | ||
|
|
c1020f24b1 | ||
|
|
4fc81209d4 | ||
|
|
ed841711c5 | ||
|
|
07457af6f7 | ||
|
|
2d4961fbd3 | ||
|
|
14679999be | ||
|
|
29c6d2876a | ||
|
|
a02e7429ad | ||
|
|
fee0be7f09 | ||
|
|
d98fcbce87 | ||
|
|
35320837e5 | ||
|
|
d73e597198 | ||
|
|
b4c0378114 | ||
|
|
efdc4fcfa9 | ||
|
|
c32fcc81bb | ||
|
|
f6930042bd | ||
|
|
2e2767b090 | ||
|
|
115cc5e0c0 | ||
|
|
d252fe254b | ||
|
|
7d96562042 | ||
|
|
4ded8c699d | ||
|
|
620a3df798 | ||
|
|
d28723f07a | ||
|
|
f2334d2d1b | ||
|
|
ddf79eebc7 | ||
|
|
6467264ff5 | ||
|
|
55fcff9333 | ||
|
|
8fb7ea3908 | ||
|
|
7dd72f123f | ||
|
|
ff95066006 | ||
|
|
8146c4dabe | ||
|
|
17aea33beb | ||
|
|
9770f81a1c | ||
|
|
0cb9095303 | ||
|
|
ffded8469b | ||
|
|
0e892cf8b4 | ||
|
|
b87efbd6e9 | ||
|
|
1a24b530d6 | ||
|
|
122ffec5c2 | ||
|
|
276a2dbdda | ||
|
|
2836b58078 | ||
|
|
0d065788a4 | ||
|
|
14271a4df0 | ||
|
|
412d029d0c | ||
|
|
f960230f8e | ||
|
|
774c8a0e31 | ||
|
|
4671a981a7 | ||
|
|
b9743a5f8c | ||
|
|
df4980750c | ||
|
|
9467c7309e | ||
|
|
86b0380a77 | ||
|
|
eb9052ae9a | ||
|
|
8f85332d8a | ||
|
|
0479ad2285 | ||
|
|
986d7eb9c2 | ||
|
|
3fafbb8d4e | ||
|
|
5a24df3fd4 | ||
|
|
1bbfba0531 | ||
|
|
8d98431ba0 | ||
|
|
c31c81a286 | ||
|
|
a0314fc5f5 | ||
|
|
3f088236a4 | ||
|
|
ce4e2ffe46 | ||
|
|
c3a05a6393 | ||
|
|
40b2b8e62e | ||
|
|
efdcf4905d | ||
|
|
bdb57c05b4 | ||
|
|
af0762a0a2 | ||
|
|
0d6c364a95 | ||
|
|
690a1eb75c | ||
|
|
c796a6bfc1 | ||
|
|
0b10d3d40d | ||
|
|
cdb50925da | ||
|
|
ca1f8ec828 | ||
|
|
7302d3533f | ||
|
|
d3c931e602 | ||
|
|
7402c8e6a8 | ||
|
|
1de539bff8 | ||
|
|
a6c7d69986 | ||
|
|
b0bff595cf | ||
|
|
6f806ed200 | ||
|
|
0c8b20f6b6 | ||
|
|
2ba35e1f8d | ||
|
|
eb0d9bed2a | ||
|
|
bab493a926 | ||
|
|
f4f2d83fa4 | ||
|
|
9f049951ab | ||
|
|
7257a5c594 | ||
|
|
102aef5ac5 | ||
|
|
d2b3a1d663 | ||
|
|
d84ada0927 | ||
|
|
0e04b4a07d | ||
|
|
aef910b4b7 | ||
|
|
298b6db20c | ||
|
|
7ec6e871c9 | ||
|
|
a0558e4ee5 | ||
|
|
16a62f9f84 | ||
|
|
2ce50007d2 |
@@ -9,21 +9,21 @@ services:
|
||||
|
||||
etcd:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/etcd:3.4.3
|
||||
image: k8s.gcr.io/etcd:3.4.9
|
||||
command: etcd
|
||||
|
||||
kube-apiserver:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.18.8
|
||||
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
|
||||
|
||||
kube-controller-manager:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.18.8
|
||||
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
|
||||
"Edit the CLUSTER placeholder first. Then, remove this line.":
|
||||
|
||||
kube-scheduler:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.18.8
|
||||
command: kube-scheduler --master http://localhost:8080
|
||||
|
||||
@@ -9,20 +9,20 @@ services:
|
||||
|
||||
etcd:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/etcd:3.4.3
|
||||
image: k8s.gcr.io/etcd:3.4.9
|
||||
command: etcd
|
||||
|
||||
kube-apiserver:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.18.8
|
||||
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
|
||||
|
||||
kube-controller-manager:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.18.8
|
||||
command: kube-controller-manager --master http://localhost:8080
|
||||
|
||||
kube-scheduler:
|
||||
network_mode: "service:pause"
|
||||
image: k8s.gcr.io/hyperkube:v1.17.2
|
||||
image: k8s.gcr.io/hyperkube:v1.18.8
|
||||
command: kube-scheduler --master http://localhost:8080
|
||||
|
||||
49
dockercoins/Tiltfile
Normal file
@@ -0,0 +1,49 @@
|
||||
k8s_yaml(blob('''
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
name: registry
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: registry
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
spec:
|
||||
containers:
|
||||
- image: registry
|
||||
name: registry
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: registry
|
||||
name: registry
|
||||
spec:
|
||||
ports:
|
||||
- port: 5000
|
||||
protocol: TCP
|
||||
targetPort: 5000
|
||||
nodePort: 30555
|
||||
selector:
|
||||
app: registry
|
||||
type: NodePort
|
||||
'''))
|
||||
default_registry('localhost:30555')
|
||||
docker_build('dockercoins/hasher', 'hasher')
|
||||
docker_build('dockercoins/rng', 'rng')
|
||||
docker_build('dockercoins/webui', 'webui')
|
||||
docker_build('dockercoins/worker', 'worker')
|
||||
k8s_yaml('../k8s/dockercoins.yaml')
|
||||
|
||||
# Uncomment the following line to let tilt run with the default kubeadm cluster-admin context.
|
||||
#allow_k8s_contexts('kubernetes-admin@kubernetes')
|
||||
|
||||
# While we're here: if you're controlling a remote cluster, uncomment that line.
|
||||
# It will create a port forward so that you can access the remote registry.
|
||||
#k8s_resource(workload='registry', port_forwards='30555:5000')
|
||||
33
k8s/certbot.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: certbot
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: certbot
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /.well-known/acme-challenge/
|
||||
backend:
|
||||
serviceName: certbot
|
||||
servicePort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
metadata:
|
||||
name: certbot
|
||||
subsets:
|
||||
- addresses:
|
||||
- ip: A.B.C.D
|
||||
ports:
|
||||
- port: 8000
|
||||
protocol: TCP
|
||||
|
||||
11
k8s/cm-certificate.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: xyz.A.B.C.D.nip.io
|
||||
spec:
|
||||
secretName: xyz.A.B.C.D.nip.io
|
||||
dnsNames:
|
||||
- xyz.A.B.C.D.nip.io
|
||||
issuerRef:
|
||||
name: letsencrypt-staging
|
||||
kind: ClusterIssuer
|
||||
18
k8s/cm-clusterissuer.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging
|
||||
spec:
|
||||
acme:
|
||||
# Remember to update this if you use this manifest to obtain real certificates :)
|
||||
email: hello@example.com
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
# To use the production environment, use the following line instead:
|
||||
#server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: issuer-letsencrypt-staging
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: coffees.container.training
|
||||
spec:
|
||||
group: container.training
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
scope: Namespaced
|
||||
names:
|
||||
plural: coffees
|
||||
@@ -11,25 +15,4 @@ spec:
|
||||
kind: Coffee
|
||||
shortNames:
|
||||
- cof
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
spec:
|
||||
required:
|
||||
- taste
|
||||
properties:
|
||||
taste:
|
||||
description: Subjective taste of that kind of coffee bean
|
||||
type: string
|
||||
additionalPrinterColumns:
|
||||
- jsonPath: .spec.taste
|
||||
description: Subjective taste of that kind of coffee bean
|
||||
name: Taste
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
|
||||
|
||||
37
k8s/coffee-3.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: coffees.container.training
|
||||
spec:
|
||||
group: container.training
|
||||
scope: Namespaced
|
||||
names:
|
||||
plural: coffees
|
||||
singular: coffee
|
||||
kind: Coffee
|
||||
shortNames:
|
||||
- cof
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
required: [ spec ]
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
taste:
|
||||
description: Subjective taste of that kind of coffee bean
|
||||
type: string
|
||||
required: [ taste ]
|
||||
additionalPrinterColumns:
|
||||
- jsonPath: .spec.taste
|
||||
description: Subjective taste of that kind of coffee bean
|
||||
name: Taste
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
@@ -9,9 +9,9 @@ spec:
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: robusta
|
||||
name: excelsa
|
||||
spec:
|
||||
taste: stronger
|
||||
taste: fruity
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
@@ -23,7 +23,12 @@ spec:
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: excelsa
|
||||
name: robusta
|
||||
spec:
|
||||
taste: fruity
|
||||
|
||||
taste: stronger
|
||||
bitterness: high
|
||||
---
|
||||
kind: Coffee
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: java
|
||||
|
||||
77
k8s/consul-1.yaml
Normal file
@@ -0,0 +1,77 @@
|
||||
# Basic Consul cluster using Cloud Auto-Join.
|
||||
# Caveats:
|
||||
# - no actual persistence
|
||||
# - scaling down to 1 will break the cluster
|
||||
# - pods may be colocated
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: consul
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: consul
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: consul
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: consul
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: consul
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: consul
|
||||
spec:
|
||||
ports:
|
||||
- port: 8500
|
||||
name: http
|
||||
selector:
|
||||
app: consul
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: consul
|
||||
spec:
|
||||
serviceName: consul
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: consul
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: consul
|
||||
spec:
|
||||
serviceAccountName: consul
|
||||
containers:
|
||||
- name: consul
|
||||
image: "consul:1.8"
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
args:
|
||||
- "agent"
|
||||
- "-bootstrap-expect=3"
|
||||
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
|
||||
- "-client=0.0.0.0"
|
||||
- "-data-dir=/consul/data"
|
||||
- "-server"
|
||||
- "-ui"
|
||||
@@ -1,5 +1,9 @@
|
||||
# Better Consul cluster.
|
||||
# There is still no actual persistence, but:
|
||||
# - podAntiaffinity prevents pod colocation
|
||||
# - clusters works when scaling down to 1 (thanks to lifecycle hook)
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
kind: Role
|
||||
metadata:
|
||||
name: consul
|
||||
rules:
|
||||
@@ -11,17 +15,16 @@ rules:
|
||||
- list
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: consul
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
kind: Role
|
||||
name: consul
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: consul
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
@@ -68,11 +71,16 @@ spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: consul
|
||||
image: "consul:1.6"
|
||||
image: "consul:1.8"
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
args:
|
||||
- "agent"
|
||||
- "-bootstrap-expect=3"
|
||||
- "-retry-join=provider=k8s label_selector=\"app=consul\""
|
||||
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
|
||||
- "-client=0.0.0.0"
|
||||
- "-data-dir=/consul/data"
|
||||
- "-server"
|
||||
104
k8s/consul-3.yaml
Normal file
@@ -0,0 +1,104 @@
|
||||
# Even better Consul cluster.
|
||||
# That one uses a volumeClaimTemplate to achieve true persistence.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: consul
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: consul
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: consul
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: consul
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: consul
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: consul
|
||||
spec:
|
||||
ports:
|
||||
- port: 8500
|
||||
name: http
|
||||
selector:
|
||||
app: consul
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: consul
|
||||
spec:
|
||||
serviceName: consul
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: consul
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: consul
|
||||
spec:
|
||||
serviceAccountName: consul
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- persistentconsul
|
||||
topologyKey: kubernetes.io/hostname
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: consul
|
||||
image: "consul:1.8"
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /consul/data
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
args:
|
||||
- "agent"
|
||||
- "-bootstrap-expect=3"
|
||||
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
|
||||
- "-client=0.0.0.0"
|
||||
- "-data-dir=/consul/data"
|
||||
- "-server"
|
||||
- "-ui"
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- consul leave
|
||||
@@ -1,3 +1,10 @@
|
||||
# This file is based on the following manifest:
|
||||
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
|
||||
# It adds the "skip login" flag, as well as an insecure hack to defeat SSL.
|
||||
# As its name implies, it is INSECURE and you should not use it in production,
|
||||
# or on clusters that contain any kind of important or sensitive data, or on
|
||||
# clusters that have a life span of more than a few hours.
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -187,7 +194,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0-rc2
|
||||
image: kubernetesui/dashboard:v2.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
@@ -226,7 +233,7 @@ spec:
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"beta.kubernetes.io/os": linux
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
@@ -272,7 +279,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.2
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
@@ -293,7 +300,7 @@ spec:
|
||||
runAsGroup: 2001
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"beta.kubernetes.io/os": linux
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
305
k8s/dashboard-recommended.yaml
Normal file
@@ -0,0 +1,305 @@
|
||||
# This is a copy of the following file:
|
||||
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster", "dashboard-metrics-scraper"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
selector:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
336
k8s/dashboard-with-token.yaml
Normal file
@@ -0,0 +1,336 @@
|
||||
# This file is based on the following manifest:
|
||||
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
|
||||
# It adds a ServiceAccount that has cluster-admin privileges on the cluster,
|
||||
# and exposes the dashboard on a NodePort. It makes it easier to do quick demos
|
||||
# of the Kubernetes dashboard, without compromising the security too much.
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster", "dashboard-metrics-scraper"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
# Allow Metrics Scraper to get metrics from the Metrics server
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
selector:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
|
||||
spec:
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-cluster-admin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
30
k8s/event-node.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
kind: Event
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
generateName: hello-
|
||||
labels:
|
||||
container.training/test: ""
|
||||
|
||||
#eventTime: "2020-07-04T00:00:00.000000Z"
|
||||
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
|
||||
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
|
||||
#count: 42
|
||||
|
||||
involvedObject:
|
||||
kind: Node
|
||||
apiVersion: v1
|
||||
name: kind-control-plane
|
||||
# Note: the uid should be the Node name (not the uid of the Node).
|
||||
# This might be specific to global objects.
|
||||
uid: kind-control-plane
|
||||
|
||||
type: Warning
|
||||
reason: NodeOverheat
|
||||
message: "Node temperature exceeds critical threshold"
|
||||
action: Hello
|
||||
source:
|
||||
component: thermal-probe
|
||||
#host: node1
|
||||
#reportingComponent: ""
|
||||
#reportingInstance: ""
|
||||
|
||||
36
k8s/event-pod.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
kind: Event
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
# One convention is to use <objectname>.<timestamp>,
|
||||
# where the timestamp is taken with a nanosecond
|
||||
# precision and expressed in hexadecimal.
|
||||
# Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
|
||||
name: hello.1234567890
|
||||
# The label doesn't serve any purpose, except making
|
||||
# it easier to identify or delete that specific event.
|
||||
labels:
|
||||
container.training/test: ""
|
||||
|
||||
#eventTime: "2020-07-04T00:00:00.000000Z"
|
||||
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
|
||||
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
|
||||
#count: 42
|
||||
|
||||
involvedObject:
|
||||
### These 5 lines should be updated to refer to an object.
|
||||
### Make sure to put the correct "uid", because it is what
|
||||
### "kubectl describe" is using to gather relevant events.
|
||||
#apiVersion: v1
|
||||
#kind: Pod
|
||||
#name: magic-bean
|
||||
#namespace: blue
|
||||
#uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30
|
||||
|
||||
type: Normal
|
||||
reason: BackupSuccessful
|
||||
message: "Object successfully dumped to gitops repository"
|
||||
source:
|
||||
component: gitops-sync
|
||||
#reportingComponent: ""
|
||||
#reportingInstance: ""
|
||||
|
||||
@@ -52,7 +52,7 @@ data:
|
||||
- add_kubernetes_metadata:
|
||||
in_cluster: true
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: filebeat
|
||||
@@ -60,6 +60,9 @@ metadata:
|
||||
labels:
|
||||
k8s-app: filebeat
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: filebeat
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
|
||||
@@ -27,7 +27,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
|
||||
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
|
||||
containers:
|
||||
- name: web
|
||||
image: nginx
|
||||
|
||||
29
k8s/hpa-v2-pa-httplat.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
kind: HorizontalPodAutoscaler
|
||||
apiVersion: autoscaling/v2beta2
|
||||
metadata:
|
||||
name: rng
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: rng
|
||||
minReplicas: 1
|
||||
maxReplicas: 20
|
||||
behavior:
|
||||
scaleUp:
|
||||
stabilizationWindowSeconds: 60
|
||||
scaleDown:
|
||||
stabilizationWindowSeconds: 180
|
||||
metrics:
|
||||
- type: Object
|
||||
object:
|
||||
describedObject:
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
name: httplat
|
||||
metric:
|
||||
name: httplat_latency_seconds
|
||||
target:
|
||||
type: Value
|
||||
value: 0.1
|
||||
|
||||
@@ -3,6 +3,10 @@ kind: Ingress
|
||||
metadata:
|
||||
name: whatever
|
||||
spec:
|
||||
#tls:
|
||||
#- secretName: whatever.A.B.C.D.nip.io
|
||||
# hosts:
|
||||
# - whatever.A.B.C.D.nip.io
|
||||
rules:
|
||||
- host: whatever.A.B.C.D.nip.io
|
||||
http:
|
||||
|
||||
@@ -1,162 +0,0 @@
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# ------------------- Dashboard Secret ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kube-system
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service Account ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Role & Role Binding ------------------- #
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: kube-system
|
||||
rules:
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics from heapster.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubernetes-dashboard-minimal
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Deployment ------------------- #
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service ------------------- #
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
63
k8s/kyverno-namespace-setup.yaml
Normal file
@@ -0,0 +1,63 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: setup-namespace
|
||||
spec:
|
||||
rules:
|
||||
- name: setup-limitrange
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Namespace
|
||||
generate:
|
||||
kind: LimitRange
|
||||
name: default-limitrange
|
||||
namespace: "{{request.object.metadata.name}}"
|
||||
data:
|
||||
spec:
|
||||
limits:
|
||||
- type: Container
|
||||
min:
|
||||
cpu: 0.1
|
||||
memory: 0.1
|
||||
max:
|
||||
cpu: 2
|
||||
memory: 2Gi
|
||||
default:
|
||||
cpu: 0.25
|
||||
memory: 500Mi
|
||||
defaultRequest:
|
||||
cpu: 0.25
|
||||
memory: 250Mi
|
||||
- name: setup-resourcequota
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Namespace
|
||||
generate:
|
||||
kind: ResourceQuota
|
||||
name: default-resourcequota
|
||||
namespace: "{{request.object.metadata.name}}"
|
||||
data:
|
||||
spec:
|
||||
hard:
|
||||
requests.cpu: "10"
|
||||
requests.memory: 10Gi
|
||||
limits.cpu: "20"
|
||||
limits.memory: 20Gi
|
||||
- name: setup-networkpolicy
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Namespace
|
||||
generate:
|
||||
kind: NetworkPolicy
|
||||
name: default-networkpolicy
|
||||
namespace: "{{request.object.metadata.name}}"
|
||||
data:
|
||||
spec:
|
||||
podSelector: {}
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector: {}
|
||||
|
||||
22
k8s/kyverno-pod-color-1.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: pod-color-policy-1
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
rules:
|
||||
- name: ensure-pod-color-is-valid
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
selector:
|
||||
matchExpressions:
|
||||
- key: color
|
||||
operator: Exists
|
||||
- key: color
|
||||
operator: NotIn
|
||||
values: [ red, green, blue ]
|
||||
validate:
|
||||
message: "If it exists, the label color must be red, green, or blue."
|
||||
deny: {}
|
||||
21
k8s/kyverno-pod-color-2.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: pod-color-policy-2
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
background: false
|
||||
rules:
|
||||
- name: prevent-color-change
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
validate:
|
||||
message: "Once label color has been added, it cannot be changed."
|
||||
deny:
|
||||
conditions:
|
||||
- key: "{{ request.oldObject.metadata.labels.color }}"
|
||||
operator: NotEqual
|
||||
value: "{{ request.object.metadata.labels.color }}"
|
||||
|
||||
25
k8s/kyverno-pod-color-3.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: pod-color-policy-3
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
background: false
|
||||
rules:
|
||||
- name: prevent-color-removal
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
selector:
|
||||
matchExpressions:
|
||||
- key: color
|
||||
operator: DoesNotExist
|
||||
validate:
|
||||
message: "Once label color has been added, it cannot be removed."
|
||||
deny:
|
||||
conditions:
|
||||
- key: "{{ request.oldObject.metadata.labels.color }}"
|
||||
operator: NotIn
|
||||
value: []
|
||||
|
||||
@@ -14,7 +14,7 @@ spec:
|
||||
initContainers:
|
||||
- name: git
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
|
||||
@@ -22,7 +22,10 @@ spec:
|
||||
command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
|
||||
containers:
|
||||
- name: postgres
|
||||
image: postgres:11
|
||||
image: postgres:12
|
||||
env:
|
||||
- name: POSTGRES_HOST_AUTH_METHOD
|
||||
value: trust
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/postgresql/data
|
||||
name: postgres
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
annotations:
|
||||
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
|
||||
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
|
||||
name: restricted
|
||||
spec:
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
@@ -1,28 +1,17 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
deployment.kubernetes.io/revision: "2"
|
||||
creationTimestamp: null
|
||||
generation: 1
|
||||
labels:
|
||||
app: socat
|
||||
name: socat
|
||||
namespace: kube-system
|
||||
selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: socat
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app: socat
|
||||
spec:
|
||||
@@ -34,34 +23,19 @@ spec:
|
||||
image: alpine
|
||||
imagePullPolicy: Always
|
||||
name: socat
|
||||
resources: {}
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
schedulerName: default-scheduler
|
||||
securityContext: {}
|
||||
terminationGracePeriodSeconds: 30
|
||||
status: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app: socat
|
||||
name: socat
|
||||
namespace: kube-system
|
||||
selfLink: /api/v1/namespaces/kube-system/services/socat
|
||||
spec:
|
||||
externalTrafficPolicy: Cluster
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: socat
|
||||
sessionAffinity: None
|
||||
type: NodePort
|
||||
status:
|
||||
loadBalancer: {}
|
||||
|
||||
17
k8s/test.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: whatever
|
||||
spec:
|
||||
#tls:
|
||||
#- secretName: whatever.A.B.C.D.nip.io
|
||||
# hosts:
|
||||
# - whatever.A.B.C.D.nip.io
|
||||
rules:
|
||||
- host: whatever.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: whatever
|
||||
servicePort: 1234
|
||||
103
k8s/traefik-v1.yaml
Normal file
@@ -0,0 +1,103 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
name: traefik-ingress-lb
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
hostNetwork: true
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik:1.7
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
hostPort: 80
|
||||
- name: admin
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
args:
|
||||
- --api
|
||||
- --kubernetes
|
||||
- --logLevel=INFO
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: traefik-ingress-service
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: traefik-ingress-lb
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
name: web
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
name: admin
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: traefik-ingress-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
122
k8s/traefik-v2.yaml
Normal file
@@ -0,0 +1,122 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
name: traefik-ingress-lb
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
hostNetwork: true
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
hostPort: 80
|
||||
- name: admin
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
args:
|
||||
- --accesslog
|
||||
- --api
|
||||
- --api.insecure
|
||||
- --log.level=INFO
|
||||
- --metrics.prometheus
|
||||
- --providers.kubernetesingress
|
||||
- --entrypoints.http.Address=:80
|
||||
- --entrypoints.https.Address=:443
|
||||
- --entrypoints.https.http.tls.certResolver=default
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: traefik-ingress-service
|
||||
namespace: kube-system
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8080"
|
||||
prometheus.io/path: "/metrics"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: traefik-ingress-lb
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
name: web
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
name: admin
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
- ingressclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: traefik-ingress-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
103
k8s/traefik.yaml
@@ -1,103 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
name: traefik-ingress-lb
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
hostNetwork: true
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik:1.7
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
hostPort: 80
|
||||
- name: admin
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
args:
|
||||
- --api
|
||||
- --kubernetes
|
||||
- --logLevel=INFO
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: traefik-ingress-service
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: traefik-ingress-lb
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
name: web
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
name: admin
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: traefik-ingress-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
k8s/traefik.yaml (symbolic link, 1 line)
@@ -0,0 +1 @@
traefik-v2.yaml
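The old k8s/traefik.yaml is thus removed and re-created as a symlink to the v2 manifest. A sketch of the equivalent change from a working copy (assuming the k8s/ layout shown above):

    cd k8s
    rm traefik.yaml
    ln -s traefik-v2.yaml traefik.yaml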
@@ -8,24 +8,24 @@ metadata:
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: users:jean.doe
+  name: user=jean.doe
 rules:
 - apiGroups: [ certificates.k8s.io ]
   resources: [ certificatesigningrequests ]
   verbs: [ create ]
 - apiGroups: [ certificates.k8s.io ]
-  resourceNames: [ users:jean.doe ]
+  resourceNames: [ user=jean.doe ]
   resources: [ certificatesigningrequests ]
   verbs: [ get, create, delete, watch ]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
-  name: users:jean.doe
+  name: user=jean.doe
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: users:jean.doe
+  name: user=jean.doe
 subjects:
 - kind: ServiceAccount
   name: jean.doe
prepare-vms/infra/example.openstack-cli (new file, 24 lines)
@@ -0,0 +1,24 @@
INFRACLASS=openstack-cli

# Copy that file to e.g. openstack or ovh, then customize it.
# Some Openstack providers (like OVHcloud) will let you download
# a file containing credentials. That's what you need to use.
# The file below contains some example values.
export OS_AUTH_URL=https://auth.cloud.ovh.net/v3/
export OS_IDENTITY_API_VERSION=3
export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-"Default"}
export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME:-"Default"}
export OS_TENANT_ID=abcd1234
export OS_TENANT_NAME="0123456"
export OS_USERNAME="user-xyz123"
export OS_PASSWORD=AbCd1234
export OS_REGION_NAME="GRA7"

# And then some values to indicate server type, image, etc.
# You can see available flavors with `openstack flavor list`
export OS_FLAVOR=s1-4
# You can see available images with `openstack image list`
export OS_IMAGE=896c5f54-51dc-44f0-8c22-ce99ba7164df
# You can create a key with `openstack keypair create --public-key ~/.ssh/id_rsa.pub containertraining`
export OS_KEY=containertraining
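As the comments in this file explain, it is meant to be copied, filled in with real credentials, and sourced before the OpenStack CLI (and the openstack-cli infra class) can be used. A minimal sketch, assuming the standard openstack client is installed; the target filename "ovh" is just an example:

    cp prepare-vms/infra/example.openstack-cli prepare-vms/infra/ovh
    # edit prepare-vms/infra/ovh with real credentials, then:
    . prepare-vms/infra/ovh
    openstack flavor list
    openstack image list
    openstack keypair create --public-key ~/.ssh/id_rsa.pub containertraining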
@@ -1,4 +1,5 @@
-INFRACLASS=openstack
+INFRACLASS=openstack-tf
+
 # If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
 # and customize the variables below.
 export TF_VAR_user="jpetazzo"
@@ -6,4 +7,4 @@ export TF_VAR_tenant="training"
 export TF_VAR_domain="Default"
 export TF_VAR_password="..."
 export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
-export TF_VAR_flavor="GP1.S"
+export TF_VAR_flavor="GP1.S"
prepare-vms/infra/hetzner (new file, 5 lines)
@@ -0,0 +1,5 @@
INFRACLASS=hetzner
if ! [ -f ~/.config/hcloud/cli.toml ]; then
    warn "~/.config/hcloud/cli.toml not found."
    warn "Make sure that the Hetzner CLI (hcloud) is installed and configured."
fi
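The check above only warns; the configuration file it looks for is the one written by the Hetzner CLI itself. A minimal setup sketch, assuming the standard hcloud client (the context name is illustrative):

    # "hcloud context create" prompts for an API token and writes ~/.config/hcloud/cli.toml
    hcloud context create container-training
    hcloud server list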
prepare-vms/infra/scaleway (new file, 3 lines)
@@ -0,0 +1,3 @@
INFRACLASS=scaleway
#SCW_INSTANCE_TYPE=DEV1-L
#SCW_ZONE=fr-par-2
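The two commented variables override the defaults (DEV1-M and fr-par-1) set later in lib/infra/scaleway.sh. A minimal setup sketch, assuming Scaleway CLI v2, whose config file that library checks for:

    # "scw init" writes ~/.config/scw/config.yaml (the file lib/infra/scaleway.sh looks for)
    scw init
    scw instance server list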
@@ -43,6 +43,16 @@ _cmd_cards() {
|
||||
info "$0 www"
|
||||
}
|
||||
|
||||
_cmd clean "Remove information about stopped clusters"
|
||||
_cmd_clean() {
|
||||
for TAG in tags/*; do
|
||||
if grep -q ^stopped$ "$TAG/status"; then
|
||||
info "Removing $TAG..."
|
||||
rm -rf "$TAG"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
_cmd deploy "Install Docker on a bunch of running VMs"
|
||||
_cmd_deploy() {
|
||||
TAG=$1
|
||||
@@ -65,6 +75,27 @@ _cmd_deploy() {
|
||||
sleep 1
|
||||
done"
|
||||
|
||||
# Special case for scaleway since it doesn't come with sudo
|
||||
if [ "$INFRACLASS" = "scaleway" ]; then
|
||||
pssh -l root "
|
||||
grep DEBIAN_FRONTEND /etc/environment || echo DEBIAN_FRONTEND=noninteractive >> /etc/environment
|
||||
grep cloud-init /etc/sudoers && rm /etc/sudoers
|
||||
apt-get update && apt-get install sudo -y"
|
||||
fi
|
||||
|
||||
# FIXME
|
||||
# Special case for hetzner since it doesn't have an ubuntu user
|
||||
#if [ "$INFRACLASS" = "hetzner" ]; then
|
||||
# pssh -l root "
|
||||
#[ -d /home/ubuntu ] ||
|
||||
# useradd ubuntu -m -s /bin/bash
|
||||
#echo 'ubuntu ALL=(ALL:ALL) NOPASSWD:ALL' > /etc/sudoers.d/ubuntu
|
||||
#[ -d /home/ubuntu/.ssh ] ||
|
||||
# install --owner=ubuntu --mode=700 --directory /home/ubuntu/.ssh
|
||||
#[ -f /home/ubuntu/.ssh/authorized_keys ] ||
|
||||
# install --owner=ubuntu --mode=600 /root/.ssh/authorized_keys --target-directory /home/ubuntu/.ssh"
|
||||
#fi
|
||||
|
||||
# Copy settings and install Python YAML parser
|
||||
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
|
||||
pssh "
|
||||
@@ -131,19 +162,19 @@ _cmd_kubebins() {
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz \
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
##VERSION##
|
||||
curl -L https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz \
|
||||
curl -L https://dl.k8s.io/v1.18.10/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx \
|
||||
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
|
||||
fi
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
if ! [ -x bridge ]; then
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz \
|
||||
| sudo tar -zx
|
||||
fi
|
||||
"
|
||||
@@ -173,13 +204,15 @@ _cmd_kube() {
|
||||
pssh --timeout 200 "
|
||||
sudo apt-get update -q &&
|
||||
sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
|
||||
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
|
||||
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
|
||||
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
|
||||
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
|
||||
|
||||
# Initialize kube master
|
||||
pssh --timeout 200 "
|
||||
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
|
||||
kubeadm token generate > /tmp/token &&
|
||||
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4)
|
||||
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4) --ignore-preflight-errors=NumCPU
|
||||
fi"
|
||||
|
||||
# Put kubeconfig in ubuntu's and docker's accounts
|
||||
@@ -212,17 +245,23 @@ _cmd_kube() {
|
||||
if i_am_first_node; then
|
||||
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd kubetools "Install a bunch of CLI tools for Kubernetes"
|
||||
_cmd_kubetools() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# Install kubectx and kubens
|
||||
pssh "
|
||||
[ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
|
||||
sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx &&
|
||||
sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns &&
|
||||
sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d &&
|
||||
sudo ln -sf \$HOME/kubectx/kubectx /usr/local/bin/kctx &&
|
||||
sudo ln -sf \$HOME/kubectx/kubens /usr/local/bin/kns &&
|
||||
sudo cp \$HOME/kubectx/completion/*.bash /etc/bash_completion.d &&
|
||||
[ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
|
||||
sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
|
||||
sudo -u docker tee -a /home/docker/.bashrc <<EOF
|
||||
. /home/ubuntu/kube-ps1/kube-ps1.sh
|
||||
. \$HOME/kube-ps1/kube-ps1.sh
|
||||
KUBE_PS1_PREFIX=""
|
||||
KUBE_PS1_SUFFIX=""
|
||||
KUBE_PS1_SYMBOL_ENABLE="false"
|
||||
@@ -246,11 +285,22 @@ EOF"
|
||||
helm completion bash | sudo tee /etc/bash_completion.d/helm
|
||||
fi"
|
||||
|
||||
# Install kustomize
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kustomize ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v3.6.1/kustomize_v3.6.1_linux_amd64.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx kustomize
|
||||
echo complete -C /usr/local/bin/kustomize kustomize | sudo tee /etc/bash_completion.d/kustomize
|
||||
fi"
|
||||
|
||||
# Install ship
|
||||
# Note: 0.51.3 is the last version that doesn't display GIN-debug messages
|
||||
# (don't want to get folks confused by that!)
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/ship ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
|
||||
curl -L https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_amd64.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx ship
|
||||
fi"
|
||||
|
||||
@@ -262,7 +312,54 @@ EOF"
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
fi"
|
||||
|
||||
sep "Done"
|
||||
# Install the krew package manager
|
||||
pssh "
|
||||
if [ ! -d /home/docker/.krew ]; then
|
||||
cd /tmp &&
|
||||
curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz |
|
||||
tar -zxf- &&
|
||||
sudo -u docker -H ./krew-linux_amd64 install krew &&
|
||||
echo export PATH=/home/docker/.krew/bin:\\\$PATH | sudo -u docker tee -a /home/docker/.bashrc
|
||||
fi"
|
||||
|
||||
# Install k9s and popeye
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/k9s ]; then
|
||||
FILENAME=k9s_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin k9s
|
||||
fi
|
||||
if [ ! -x /usr/local/bin/popeye ]; then
|
||||
FILENAME=popeye_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin popeye
|
||||
fi"
|
||||
|
||||
# Install Tilt
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/tilt ]; then
|
||||
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
|
||||
fi"
|
||||
|
||||
# Install Skaffold
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/skaffold ]; then
|
||||
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 &&
|
||||
sudo install skaffold /usr/local/bin/
|
||||
fi"
|
||||
|
||||
# Install Kompose
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kompose ]; then
|
||||
curl -Lo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-amd64 &&
|
||||
sudo install kompose /usr/local/bin
|
||||
fi"
|
||||
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kubeseal ]; then
|
||||
curl -Lo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.13.1/kubeseal-linux-amd64 &&
|
||||
sudo install kubeseal /usr/local/bin
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
|
||||
@@ -284,29 +381,44 @@ _cmd_kubetest() {
|
||||
set -e
|
||||
if i_am_first_node; then
|
||||
which kubectl
|
||||
for NODE in \$(awk /[0-9]\$/\ {print\ \\\$2} /etc/hosts); do
|
||||
for NODE in \$(grep [0-9]\$ /etc/hosts | grep -v ^127 | awk {print\ \\\$2}); do
|
||||
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
|
||||
done
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token"
|
||||
_cmd_ids() {
|
||||
_cmd ips "Show the IP addresses for a given tag"
|
||||
_cmd_ips() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
|
||||
info "Looking up by tag:"
|
||||
aws_get_instance_ids_by_tag $TAG
|
||||
|
||||
# Just in case we managed to create instances but weren't able to tag them
|
||||
info "Looking up by token:"
|
||||
aws_get_instance_ids_by_client_token $TAG
|
||||
SETTINGS=tags/$TAG/settings.yaml
|
||||
CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
|
||||
while true; do
|
||||
for I in $(seq $CLUSTERSIZE); do
|
||||
read ip || return 0
|
||||
printf "%s\t" "$ip"
|
||||
done
|
||||
printf "\n"
|
||||
done < tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
_cmd list "List available groups for a given infrastructure"
|
||||
_cmd list "List all VMs on a given infrastructure (or all infras if no arg given)"
|
||||
_cmd_list() {
|
||||
need_infra $1
|
||||
infra_list
|
||||
case "$1" in
|
||||
"")
|
||||
for INFRA in infra/*; do
|
||||
$0 list $INFRA
|
||||
done
|
||||
;;
|
||||
*/example.*)
|
||||
;;
|
||||
*)
|
||||
need_infra $1
|
||||
sep "Listing instances for $1"
|
||||
infra_list
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_cmd listall "List VMs running on all configured infrastructures"
|
||||
@@ -329,7 +441,7 @@ _cmd_maketag() {
|
||||
if [ -z $USER ]; then
|
||||
export USER=anonymous
|
||||
fi
|
||||
MS=$(($(date +%N)/1000000))
|
||||
MS=$(($(date +%N | tr -d 0)/1000000))
|
||||
date +%Y-%m-%d-%H-%M-$MS-$USER
|
||||
}
|
||||
|
||||
@@ -400,16 +512,6 @@ _cmd_opensg() {
|
||||
infra_opensg
|
||||
}
|
||||
|
||||
_cmd portworx "Prepare the nodes for Portworx deployment"
|
||||
_cmd_portworx() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo truncate --size 10G /portworx.blk &&
|
||||
sudo losetup /dev/loop4 /portworx.blk"
|
||||
}
|
||||
|
||||
_cmd disableaddrchecks "Disable source/destination IP address checks"
|
||||
_cmd_disableaddrchecks() {
|
||||
TAG=$1
|
||||
@@ -446,6 +548,17 @@ _cmd_remap_nodeports() {
|
||||
if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
|
||||
sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
|
||||
fi"
|
||||
|
||||
info "If you have manifests hard-coding nodePort values,"
|
||||
info "you might want to patch them with a command like:"
|
||||
info "
|
||||
|
||||
if i_am_first_node; then
|
||||
kubectl -n kube-system patch svc prometheus-server \\
|
||||
-p 'spec: { ports: [ {port: 80, nodePort: 10101} ]}'
|
||||
fi
|
||||
|
||||
"
|
||||
}
|
||||
|
||||
_cmd quotas "Check our infrastructure quotas (max instances)"
|
||||
@@ -454,18 +567,6 @@ _cmd_quotas() {
|
||||
infra_quotas
|
||||
}
|
||||
|
||||
_cmd retag "(FIXME) Apply a new tag to a group of VMs"
|
||||
_cmd_retag() {
|
||||
OLDTAG=$1
|
||||
NEWTAG=$2
|
||||
TAG=$OLDTAG
|
||||
need_tag
|
||||
if [[ -z "$NEWTAG" ]]; then
|
||||
die "You must specify a new tag to apply."
|
||||
fi
|
||||
aws_tag_instances $OLDTAG $NEWTAG
|
||||
}
|
||||
|
||||
_cmd ssh "Open an SSH session to the first node of a tag"
|
||||
_cmd_ssh() {
|
||||
TAG=$1
|
||||
@@ -483,6 +584,7 @@ _cmd_start() {
|
||||
--settings) SETTINGS=$2; shift 2;;
|
||||
--count) COUNT=$2; shift 2;;
|
||||
--tag) TAG=$2; shift 2;;
|
||||
--students) STUDENTS=$2; shift 2;;
|
||||
*) die "Unrecognized parameter: $1."
|
||||
esac
|
||||
done
|
||||
@@ -494,8 +596,14 @@ _cmd_start() {
|
||||
die "Please add --settings flag to specify which settings file to use."
|
||||
fi
|
||||
if [ -z "$COUNT" ]; then
|
||||
COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
|
||||
warning "No --count option was specified. Using value from settings file ($COUNT)."
|
||||
CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
|
||||
if [ -z "$STUDENTS" ]; then
|
||||
warning "Neither --count nor --students was specified."
|
||||
warning "According to the settings file, the cluster size is $CLUSTERSIZE."
|
||||
warning "Deploying one cluster of $CLUSTERSIZE nodes."
|
||||
STUDENTS=1
|
||||
fi
|
||||
COUNT=$(($STUDENTS*$CLUSTERSIZE))
|
||||
fi
|
||||
|
||||
# Check that the specified settings and infrastructure are valid.
|
||||
@@ -513,11 +621,43 @@ _cmd_start() {
|
||||
infra_start $COUNT
|
||||
sep
|
||||
info "Successfully created $COUNT instances with tag $TAG"
|
||||
sep
|
||||
echo created > tags/$TAG/status
|
||||
|
||||
info "To deploy Docker on these instances, you can run:"
|
||||
info "$0 deploy $TAG"
|
||||
# If the settings.yaml file has a "steps" field,
|
||||
# automatically execute all the actions listed in that field.
|
||||
# If an action fails, retry it up to 10 times.
|
||||
python -c 'if True: # hack to deal with indentation
|
||||
import sys, yaml
|
||||
settings = yaml.safe_load(sys.stdin)
|
||||
print ("\n".join(settings.get("steps", [])))
|
||||
' < tags/$TAG/settings.yaml \
|
||||
| while read step; do
|
||||
if [ -z "$step" ]; then
|
||||
break
|
||||
fi
|
||||
sep
|
||||
info "Automatically executing step '$step'."
|
||||
TRY=1
|
||||
MAXTRY=10
|
||||
while ! $0 $step $TAG ; do
|
||||
TRY=$(($TRY+1))
|
||||
if [ $TRY -gt $MAXTRY ]; then
|
||||
error "This step ($step) failed after $MAXTRY attempts."
|
||||
info "You can troubleshoot the situation manually, or terminate these instances with:"
|
||||
info "$0 stop $TAG"
|
||||
die "Giving up."
|
||||
else
|
||||
sep
|
||||
info "Step '$step' failed. Let's wait 10 seconds and try again."
|
||||
info "(Attempt $TRY out of $MAXTRY.)"
|
||||
sleep 10
|
||||
fi
|
||||
done
|
||||
done
|
||||
sep
|
||||
info "Deployment successful."
|
||||
info "To log into the first machine of that batch, you can run:"
|
||||
info "$0 ssh $TAG"
|
||||
info "To terminate these instances, you can run:"
|
||||
info "$0 stop $TAG"
|
||||
}
|
||||
@@ -581,8 +721,8 @@ _cmd_helmprom() {
|
||||
need_tag
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
sudo -u docker -H helm repo add stable https://kubernetes-charts.storage.googleapis.com/
|
||||
sudo -u docker -H helm install prometheus stable/prometheus \
|
||||
sudo -u docker -H helm repo add prometheus-community https://prometheus-community.github.io/helm-charts/
|
||||
sudo -u docker -H helm install prometheus prometheus-community/prometheus \
|
||||
--namespace kube-system \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
@@ -615,11 +755,12 @@ _cmd_webssh() {
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install python-tornado python-paramiko -y"
|
||||
pssh "
|
||||
[ -d webssh ] || git clone https://github.com/jpetazzo/webssh"
|
||||
cd /opt
|
||||
[ -d webssh ] || sudo git clone https://github.com/jpetazzo/webssh"
|
||||
pssh "
|
||||
for KEYFILE in /etc/ssh/*.pub; do
|
||||
read a b c < \$KEYFILE; echo localhost \$a \$b
|
||||
done > webssh/known_hosts"
|
||||
done | sudo tee /opt/webssh/known_hosts"
|
||||
pssh "cat >webssh.service <<EOF
|
||||
[Unit]
|
||||
Description=webssh
|
||||
@@ -628,7 +769,7 @@ Description=webssh
|
||||
WantedBy=multi-user.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/home/ubuntu/webssh
|
||||
WorkingDirectory=/opt/webssh
|
||||
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
|
||||
User=nobody
|
||||
Group=nogroup
|
||||
@@ -651,11 +792,6 @@ _cmd_www() {
|
||||
python3 -m http.server
|
||||
}
|
||||
|
||||
greet() {
|
||||
IAMUSER=$(aws iam get-user --query 'User.UserName')
|
||||
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
|
||||
}
|
||||
|
||||
pull_tag() {
|
||||
# Pre-pull a bunch of images
|
||||
pssh --timeout 900 'for I in \
|
||||
@@ -745,27 +881,3 @@ make_key_name() {
|
||||
SHORT_FINGERPRINT=$(ssh-add -l | grep RSA | head -n1 | cut -d " " -f 2 | tr -d : | cut -c 1-8)
|
||||
echo "${SHORT_FINGERPRINT}-${USER}"
|
||||
}
|
||||
|
||||
sync_keys() {
|
||||
# make sure ssh-add -l contains "RSA"
|
||||
ssh-add -l | grep -q RSA \
|
||||
|| die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent, add your keys?"
|
||||
|
||||
AWS_KEY_NAME=$(make_key_name)
|
||||
info "Syncing keys... "
|
||||
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
|
||||
aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
|
||||
--public-key-material "$(ssh-add -L \
|
||||
| grep -i RSA \
|
||||
| head -n1 \
|
||||
| cut -d " " -f 1-2)" &>/dev/null
|
||||
|
||||
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
|
||||
die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
|
||||
else
|
||||
info "Imported new key $AWS_KEY_NAME."
|
||||
fi
|
||||
else
|
||||
info "Using existing key $AWS_KEY_NAME."
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
if ! command -v aws >/dev/null; then
|
||||
warn "AWS CLI (aws) not found."
|
||||
fi
|
||||
|
||||
infra_list() {
|
||||
aws_display_tags
|
||||
aws ec2 describe-instances --output json |
|
||||
jq -r '.Reservations[].Instances[] | [.InstanceId, .ClientToken, .State.Name, .InstanceType ] | @tsv'
|
||||
}
|
||||
|
||||
infra_quotas() {
|
||||
greet
|
||||
aws_greet
|
||||
|
||||
max_instances=$(aws ec2 describe-account-attributes \
|
||||
--attribute-names max-instances \
|
||||
@@ -21,10 +26,10 @@ infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
# Print our AWS username, to ease the pain of credential-juggling
|
||||
greet
|
||||
aws_greet
|
||||
|
||||
# Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
|
||||
key_name=$(sync_keys)
|
||||
key_name=$(aws_sync_keys)
|
||||
|
||||
AMI=$(aws_get_ami) # Retrieve the AWS image ID
|
||||
if [ -z "$AMI" ]; then
|
||||
@@ -61,7 +66,7 @@ infra_start() {
|
||||
aws_tag_instances $TAG $TAG
|
||||
|
||||
# Wait until EC2 API tells us that the instances are running
|
||||
wait_until_tag_is_running $TAG $COUNT
|
||||
aws_wait_until_tag_is_running $TAG $COUNT
|
||||
|
||||
aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt
|
||||
}
|
||||
@@ -98,7 +103,7 @@ infra_disableaddrchecks() {
|
||||
done
|
||||
}
|
||||
|
||||
wait_until_tag_is_running() {
|
||||
aws_wait_until_tag_is_running() {
|
||||
max_retry=100
|
||||
i=0
|
||||
done_count=0
|
||||
@@ -214,3 +219,32 @@ aws_get_ami() {
|
||||
##VERSION##
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
|
||||
}
|
||||
|
||||
aws_greet() {
|
||||
IAMUSER=$(aws iam get-user --query 'User.UserName')
|
||||
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
|
||||
}
|
||||
|
||||
aws_sync_keys() {
|
||||
# make sure ssh-add -l contains "RSA"
|
||||
ssh-add -l | grep -q RSA \
|
||||
|| die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent, add your keys?"
|
||||
|
||||
AWS_KEY_NAME=$(make_key_name)
|
||||
info "Syncing keys... "
|
||||
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
|
||||
aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
|
||||
--public-key-material "$(ssh-add -L \
|
||||
| grep -i RSA \
|
||||
| head -n1 \
|
||||
| cut -d " " -f 1-2)" &>/dev/null
|
||||
|
||||
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
|
||||
die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
|
||||
else
|
||||
info "Imported new key $AWS_KEY_NAME."
|
||||
fi
|
||||
else
|
||||
info "Using existing key $AWS_KEY_NAME."
|
||||
fi
|
||||
}
|
||||
|
||||
57
prepare-vms/lib/infra/hetzner.sh
Normal file
@@ -0,0 +1,57 @@
|
||||
if ! command -v hcloud >/dev/null; then
|
||||
warn "Hetzner CLI (hcloud) not found."
|
||||
fi
|
||||
if ! [ -f ~/.config/hcloud/cli.toml ]; then
|
||||
warn "~/.config/hcloud/cli.toml not found."
|
||||
fi
|
||||
|
||||
infra_list() {
|
||||
[ "$(hcloud server list -o json)" = "null" ] && return
|
||||
|
||||
hcloud server list -o json |
|
||||
jq -r '.[] | [.id, .name , .status, .server_type.name] | @tsv'
|
||||
}
|
||||
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
HETZNER_INSTANCE_TYPE=${HETZNER_INSTANCE_TYPE-cx21}
|
||||
HETZNER_DATACENTER=${HETZNER_DATACENTER-nbg1-dc3}
|
||||
HETZNER_IMAGE=${HETZNER_IMAGE-168855}
|
||||
|
||||
for I in $(seq 1 $COUNT); do
|
||||
NAME=$(printf "%s-%03d" $TAG $I)
|
||||
sep "Starting instance $I/$COUNT"
|
||||
info " Datacenter: $HETZNER_DATACENTER"
|
||||
info " Name: $NAME"
|
||||
info " Instance type: $HETZNER_INSTANCE_TYPE"
|
||||
hcloud server create \
|
||||
--type=${HETZNER_INSTANCE_TYPE} \
|
||||
--datacenter=${HETZNER_DATACENTER} \
|
||||
--image=${HETZNER_IMAGE} \
|
||||
--name=$NAME \
|
||||
--label=tag=$TAG \
|
||||
--ssh-key ~/.ssh/id_rsa.pub
|
||||
done
|
||||
|
||||
hetzner_get_ips_by_tag $TAG > tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
for ID in $(hetzner_get_ids_by_tag $TAG); do
|
||||
info "Scheduling deletion of instance $ID..."
|
||||
hcloud server delete $ID &
|
||||
done
|
||||
info "Waiting for deletion to complete..."
|
||||
wait
|
||||
}
|
||||
|
||||
hetzner_get_ids_by_tag() {
|
||||
TAG=$1
|
||||
hcloud server list --selector=tag=$TAG -o json | jq -r .[].name
|
||||
}
|
||||
|
||||
hetzner_get_ips_by_tag() {
|
||||
TAG=$1
|
||||
hcloud server list --selector=tag=$TAG -o json | jq -r .[].public_net.ipv4.ip
|
||||
}
|
||||
53
prepare-vms/lib/infra/openstack-cli.sh
Normal file
@@ -0,0 +1,53 @@
|
||||
infra_list() {
|
||||
openstack server list -f json |
|
||||
jq -r '.[] | [.ID, .Name , .Status, .Flavor] | @tsv'
|
||||
}
|
||||
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
sep "Starting $COUNT instances"
|
||||
info " Region: $OS_REGION_NAME"
|
||||
info " User: $OS_USERNAME"
|
||||
info " Flavor: $OS_FLAVOR"
|
||||
info " Image: $OS_IMAGE"
|
||||
openstack server create \
|
||||
--flavor $OS_FLAVOR \
|
||||
--image $OS_IMAGE \
|
||||
--key-name $OS_KEY \
|
||||
--min $COUNT --max $COUNT \
|
||||
--property workshopctl=$TAG \
|
||||
$TAG
|
||||
|
||||
sep "Waiting for IP addresses to be available"
|
||||
GOT=0
|
||||
while [ "$GOT" != "$COUNT" ]; do
|
||||
echo "Got $GOT/$COUNT IP addresses."
|
||||
oscli_get_ips_by_tag $TAG > tags/$TAG/ips.txt
|
||||
GOT="$(wc -l < tags/$TAG/ips.txt)"
|
||||
done
|
||||
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
info "Counting instances..."
|
||||
oscli_get_instances_json $TAG |
|
||||
jq -r .[].Name |
|
||||
wc -l
|
||||
info "Deleting instances..."
|
||||
oscli_get_instances_json $TAG |
|
||||
jq -r .[].Name |
|
||||
xargs -P10 -n1 openstack server delete
|
||||
info "Done."
|
||||
}
|
||||
|
||||
oscli_get_instances_json() {
|
||||
TAG=$1
|
||||
openstack server list -f json --name "${TAG}-[0-9]*"
|
||||
}
|
||||
|
||||
oscli_get_ips_by_tag() {
|
||||
TAG=$1
|
||||
oscli_get_instances_json $TAG |
|
||||
jq -r .[].Networks | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' || true
|
||||
}
|
||||
51
prepare-vms/lib/infra/scaleway.sh
Normal file
@@ -0,0 +1,51 @@
|
||||
if ! command -v scw >/dev/null; then
|
||||
warn "Scaleway CLI (scw) not found."
|
||||
fi
|
||||
if ! [ -f ~/.config/scw/config.yaml ]; then
|
||||
warn "~/.config/scw/config.yaml not found."
|
||||
fi
|
||||
|
||||
SCW_INSTANCE_TYPE=${SCW_INSTANCE_TYPE-DEV1-M}
|
||||
SCW_ZONE=${SCW_ZONE-fr-par-1}
|
||||
|
||||
infra_list() {
|
||||
scw instance server list -o json |
|
||||
jq -r '.[] | [.id, .name, .state, .commercial_type] | @tsv'
|
||||
}
|
||||
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
for I in $(seq 1 $COUNT); do
|
||||
NAME=$(printf "%s-%03d" $TAG $I)
|
||||
sep "Starting instance $I/$COUNT"
|
||||
info " Zone: $SCW_ZONE"
|
||||
info " Name: $NAME"
|
||||
info " Instance type: $SCW_INSTANCE_TYPE"
|
||||
scw instance server create \
|
||||
type=${SCW_INSTANCE_TYPE} zone=${SCW_ZONE} \
|
||||
image=ubuntu_bionic name=${NAME}
|
||||
done
|
||||
sep
|
||||
|
||||
scw_get_ips_by_tag $TAG > tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
info "Counting instances..."
|
||||
scw_get_ids_by_tag $TAG | wc -l
|
||||
info "Deleting instances..."
|
||||
scw_get_ids_by_tag $TAG |
|
||||
xargs -n1 -P10 \
|
||||
scw instance server delete zone=${SCW_ZONE} force-shutdown=true with-ip=true
|
||||
}
|
||||
|
||||
scw_get_ids_by_tag() {
|
||||
TAG=$1
|
||||
scw instance server list zone=${SCW_ZONE} name=$TAG -o json | jq -r .[].id
|
||||
}
|
||||
|
||||
scw_get_ips_by_tag() {
|
||||
TAG=$1
|
||||
scw instance server list zone=${SCW_ZONE} name=$TAG -o json | jq -r .[].public_ip.address
|
||||
}
|
||||
prepare-vms/lib/infra/unimplemented.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
infra_disableaddrchecks() {
    die "unimplemented"
}

infra_list() {
    die "unimplemented"
}

infra_opensg() {
    die "unimplemented"
}

infra_quotas() {
    die "unimplemented"
}

infra_start() {
    die "unimplemented"
}

infra_stop() {
    die "unimplemented"
}
@@ -37,7 +37,7 @@ def system(cmd):
|
||||
td = str(t2-t1)[:5]
|
||||
f.write(bold("[{}] in {}s\n".format(retcode, td)))
|
||||
STEP += 1
|
||||
with open("/home/ubuntu/.bash_history", "a") as f:
|
||||
with open(os.environ["HOME"] + "/.bash_history", "a") as f:
|
||||
f.write("{}\n".format(cmd))
|
||||
if retcode != 0:
|
||||
msg = "The following command failed with exit code {}:\n".format(retcode)
|
||||
@@ -114,7 +114,7 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
|
||||
|
||||
system("sudo service ssh restart")
|
||||
system("sudo apt-get -q update")
|
||||
system("sudo apt-get -qy install git jq")
|
||||
system("sudo apt-get -qy install git jid jq")
|
||||
system("sudo apt-get -qy install emacs-nox joe")
|
||||
|
||||
#######################
|
||||
|
||||
@@ -18,7 +18,13 @@ pssh() {
|
||||
echo "[parallel-ssh] $@"
|
||||
export PSSH=$(which pssh || which parallel-ssh)
|
||||
|
||||
$PSSH -h $HOSTFILE -l ubuntu \
|
||||
if [ "$INFRACLASS" = hetzner ]; then
|
||||
LOGIN=root
|
||||
else
|
||||
LOGIN=ubuntu
|
||||
fi
|
||||
|
||||
$PSSH -h $HOSTFILE -l $LOGIN \
|
||||
--par 100 \
|
||||
-O LogLevel=ERROR \
|
||||
-O UserKnownHostsFile=/dev/null \
|
||||
|
||||
@@ -1,49 +1,71 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
There are two ways to use this script:
|
||||
|
||||
1. Pass a file name and a tag name as a single argument.
|
||||
It will load a list of domains from the given file (one per line),
|
||||
and assign them to the clusters corresponding to that tag.
|
||||
There should be more domains than clusters.
|
||||
Example: ./map-dns.py domains.txt 2020-08-15-jp
|
||||
|
||||
2. Pass a domain as the 1st argument, and IP addresses then.
|
||||
It will configure the domain with the listed IP addresses.
|
||||
Example: ./map-dns.py open-duck.site 1.2.3.4 2.3.4.5 3.4.5.6
|
||||
|
||||
In both cases, the domains should be configured to use GANDI LiveDNS.
|
||||
"""
|
||||
import os
|
||||
import requests
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
# configurable stuff
|
||||
domains_file = "../../plentydomains/domains.txt"
|
||||
# This can be tweaked if necessary.
|
||||
config_file = os.path.join(
|
||||
os.environ["HOME"], ".config/gandi/config.yaml")
|
||||
tag = "test"
|
||||
os.environ["HOME"], ".config/gandi/config.yaml")
|
||||
apiurl = "https://dns.api.gandi.net/api/v5/domains"
|
||||
|
||||
# inferred stuff
|
||||
domains = open(domains_file).read().split()
|
||||
apikey = yaml.safe_load(open(config_file))["apirest"]["key"]
|
||||
ips = open(f"tags/{tag}/ips.txt").read().split()
|
||||
settings_file = f"tags/{tag}/settings.yaml"
|
||||
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
|
||||
|
||||
# now do the fucking work
|
||||
# Figure out if we're called for a bunch of domains, or just one.
|
||||
domain_or_domain_file = sys.argv[1]
|
||||
if os.path.isfile(domain_or_domain_file):
|
||||
domains = open(domain_or_domain_file).read().split()
|
||||
domains = [ d for d in domains if not d.startswith('#') ]
|
||||
tag = sys.argv[2]
|
||||
ips = open(f"tags/{tag}/ips.txt").read().split()
|
||||
settings_file = f"tags/{tag}/settings.yaml"
|
||||
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
|
||||
else:
|
||||
domains = [domain_or_domain_file]
|
||||
ips = sys.argv[2:]
|
||||
clustersize = len(ips)
|
||||
|
||||
# Now, do the work.
|
||||
while domains and ips:
|
||||
domain = domains[0]
|
||||
domains = domains[1:]
|
||||
cluster = ips[:clustersize]
|
||||
ips = ips[clustersize:]
|
||||
print(f"{domain} => {cluster}")
|
||||
zone = ""
|
||||
node = 0
|
||||
for ip in cluster:
|
||||
node += 1
|
||||
zone += f"@ 300 IN A {ip}\n"
|
||||
zone += f"* 300 IN A {ip}\n"
|
||||
zone += f"node{node} 300 IN A {ip}\n"
|
||||
r = requests.put(
|
||||
f"{apiurl}/{domain}/records",
|
||||
headers={"x-api-key": apikey},
|
||||
data=zone)
|
||||
print(r.text)
|
||||
domain = domains[0]
|
||||
domains = domains[1:]
|
||||
cluster = ips[:clustersize]
|
||||
ips = ips[clustersize:]
|
||||
print(f"{domain} => {cluster}")
|
||||
zone = ""
|
||||
node = 0
|
||||
for ip in cluster:
|
||||
node += 1
|
||||
zone += f"@ 300 IN A {ip}\n"
|
||||
zone += f"* 300 IN A {ip}\n"
|
||||
zone += f"node{node} 300 IN A {ip}\n"
|
||||
r = requests.put(
|
||||
f"{apiurl}/{domain}/records",
|
||||
headers={"x-api-key": apikey},
|
||||
data=zone)
|
||||
print(r.text)
|
||||
|
||||
#r = requests.get(
|
||||
# f"{apiurl}/{domain}/records",
|
||||
# headers={"x-api-key": apikey},
|
||||
# )
|
||||
#r = requests.get(
|
||||
# f"{apiurl}/{domain}/records",
|
||||
# headers={"x-api-key": apikey},
|
||||
# )
|
||||
|
||||
if domains:
|
||||
print(f"Good, we have {len(domains)} domains left.")
|
||||
print(f"Good, we have {len(domains)} domains left.")
|
||||
|
||||
if ips:
|
||||
print(f"Crap, we have {len(ips)} IP addresses left.")
|
||||
print(f"Crap, we have {len(ips)} IP addresses left.")
|
||||
|
||||
@@ -21,3 +21,9 @@ machine_version: 0.15.0
 
 # Password used to connect with the "docker user"
 docker_user_password: training
+
+steps:
+- deploy
+- webssh
+- tailhist
+- cards
@@ -20,3 +20,11 @@ machine_version: 0.14.0
 # Password used to connect with the "docker user"
 docker_user_password: training
 
+steps:
+- deploy
+- webssh
+- tailhist
+- kube
+- kubetools
+- cards
+- kubetest
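These "steps" lists are what the new auto-deploy loop in workshopctl start iterates over, retrying each step up to 10 times. A sketch of the equivalent manual sequence, assuming a tag created by maketag (the tag value below is illustrative, not from the diff):

    # Roughly what the automatic steps execution now does after "workshopctl start":
    for step in deploy webssh tailhist kube kubetools cards kubetest; do
      ./workshopctl $step 2021-03-01-09-00-123-jerome
    done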
@@ -35,6 +35,8 @@ TAG=$PREFIX-$SETTINGS
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl disabledocker $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kubenet
|
||||
@@ -48,6 +50,8 @@ TAG=$PREFIX-$SETTINGS
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
SETTINGS=admin-kuberouter
|
||||
@@ -61,6 +65,8 @@ TAG=$PREFIX-$SETTINGS
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kubebins $TAG
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
#INFRA=infra/aws-us-west-1
|
||||
@@ -76,5 +82,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kube $TAG 1.15.9
|
||||
retry 5 ./workshopctl kube $TAG 1.17.13
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
resource "openstack_compute_instance_v2" "machine" {
|
||||
count = "${var.count}"
|
||||
name = "${format("%s-%04d", "${var.prefix}", count.index+1)}"
|
||||
image_name = "Ubuntu 16.04.5 (Xenial Xerus)"
|
||||
image_name = "Ubuntu 18.04.4 20200324"
|
||||
flavor_name = "${var.flavor}"
|
||||
security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"]
|
||||
key_pair = "${openstack_compute_keypair_v2.ssh_deploy_key.name}"
|
||||
|
||||
@@ -15,7 +15,6 @@ for lib in lib/*.sh; do
|
||||
done
|
||||
|
||||
DEPENDENCIES="
|
||||
aws
|
||||
ssh
|
||||
curl
|
||||
jq
|
||||
|
||||
@@ -1,15 +1,24 @@
|
||||
# Uncomment and/or edit one of the the following lines if necessary.
|
||||
#/ /kube-halfday.yml.html 200
|
||||
#/ /kube-fullday.yml.html 200
|
||||
#/ /kube-twodays.yml.html 200
|
||||
#/ /kube-halfday.yml.html 200!
|
||||
#/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-twodays.yml.html 200!
|
||||
/ /kube.yml.html 200!
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
#/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
|
||||
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
|
||||
/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=SWEETFEBSALEC1
|
||||
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=SWEETFEBSALEC4
|
||||
/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
|
||||
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
|
||||
#/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=DOCKERALLDAY
|
||||
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=DOCKERALLDAY
|
||||
|
||||
# Shortlink for the QRCode
|
||||
/q /qrcode.html 200
|
||||
|
||||
# Shortlinks for next training in English and French
|
||||
#/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
|
||||
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
|
||||
/hi5 https://enix.io/fr/services/formation/online/
|
||||
|
||||
# Survey form
|
||||
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform
|
||||
|
||||
@@ -233,7 +233,7 @@ def setup_tmux_and_ssh():
|
||||
ipaddr = "$IPADDR"
|
||||
uid = os.getuid()
|
||||
|
||||
raise Exception("""
|
||||
raise Exception(r"""
|
||||
1. If you're running this directly from a node:
|
||||
|
||||
tmux
|
||||
@@ -247,6 +247,16 @@ rm -f /tmp/tmux-{uid}/default && ssh -t -L /tmp/tmux-{uid}/default:/tmp/tmux-100
|
||||
3. If you cannot control a remote tmux:
|
||||
|
||||
tmux new-session ssh docker@{ipaddr}
|
||||
|
||||
4. If you are running this locally with a remote cluster, make sure your prompt has the expected format:
|
||||
|
||||
tmux
|
||||
IPADDR=$(
|
||||
kubectl get nodes -o json |
|
||||
jq -r '.items[0].status.addresses[] | select(.type=="ExternalIP") | .address'
|
||||
)
|
||||
export PS1="\n[{ipaddr}] \u@\h:\w\n\$ "
|
||||
|
||||
""".format(uid=uid, ipaddr=ipaddr))
|
||||
else:
|
||||
logging.info("Found tmux session. Trying to acquire shell prompt.")
|
||||
|
||||
298
slides/autopilot/package-lock.json
generated
@@ -24,14 +24,9 @@
|
||||
"integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
|
||||
},
|
||||
"arraybuffer.slice": {
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz",
|
||||
"integrity": "sha1-8zshWfBTKj8xB6JywMz70a0peco="
|
||||
},
|
||||
"async-limiter": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz",
|
||||
"integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg=="
|
||||
"version": "0.0.7",
|
||||
"resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.7.tgz",
|
||||
"integrity": "sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog=="
|
||||
},
|
||||
"backo2": {
|
||||
"version": "1.0.2",
|
||||
@@ -39,27 +34,19 @@
|
||||
"integrity": "sha1-MasayLEpNjRj41s+u2n038+6eUc="
|
||||
},
|
||||
"base64-arraybuffer": {
|
||||
"version": "0.1.5",
|
||||
"resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz",
|
||||
"integrity": "sha1-c5JncZI7Whl0etZmqlzUv5xunOg="
|
||||
"version": "0.1.4",
|
||||
"resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.4.tgz",
|
||||
"integrity": "sha1-mBjHngWbE1X5fgQooBfIOOkLqBI="
|
||||
},
|
||||
"base64id": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/base64id/-/base64id-1.0.0.tgz",
|
||||
"integrity": "sha1-R2iMuZu2gE8OBtPnY7HDLlfY5rY="
|
||||
},
|
||||
"better-assert": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/better-assert/-/better-assert-1.0.2.tgz",
|
||||
"integrity": "sha1-QIZrnhueC1W0gYlDEeaPr/rrxSI=",
|
||||
"requires": {
|
||||
"callsite": "1.0.0"
|
||||
}
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz",
|
||||
"integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog=="
|
||||
},
|
||||
"blob": {
|
||||
"version": "0.0.4",
|
||||
"resolved": "https://registry.npmjs.org/blob/-/blob-0.0.4.tgz",
|
||||
"integrity": "sha1-vPEwUspURj8w+fx+lbmkdjCpSSE="
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/blob/-/blob-0.0.5.tgz",
|
||||
"integrity": "sha512-gaqbzQPqOoamawKg0LGVd7SzLgXS+JH61oWprSLH+P+abTczqJbhTR8CmJ2u9/bUYNmHTGJx/UEmn6doAvvuig=="
|
||||
},
|
||||
"body-parser": {
|
||||
"version": "1.18.2",
|
||||
@@ -83,20 +70,15 @@
|
||||
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
|
||||
"integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
|
||||
},
|
||||
"callsite": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/callsite/-/callsite-1.0.0.tgz",
|
||||
"integrity": "sha1-KAOY5dZkvXQDi28JBRU+borxvCA="
|
||||
},
|
||||
"component-bind": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/component-bind/-/component-bind-1.0.0.tgz",
|
||||
"integrity": "sha1-AMYIq33Nk4l8AAllGx06jh5zu9E="
|
||||
},
|
||||
"component-emitter": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
|
||||
"integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY="
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
|
||||
"integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg=="
|
||||
},
|
||||
"component-inherit": {
|
||||
"version": "0.0.3",
|
||||
@@ -152,58 +134,76 @@
|
||||
"integrity": "sha1-eePVhlU0aQn+bw9Fpd5oEDspTSA="
|
||||
},
|
||||
"engine.io": {
|
||||
"version": "3.1.4",
|
||||
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.1.4.tgz",
|
||||
"integrity": "sha1-PQIRtwpVLOhB/8fahiezAamkFi4=",
|
||||
"version": "3.5.0",
|
||||
"resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.5.0.tgz",
|
||||
"integrity": "sha512-21HlvPUKaitDGE4GXNtQ7PLP0Sz4aWLddMPw2VTyFz1FVZqu/kZsJUO8WNpKuE/OCL7nkfRaOui2ZCJloGznGA==",
|
||||
"requires": {
|
||||
"accepts": "1.3.3",
|
||||
"base64id": "1.0.0",
|
||||
"cookie": "0.3.1",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-parser": "2.1.1",
|
||||
"uws": "0.14.5",
|
||||
"ws": "3.3.3"
|
||||
"accepts": "~1.3.4",
|
||||
"base64id": "2.0.0",
|
||||
"cookie": "~0.4.1",
|
||||
"debug": "~4.1.0",
|
||||
"engine.io-parser": "~2.2.0",
|
||||
"ws": "~7.4.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"accepts": {
|
||||
"version": "1.3.3",
|
||||
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.3.tgz",
|
||||
"integrity": "sha1-w8p0NJOGSMPg2cHjKN1otiLChMo=",
|
||||
"cookie": {
|
||||
"version": "0.4.1",
|
||||
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.1.tgz",
|
||||
"integrity": "sha512-ZwrFkGJxUR3EIoXtO+yVE69Eb7KlixbaeAWfBQB9vVsNn/o+Yw69gBWSSDK825hQNdN+wF8zELf3dFNl/kxkUA=="
|
||||
},
|
||||
"debug": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
|
||||
"integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
|
||||
"requires": {
|
||||
"mime-types": "2.1.17",
|
||||
"negotiator": "0.6.1"
|
||||
"ms": "^2.1.1"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"engine.io-client": {
|
||||
"version": "3.1.4",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.1.4.tgz",
|
||||
"integrity": "sha1-T88TcLRxY70s6b4nM5ckMDUNTqE=",
|
||||
"version": "3.5.0",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.5.0.tgz",
|
||||
"integrity": "sha512-12wPRfMrugVw/DNyJk34GQ5vIVArEcVMXWugQGGuw2XxUSztFNmJggZmv8IZlLyEdnpO1QB9LkcjeWewO2vxtA==",
|
||||
"requires": {
|
||||
"component-emitter": "1.2.1",
|
||||
"component-emitter": "~1.3.0",
|
||||
"component-inherit": "0.0.3",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-parser": "2.1.1",
|
||||
"debug": "~3.1.0",
|
||||
"engine.io-parser": "~2.2.0",
|
||||
"has-cors": "1.1.0",
|
||||
"indexof": "0.0.1",
|
||||
"parseqs": "0.0.5",
|
||||
"parseuri": "0.0.5",
|
||||
"ws": "3.3.3",
|
||||
"xmlhttprequest-ssl": "1.5.4",
|
||||
"parseqs": "0.0.6",
|
||||
"parseuri": "0.0.6",
|
||||
"ws": "~7.4.2",
|
||||
"xmlhttprequest-ssl": "~1.5.4",
|
||||
"yeast": "0.1.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
|
||||
"integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
|
||||
"requires": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"engine.io-parser": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.1.1.tgz",
|
||||
"integrity": "sha1-4Ps/DgRi9/WLt3waUun1p+JuRmg=",
|
||||
"version": "2.2.1",
|
||||
"resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.2.1.tgz",
|
||||
"integrity": "sha512-x+dN/fBH8Ro8TFwJ+rkB2AmuVw9Yu2mockR/p3W8f8YtExwFgDvBDi0GWyb4ZLkpahtDGZgtr3zLovanJghPqg==",
|
||||
"requires": {
|
||||
"after": "0.8.2",
|
||||
"arraybuffer.slice": "0.0.6",
|
||||
"base64-arraybuffer": "0.1.5",
|
||||
"blob": "0.0.4",
|
||||
"has-binary2": "1.0.2"
|
||||
"arraybuffer.slice": "~0.0.7",
|
||||
"base64-arraybuffer": "0.1.4",
|
||||
"blob": "0.0.5",
|
||||
"has-binary2": "~1.0.2"
|
||||
}
|
||||
},
|
||||
"escape-html": {
|
||||
@@ -278,9 +278,9 @@
|
||||
"integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
|
||||
},
|
||||
"has-binary2": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.2.tgz",
|
||||
"integrity": "sha1-6D26SfC5vk0CbSc2U1DZ8D9Uvpg=",
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.3.tgz",
|
||||
"integrity": "sha512-G1LWKhDSvhGeAQ8mPVQlqNcOB2sJdwATtZKl2pDKKHfpf/rYj24lkinxf69blJbnsvtqqNU+L3SL50vzZhXOnw==",
|
||||
"requires": {
|
||||
"isarray": "2.0.1"
|
||||
}
|
||||
@@ -376,11 +376,6 @@
|
||||
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
|
||||
"integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk="
|
||||
},
|
||||
"object-component": {
|
||||
"version": "0.0.3",
|
||||
"resolved": "https://registry.npmjs.org/object-component/-/object-component-0.0.3.tgz",
|
||||
"integrity": "sha1-8MaapQ78lbhmwYb0AKM3acsvEpE="
|
||||
},
|
||||
"on-finished": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
|
||||
@@ -390,20 +385,14 @@
|
||||
}
|
||||
},
|
||||
"parseqs": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.5.tgz",
|
||||
"integrity": "sha1-1SCKNzjkZ2bikbouoXNoSSGouJ0=",
|
||||
"requires": {
|
||||
"better-assert": "1.0.2"
|
||||
}
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.6.tgz",
|
||||
"integrity": "sha512-jeAGzMDbfSHHA091hr0r31eYfTig+29g3GKKE/PPbEQ65X0lmMwlEoqmhzu0iztID5uJpZsFlUPDP8ThPL7M8w=="
|
||||
},
|
||||
"parseuri": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/parseuri/-/parseuri-0.0.5.tgz",
|
||||
"integrity": "sha1-gCBKUNTbt3m/3G6+J3jZDkvOMgo=",
|
||||
"requires": {
|
||||
"better-assert": "1.0.2"
|
||||
}
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/parseuri/-/parseuri-0.0.6.tgz",
|
||||
"integrity": "sha512-AUjen8sAkGgao7UyCX6Ahv0gIK2fABKmYjvP4xmy5JaKvcbTRueIqIPHLAfq30xJddqSE033IOMUSOMCcK3Sow=="
|
||||
},
|
||||
"parseurl": {
|
||||
"version": "1.3.2",
|
||||
@@ -487,51 +476,104 @@
|
||||
"integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
|
||||
},
|
||||
"socket.io": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.0.4.tgz",
|
||||
"integrity": "sha1-waRZDO/4fs8TxyZS8Eb3FrKeYBQ=",
|
||||
"version": "2.4.0",
|
||||
"resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.4.0.tgz",
|
||||
"integrity": "sha512-9UPJ1UTvKayuQfVv2IQ3k7tCQC/fboDyIK62i99dAQIyHKaBsNdTpwHLgKJ6guRWxRtC9H+138UwpaGuQO9uWQ==",
|
||||
"requires": {
|
||||
"debug": "2.6.9",
|
||||
"engine.io": "3.1.4",
|
||||
"socket.io-adapter": "1.1.1",
|
||||
"socket.io-client": "2.0.4",
|
||||
"socket.io-parser": "3.1.2"
|
||||
"debug": "~4.1.0",
|
||||
"engine.io": "~3.5.0",
|
||||
"has-binary2": "~1.0.2",
|
||||
"socket.io-adapter": "~1.1.0",
|
||||
"socket.io-client": "2.4.0",
|
||||
"socket.io-parser": "~3.4.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
|
||||
"integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
|
||||
"requires": {
|
||||
"ms": "^2.1.1"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"socket.io-adapter": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.1.tgz",
|
||||
"integrity": "sha1-KoBeihTWNyEk3ZFZrUUC+MsH8Gs="
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.2.tgz",
|
||||
"integrity": "sha512-WzZRUj1kUjrTIrUKpZLEzFZ1OLj5FwLlAFQs9kuZJzJi5DKdU7FsWc36SNmA8iDOtwBQyT8FkrriRM8vXLYz8g=="
|
||||
},
|
||||
"socket.io-client": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.0.4.tgz",
|
||||
"integrity": "sha1-CRilUkBtxeVAs4Dc2Xr8SmQzL44=",
|
||||
"version": "2.4.0",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.4.0.tgz",
|
||||
"integrity": "sha512-M6xhnKQHuuZd4Ba9vltCLT9oa+YvTsP8j9NcEiLElfIg8KeYPyhWOes6x4t+LTAC8enQbE/995AdTem2uNyKKQ==",
|
||||
"requires": {
|
||||
"backo2": "1.0.2",
|
||||
"base64-arraybuffer": "0.1.5",
|
||||
"component-bind": "1.0.0",
|
||||
"component-emitter": "1.2.1",
|
||||
"debug": "2.6.9",
|
||||
"engine.io-client": "3.1.4",
|
||||
"has-cors": "1.1.0",
|
||||
"component-emitter": "~1.3.0",
|
||||
"debug": "~3.1.0",
|
||||
"engine.io-client": "~3.5.0",
|
||||
"has-binary2": "~1.0.2",
|
||||
"indexof": "0.0.1",
|
||||
"object-component": "0.0.3",
|
||||
"parseqs": "0.0.5",
|
||||
"parseuri": "0.0.5",
|
||||
"socket.io-parser": "3.1.2",
|
||||
"parseqs": "0.0.6",
|
||||
"parseuri": "0.0.6",
|
||||
"socket.io-parser": "~3.3.0",
|
||||
"to-array": "0.1.4"
|
||||
},
|
||||
"dependencies": {
|
||||
"debug": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
|
||||
"integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
|
||||
"requires": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
},
|
||||
"socket.io-parser": {
|
||||
"version": "3.3.2",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.3.2.tgz",
|
||||
"integrity": "sha512-FJvDBuOALxdCI9qwRrO/Rfp9yfndRtc1jSgVgV8FDraihmSP/MLGD5PEuJrNfjALvcQ+vMDM/33AWOYP/JSjDg==",
|
||||
"requires": {
|
||||
"component-emitter": "~1.3.0",
|
||||
"debug": "~3.1.0",
|
||||
"isarray": "2.0.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"socket.io-parser": {
|
||||
"version": "3.1.2",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.1.2.tgz",
|
||||
"integrity": "sha1-28IoIVH8T6675Aru3Ady66YZ9/I=",
|
||||
"version": "3.4.1",
|
||||
"resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.4.1.tgz",
|
||||
"integrity": "sha512-11hMgzL+WCLWf1uFtHSNvliI++tcRUWdoeYuwIl+Axvwy9z2gQM+7nJyN3STj1tLj5JyIUH8/gpDGxzAlDdi0A==",
|
||||
"requires": {
|
||||
"component-emitter": "1.2.1",
|
||||
"debug": "2.6.9",
|
||||
"has-binary2": "1.0.2",
|
||||
"debug": "~4.1.0",
|
||||
"isarray": "2.0.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"component-emitter": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
|
||||
"integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY="
|
||||
},
|
||||
"debug": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
|
||||
"integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
|
||||
"requires": {
|
||||
"ms": "^2.1.1"
|
||||
}
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"statuses": {
|
||||
@@ -553,11 +595,6 @@
|
||||
"mime-types": "2.1.17"
|
||||
}
|
||||
},
|
||||
"ultron": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz",
|
||||
"integrity": "sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og=="
|
||||
},
|
||||
"unpipe": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
|
||||
@@ -568,31 +605,20 @@
|
||||
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
|
||||
"integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
|
||||
},
|
||||
"uws": {
|
||||
"version": "0.14.5",
|
||||
"resolved": "https://registry.npmjs.org/uws/-/uws-0.14.5.tgz",
|
||||
"integrity": "sha1-Z6rzPEaypYel9mZtAPdpEyjxSdw=",
|
||||
"optional": true
|
||||
},
|
||||
"vary": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
|
||||
"integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
|
||||
},
|
||||
"ws": {
|
||||
"version": "3.3.3",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-3.3.3.tgz",
|
||||
"integrity": "sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==",
|
||||
"requires": {
|
||||
"async-limiter": "1.0.0",
|
||||
"safe-buffer": "5.1.1",
|
||||
"ultron": "1.1.1"
|
||||
}
|
||||
"version": "7.4.2",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-7.4.2.tgz",
|
||||
"integrity": "sha512-T4tewALS3+qsrpGI/8dqNMLIVdq/g/85U98HPMa6F0m6xTbvhXU6RCQLqPH3+SlomNV/LdY6RXEbBpMH6EOJnA=="
|
||||
},
|
||||
"xmlhttprequest-ssl": {
|
||||
"version": "1.5.4",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.4.tgz",
|
||||
"integrity": "sha1-BPVgkVcks4kIhxXMDteBPpZ3v1c="
|
||||
"version": "1.5.5",
|
||||
"resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.5.tgz",
|
||||
"integrity": "sha1-wodrBhaKrcQOV9l+gRkayPQ5iz4="
|
||||
},
|
||||
"yeast": {
|
||||
"version": "0.1.2",
|
||||
|
||||
@@ -3,6 +3,6 @@
|
||||
"version": "0.0.1",
|
||||
"dependencies": {
|
||||
"express": "^4.16.2",
|
||||
"socket.io": "^2.0.4"
|
||||
"socket.io": "^2.4.0"
|
||||
}
|
||||
}
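A quick way to sanity-check the bump locally (a sketch, assuming it is run from the app's directory):

```bash
npm install
npm ls socket.io   # should now resolve to a 2.4.x release
npm audit          # check whether known advisories remain
```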
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Advanced Dockerfiles
|
||||
# Advanced Dockerfile Syntax
|
||||
|
||||

|
||||
|
||||
@@ -12,7 +12,10 @@ class: title
|
||||
We have seen simple Dockerfiles to illustrate how Docker builds
container images.
|
||||
|
||||
In this section, we will see more Dockerfile commands.
|
||||
In this section, we will give a recap of the Dockerfile syntax,
and introduce advanced Dockerfile commands that we might
come across sometimes, or that we might want to use in some
specific scenarios.
|
||||
|
||||
---
|
||||
|
||||
@@ -420,3 +423,8 @@ ONBUILD COPY . /src
|
||||
|
||||
* You can't chain `ONBUILD` instructions with `ONBUILD`.
|
||||
* `ONBUILD` can't be used to trigger `FROM` instructions.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Advanced Dockerfile syntax
|
||||
:FR:- Dockerfile niveau expert
|
||||
|
||||
@@ -280,3 +280,8 @@ CONTAINER ID IMAGE ... CREATED STATUS
|
||||
5c1dfd4d81f1 jpetazzo/clock ... 40 min. ago Exited (0) 40 min. ago
|
||||
b13c164401fb ubuntu ... 55 min. ago Exited (130) 53 min. ago
|
||||
```
|
||||
|
||||
???
|
||||
|
||||
:EN:- Foreground and background containers
|
||||
:FR:- Exécution interactive ou en arrière-plan
|
||||
|
||||
@@ -167,3 +167,8 @@ Automated process = good.
|
||||
|
||||
In the next chapter, we will learn how to automate the build
|
||||
process by writing a `Dockerfile`.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Building our first images interactively
|
||||
:FR:- Fabriquer nos premières images à la main
|
||||
|
||||
@@ -363,3 +363,10 @@ In this example, `sh -c` will still be used, but
|
||||
The shell gets replaced by `figlet` when `figlet` starts execution.
|
||||
|
||||
This allows running processes as PID 1 without using JSON.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Towards automated, reproducible builds
|
||||
:EN:- Writing our first Dockerfile
|
||||
:FR:- Rendre le processus automatique et reproductible
|
||||
:FR:- Écrire son premier Dockerfile
|
||||
|
||||
@@ -272,3 +272,7 @@ $ docker run -it --entrypoint bash myfiglet
|
||||
root@6027e44e2955:/#
|
||||
```
|
||||
|
||||
???
|
||||
|
||||
:EN:- CMD and ENTRYPOINT
|
||||
:FR:- CMD et ENTRYPOINT
|
||||
|
||||
@@ -322,3 +322,11 @@ You can:
|
||||
Each copy will run in a different network, totally isolated from the other.
|
||||
|
||||
This is ideal for debugging regressions, doing side-by-side comparisons, etc.
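For instance (a sketch; the project names are arbitrary), two fully isolated copies of the same Compose file:

```bash
docker-compose --project-name app1 up -d
docker-compose --project-name app2 up -d
docker-compose --project-name app1 ps
```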
|
||||
|
||||
???
|
||||
|
||||
:EN:- Using compose to describe an environment
|
||||
:EN:- Connecting services together with a *Compose file*
|
||||
|
||||
:FR:- Utiliser Compose pour décrire son environnement
|
||||
:FR:- Écrire un *Compose file* pour connecter les services entre eux
|
||||
@@ -307,6 +307,8 @@ Let's remove the `redis` container:
|
||||
$ docker rm -f redis
|
||||
```
|
||||
|
||||
* `-f`: Force the removal of a running container (uses SIGKILL)
|
||||
|
||||
And create one that doesn't block the `redis` name:
|
||||
|
||||
```bash
|
||||
|
||||
@@ -226,3 +226,13 @@ We've learned how to:
|
||||
|
||||
In the next chapter, we will see how to connect
|
||||
containers together without exposing their ports.
|
||||
|
||||
???
|
||||
|
||||
:EN:Connecting containers
|
||||
:EN:- Container networking basics
|
||||
:EN:- Exposing a container
|
||||
|
||||
:FR:Connecter les conteneurs
|
||||
:FR:- Description du modèle réseau des conteneurs
|
||||
:FR:- Exposer un conteneur
|
||||
|
||||
@@ -98,3 +98,8 @@ Success!
|
||||
* Place it in a different directory, with the `WORKDIR` instruction.
|
||||
|
||||
* Even better, use the `gcc` official image.
|
||||
|
||||
???
|
||||
|
||||
:EN:- The build cache
|
||||
:FR:- Tirer parti du cache afin d'optimiser la vitesse de *build*
|
||||
|
||||
@@ -424,10 +424,15 @@ services:
|
||||
|
||||
- In this chapter, we showed many ways to write Dockerfiles.
|
||||
|
||||
- These Dockerfiles use sometimes diametrally opposed techniques.
|
||||
- These Dockerfiles sometimes use diametrically opposed techniques.
|
||||
|
||||
- Yet, they were the "right" ones *for a specific situation.*
|
||||
|
||||
- It's OK (and even encouraged) to start simple and evolve as needed.
|
||||
|
||||
- Feel free to review this chapter later (after writing a few Dockerfiles) for inspiration!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Dockerfile tips, tricks, and best practices
|
||||
:FR:- Bonnes pratiques pour la construction des images
|
||||
|
||||
@@ -290,3 +290,8 @@ bash: figlet: command not found
|
||||
* We have a clear definition of our environment, and can share it reliably with others.
|
||||
|
||||
* Let's see in the next chapters how to bake a custom image with `figlet`!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Running our first container
|
||||
:FR:- Lancer nos premiers conteneurs
|
||||
|
||||
@@ -226,3 +226,8 @@ docker export <container_id> | tar tv
|
||||
```
|
||||
|
||||
This will give a detailed listing of the content of the container.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Troubleshooting and getting inside a container
|
||||
:FR:- Inspecter un conteneur en détail, en *live* ou *post-mortem*
|
||||
|
||||
@@ -375,3 +375,13 @@ We've learned how to:
|
||||
* Understand Docker image namespacing.
|
||||
* Search and download images.
|
||||
|
||||
???
|
||||
|
||||
:EN:Building images
|
||||
:EN:- Containers, images, and layers
|
||||
:EN:- Image addresses and tags
|
||||
:EN:- Finding and transferring images
|
||||
|
||||
:FR:Construire des images
|
||||
:FR:- La différence entre un conteneur et une image
|
||||
:FR:- La notion de *layer* partagé entre images
|
||||
|
||||
@@ -80,3 +80,8 @@ $ docker ps --filter label=owner=alice
|
||||
(To determine internal cross-billing, or who to page in case of outage.)
|
||||
|
||||
* etc.
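For example (image and label value are illustrative), the label is set at `docker run` time and can then be used as a filter:

```bash
docker run -d --label owner=alice nginx
docker ps --filter label=owner=alice
```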
|
||||
|
||||
???
|
||||
|
||||
:EN:- Using labels to identify containers
|
||||
:FR:- Étiqueter ses conteneurs avec des méta-données
|
||||
|
||||
@@ -391,3 +391,10 @@ We've learned how to:
|
||||
|
||||
* Use a simple local development workflow.
|
||||
|
||||
???
|
||||
|
||||
:EN:Developing with containers
|
||||
:EN:- “Containerize” a development environment
|
||||
|
||||
:FR:Développer au jour le jour
|
||||
:FR:- « Containeriser » son environnement de développement
|
||||
@@ -313,3 +313,11 @@ virtually "free."
|
||||
* Sometimes, we want to inspect a specific intermediary build stage.
|
||||
|
||||
* Or, we want to describe multiple images using a single Dockerfile.
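For example (the stage name `builder` is hypothetical), `docker build --target` builds only up to a given stage:

```bash
# Build and tag just the stage named "builder" from the Dockerfile
docker build --target builder -t myapp:builder .
# The resulting image can then be run or inspected like any other
docker image inspect myapp:builder
```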
|
||||
|
||||
???
|
||||
|
||||
:EN:Optimizing our images and their build process
|
||||
:EN:- Leveraging multi-stage builds
|
||||
|
||||
:FR:Optimiser les images et leur construction
|
||||
:FR:- Utilisation d'un *multi-stage build*
|
||||
|
||||
@@ -130,3 +130,12 @@ $ docker inspect --format '{{ json .Created }}' <containerID>
|
||||
|
||||
* The optional `json` keyword asks for valid JSON output.
|
||||
<br/>(e.g. here it adds the surrounding double-quotes.)
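A quick sketch of the difference (container name and timestamp are made up):

```bash
$ docker inspect --format '{{ .Created }}' web
2021-03-01T10:00:00.000000000Z
$ docker inspect --format '{{ json .Created }}' web
"2021-03-01T10:00:00.000000000Z"
```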
|
||||
|
||||
???
|
||||
|
||||
:EN:Managing container lifecycle
|
||||
:EN:- Naming and inspecting containers
|
||||
|
||||
:FR:Suivre ses conteneurs à la loupe
|
||||
:FR:- Obtenir des informations détaillées sur un conteneur
|
||||
:FR:- Associer un identifiant unique à un conteneur
|
||||
|
||||
@@ -175,3 +175,10 @@ class: extra-details
|
||||
* This will cause some CLI and TUI programs to redraw the screen.
|
||||
|
||||
* But not all of them.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Restarting old containers
|
||||
:EN:- Detaching and reattaching to container
|
||||
:FR:- Redémarrer des anciens conteneurs
|
||||
:FR:- Se détacher et rattacher à des conteneurs
|
||||
|
||||
@@ -95,6 +95,24 @@ $ ssh <login>@<ip-address>
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## `tailhist`
|
||||
|
||||
The shell history of the instructor is available online in real time.
|
||||
|
||||
Note the IP address of the instructor's virtual machine (A.B.C.D).
|
||||
|
||||
Open http://A.B.C.D:1088 in your browser and you should see the history.
|
||||
|
||||
The history is updated in real time (using a WebSocket connection).
|
||||
|
||||
It should be green when the WebSocket is connected.
|
||||
|
||||
If it turns red, reloading the page should fix it.
|
||||
|
||||
---
|
||||
|
||||
## Checking your Virtual Machine
|
||||
|
||||
Once logged in, make sure that you can run a basic Docker command:
|
||||
@@ -125,3 +143,11 @@ Server:
|
||||
]
|
||||
|
||||
If this doesn't work, raise your hand so that an instructor can assist you!
|
||||
|
||||
???
|
||||
|
||||
:EN:Container concepts
|
||||
:FR:Premier contact avec les conteneurs
|
||||
|
||||
:EN:- What's a container engine?
|
||||
:FR:- Qu'est-ce qu'un *container engine* ?
|
||||
|
||||
@@ -119,7 +119,7 @@ Nano and LinuxKit VMs in Hyper-V!)
|
||||
|
||||
- golang, mongo, python, redis, hello-world ... and more being added
|
||||
|
||||
- you should still use `--plaform` with multi-os images to be certain
|
||||
- you should still use `--platform` with multi-os images to be certain
|
||||
|
||||
- Windows Containers now support `localhost` accessible containers (July 2018)
|
||||
|
||||
|
||||
@@ -11,10 +11,10 @@ class State(object):
|
||||
self.section_title = None
|
||||
self.section_start = 0
|
||||
self.section_slides = 0
|
||||
self.chapters = {}
|
||||
self.modules = {}
|
||||
self.sections = {}
|
||||
def show(self):
|
||||
if self.section_title.startswith("chapter-"):
|
||||
if self.section_title.startswith("module-"):
|
||||
return
|
||||
print("{0.section_title}\t{0.section_start}\t{0.section_slides}".format(self))
|
||||
self.sections[self.section_title] = self.section_slides
|
||||
@@ -38,10 +38,10 @@ for line in open(sys.argv[1]):
|
||||
if line == "--":
|
||||
state.current_slide += 1
|
||||
toc_links = re.findall("\(#toc-(.*)\)", line)
|
||||
if toc_links and state.section_title.startswith("chapter-"):
|
||||
if state.section_title not in state.chapters:
|
||||
state.chapters[state.section_title] = []
|
||||
state.chapters[state.section_title].append(toc_links[0])
|
||||
if toc_links and state.section_title.startswith("module-"):
|
||||
if state.section_title not in state.modules:
|
||||
state.modules[state.section_title] = []
|
||||
state.modules[state.section_title].append(toc_links[0])
|
||||
# This is really hackish
|
||||
if line.startswith("class:"):
|
||||
for klass in EXCLUDED:
|
||||
@@ -51,7 +51,7 @@ for line in open(sys.argv[1]):
|
||||
|
||||
state.show()
|
||||
|
||||
for chapter in sorted(state.chapters, key=lambda f: int(f.split("-")[1])):
|
||||
chapter_size = sum(state.sections[s] for s in state.chapters[chapter])
|
||||
print("{}\t{}\t{}".format("total size for", chapter, chapter_size))
|
||||
for module in sorted(state.modules, key=lambda f: int(f.split("-")[1])):
|
||||
module_size = sum(state.sections[s] for s in state.modules[module])
|
||||
print("{}\t{}\t{}".format("total size for", module, module_size))
|
||||
|
||||
|
||||
118
slides/fix-redirects.sh
Executable file
@@ -0,0 +1,118 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This script helps to add "force-redirects" where needed.
|
||||
# This might replace your entire git repos with Vogon poetry.
|
||||
# Use at your own peril!
|
||||
|
||||
set -eu
|
||||
|
||||
# The easiest way to set this env var is by copy-pasting from
|
||||
# the netlify web dashboard, then doctoring the output a bit.
|
||||
# Yeah, that's gross, but after spending 10 minutes with the
|
||||
# API and the CLI and OAuth, it took about 10 seconds to do it
|
||||
# with copy-paste, so ... :)
|
||||
|
||||
SITES="
|
||||
2020-01-caen
|
||||
2020-01-zr
|
||||
2020-02-caen
|
||||
2020-02-enix
|
||||
2020-02-outreach
|
||||
2020-02-vmware
|
||||
2020-03-ardan
|
||||
2020-03-qcon
|
||||
alfun-2019-06
|
||||
boosterconf2018
|
||||
clt-2019-10
|
||||
dc17eu
|
||||
decembre2018
|
||||
devopsdaysams2018
|
||||
devopsdaysmsp2018
|
||||
gotochgo2018
|
||||
gotochgo2019
|
||||
indexconf2018
|
||||
intro-2019-01
|
||||
intro-2019-04
|
||||
intro-2019-06
|
||||
intro-2019-08
|
||||
intro-2019-09
|
||||
intro-2019-11
|
||||
intro-2019-12
|
||||
k8s2d
|
||||
kadm-2019-04
|
||||
kadm-2019-06
|
||||
kube
|
||||
kube-2019-01
|
||||
kube-2019-02
|
||||
kube-2019-03
|
||||
kube-2019-04
|
||||
kube-2019-06
|
||||
kube-2019-08
|
||||
kube-2019-09
|
||||
kube-2019-10
|
||||
kube-2019-11
|
||||
lisa-2019-10
|
||||
lisa16t1
|
||||
lisa17m7
|
||||
lisa17t9
|
||||
maersk-2019-07
|
||||
maersk-2019-08
|
||||
ndcminnesota2018
|
||||
nr-2019-08
|
||||
oscon2018
|
||||
oscon2019
|
||||
osseu17
|
||||
pycon2019
|
||||
qconsf18wkshp
|
||||
qconsf2017intro
|
||||
qconsf2017swarm
|
||||
qconsf2018
|
||||
qconuk2019
|
||||
septembre2018
|
||||
sfsf-2019-06
|
||||
srecon2018
|
||||
swarm2017
|
||||
velny-k8s101-2018
|
||||
velocity-2019-11
|
||||
velocityeu2018
|
||||
velocitysj2018
|
||||
vmware-2019-11
|
||||
weka
|
||||
wwc-2019-10
|
||||
wwrk-2019-05
|
||||
wwrk-2019-06
|
||||
"
|
||||
|
||||
for SITE in $SITES; do
|
||||
echo "##### $SITE"
|
||||
git checkout -q origin/$SITE
|
||||
# No _redirects? No problem.
|
||||
if ! [ -f _redirects ]; then
|
||||
continue
|
||||
fi
|
||||
# If there is already a force redirect on /, we're good.
|
||||
if grep '^/ .* 200!' _redirects; then
|
||||
continue
|
||||
fi
|
||||
# If there is a redirect on / ... and it's not forced ... do something.
|
||||
if grep "^/ .* 200$" _redirects; then
|
||||
echo "##### $SITE needs to be patched"
|
||||
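# Rewrite "/ <target> 200" into "/ <target> 200!" (on Netlify, the "!" forces the redirect even if a file exists at that path)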
sed -i 's,^/ \(.*\) 200$,/ \1 200!,' _redirects
|
||||
git add _redirects
|
||||
git commit -m "fix-redirects.sh: adding forced redirect"
|
||||
git push origin HEAD:$SITE
|
||||
continue
|
||||
fi
|
||||
if grep "^/ " _redirects; then
|
||||
echo "##### $SITE with / but no status code"
|
||||
echo "##### Should I add '200!' ?"
|
||||
read foo
|
||||
sed -i 's,^/ \(.*\)$,/ \1 200!,' _redirects
|
||||
git add _redirects
|
||||
git commit -m "fix-redirects.sh: adding status code and forced redirect"
|
||||
git push origin HEAD:$SITE
|
||||
continue
|
||||
fi
|
||||
echo "##### $SITE without / ?"
|
||||
cat _redirects
|
||||
done
|
||||
BIN
slides/images/hpa-v2-pa-latency.png
Normal file
|
After Width: | Height: | Size: 66 KiB |
BIN
slides/images/hpa-v2-pa-pods.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
1060
slides/images/k8s-net-0-overview.svg
Normal file
|
After Width: | Height: | Size: 99 KiB |
519
slides/images/k8s-net-1-pod-to-pod.svg
Normal file
@@ -0,0 +1,519 @@
[519 lines of SVG source omitted: vector diagram of pods running on nodes, with pod-to-pod network links]
After Width: | Height: | Size: 39 KiB |
587
slides/images/k8s-net-2-pod-to-svc.svg
Normal file
|
After Width: | Height: | Size: 57 KiB |
493
slides/images/k8s-net-3-netpol.svg
Normal file
|
After Width: | Height: | Size: 55 KiB |
1108
slides/images/k8s-net-4-overview.svg
Normal file
|
After Width: | Height: | Size: 110 KiB |
@@ -7,6 +7,7 @@ FLAGS=dict(
|
||||
fr=u"🇫🇷",
|
||||
uk=u"🇬🇧",
|
||||
us=u"🇺🇸",
|
||||
www=u"🌐",
|
||||
)
|
||||
|
||||
TEMPLATE="""<html>
|
||||
@@ -19,9 +20,9 @@ TEMPLATE="""<html>
|
||||
<div class="main">
|
||||
<table>
|
||||
<tr><td class="header" colspan="3">{{ title }}</td></tr>
|
||||
<tr><td class="details" colspan="3">Note: while some workshops are delivered in French, slides are always in English.</td></tr>
|
||||
<tr><td class="details" colspan="3">Note: while some workshops are delivered in other languages, slides are always in English.</td></tr>
|
||||
|
||||
<tr><td class="title" colspan="3">Free video of our latest workshop</td></tr>
|
||||
<tr><td class="title" colspan="3">Free Kubernetes intro course</td></tr>
|
||||
|
||||
<tr>
|
||||
<td>Getting Started With Kubernetes and Container Orchestration</td>
|
||||
@@ -35,11 +36,11 @@ TEMPLATE="""<html>
|
||||
<td class="details">If you're interested, we can deliver that workshop (or longer courses) to your team or organization.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td class="details">Contact <a href="mailto:jerome.petazzoni@gmail.com">Jérôme Petazzoni</a> to make that happen!</a></td>
|
||||
<td class="details">Contact <a href="mailto:jerome.petazzoni@gmail.com">Jérôme Petazzoni</a> to make that happen!</td>
|
||||
</tr>
|
||||
|
||||
{% if coming_soon %}
|
||||
<tr><td class="title" colspan="3">Coming soon near you</td></tr>
|
||||
<tr><td class="title" colspan="3">Coming soon</td></tr>
|
||||
|
||||
{% for item in coming_soon %}
|
||||
<tr>
|
||||
@@ -140,13 +141,26 @@ import yaml
|
||||
|
||||
items = yaml.safe_load(open("index.yaml"))
|
||||
|
||||
|
||||
def prettyparse(date):
|
||||
months = [
|
||||
"January", "February", "March", "April", "May", "June",
|
||||
"July", "August", "September", "October", "November", "December"
|
||||
]
|
||||
month = months[date.month-1]
|
||||
suffix = {
|
||||
1: "st", 2: "nd", 3: "rd",
|
||||
21: "st", 22: "nd", 23: "rd",
|
||||
31: "st"}.get(date.day, "th")
|
||||
return date.year, month, "{}{}".format(date.day, suffix)
|
||||
|
||||
|
||||
# Items with a date correspond to scheduled sessions.
|
||||
# Items without a date correspond to self-paced content.
|
||||
# The date should be specified as a string (e.g. 2018-11-26).
|
||||
# It can also be a list of two elements (e.g. [2018-11-26, 2018-11-28]).
|
||||
# The latter indicates an event spanning multiple dates.
|
||||
# The first date will be used in the generated page, but the event
|
||||
# will be considered "current" (and therefore, shown in the list of
|
||||
# The event will be considered "current" (shown in the list of
|
||||
# upcoming events) until the second date.
|
||||
|
||||
for item in items:
|
||||
@@ -156,19 +170,23 @@ for item in items:
|
||||
date_begin, date_end = date
|
||||
else:
|
||||
date_begin, date_end = date, date
|
||||
suffix = {
|
||||
1: "st", 2: "nd", 3: "rd",
|
||||
21: "st", 22: "nd", 23: "rd",
|
||||
31: "st"}.get(date_begin.day, "th")
|
||||
# %e is a non-standard extension (it displays the day, but without a
|
||||
# leading zero). If strftime fails with ValueError, try to fall back
|
||||
# on %d (which displays the day but with a leading zero when needed).
|
||||
try:
|
||||
item["prettydate"] = date_begin.strftime("%B %e{}, %Y").format(suffix)
|
||||
except ValueError:
|
||||
item["prettydate"] = date_begin.strftime("%B %d{}, %Y").format(suffix)
|
||||
y1, m1, d1 = prettyparse(date_begin)
|
||||
y2, m2, d2 = prettyparse(date_end)
|
||||
if (y1, m1, d1) == (y2, m2, d2):
|
||||
# Single day event
|
||||
pretty_date = "{} {}, {}".format(m1, d1, y1)
|
||||
elif (y1, m1) == (y2, m2):
|
||||
# Multi-day event within a single month
|
||||
pretty_date = "{} {}-{}, {}".format(m1, d1, d2, y1)
|
||||
elif y1 == y2:
|
||||
# Multi-day event spanning more than a month
|
||||
pretty_date = "{} {}-{} {}, {}".format(m1, d1, m2, d2, y1)
|
||||
else:
|
||||
# Event spanning the turn of the year (REALLY???)
|
||||
pretty_date = "{} {}, {}-{} {}, {}".format(m1, d1, y1, m2, d2, y2)
|
||||
item["begin"] = date_begin
|
||||
item["end"] = date_end
|
||||
item["prettydate"] = pretty_date
|
||||
item["flag"] = FLAGS.get(item.get("country"),"")
|
||||
|
||||
today = datetime.date.today()
|
||||
|
||||
@@ -1,3 +1,197 @@
|
||||
- date: [2021-02-08, 2021-02-10]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2021-02-15, 2021-02-18]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2021-02-22, 2021-02-23]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging et CI/CD pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2021-02-24, 2021-02-26]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2021-03-01, 2021-03-02]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-10-05, 2020-10-06]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-10-07, 2020-10-09]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: 2020-10-12
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-10-13, 2020-10-14]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-10-19, 2020-10-20]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-09-28, 2020-10-01]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Skills Matter
|
||||
speaker: jpetazzo
|
||||
title: Advanced Kubernetes Concepts
|
||||
attend: https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
|
||||
|
||||
- date: [2020-08-29, 2020-08-30]
|
||||
country: www
|
||||
city: streaming
|
||||
event: fwdays
|
||||
speaker: jpetazzo
|
||||
title: Intensive Docker Online Workshop
|
||||
attend: https://fwdays.com/en/event/intensive-docker-workshop
|
||||
slides: https://2020-08-fwdays.container.training/
|
||||
|
||||
- date: [2020-09-12, 2020-09-13]
|
||||
country: www
|
||||
city: streaming
|
||||
event: fwdays
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes Intensive Online Workshop
|
||||
attend: https://fwdays.com/en/event/kubernetes-intensive-workshop
|
||||
slides: https://2020-09-fwdays.container.training/
|
||||
|
||||
- date: [2020-07-07, 2020-07-09]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Ardan Live
|
||||
speaker: jpetazzo
|
||||
title: Intensive Docker Bootcamp
|
||||
attend: https://www.eventbrite.com/e/livestream-intensive-docker-bootcamp-tickets-103258886108
|
||||
|
||||
- date: [2020-06-15, 2020-06-16]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-06-17, 2020-06-19]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: 2020-06-22
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-06-23, 2020-06-24]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-06-25, 2020-06-26]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
|
||||
- date: [2020-06-09, 2020-06-11]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Ardan Live
|
||||
speaker: jpetazzo
|
||||
title: Intensive Kubernetes Bootcamp
|
||||
attend: https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
|
||||
|
||||
- date: [2020-05-04, 2020-05-08]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Ardan Live
|
||||
speaker: jpetazzo
|
||||
title: Intensive Kubernetes - Advanced Concepts
|
||||
attend: https://www.eventbrite.com/e/livestream-intensive-kubernetes-advanced-concepts-tickets-102358725704
|
||||
|
||||
- date: [2020-03-30, 2020-04-02]
|
||||
country: www
|
||||
city: streaming
|
||||
event: Ardan Live
|
||||
speaker: jpetazzo
|
||||
title: Intensive Docker and Kubernetes
|
||||
attend: https://www.eventbrite.com/e/ardan-labs-live-worldwide-march-30-april-2-2020-tickets-100331129108#
|
||||
slides: https://2020-03-ardan.container.training/
|
||||
|
||||
- date: 2020-03-06
|
||||
country: uk
|
||||
city: London
|
||||
|
||||
@@ -14,13 +14,14 @@ slides: http://container.training/
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
#- containers/Docker_Overview.md
|
||||
@@ -43,8 +44,8 @@ chapters:
|
||||
-
|
||||
- containers/Container_Networking_Basics.md
|
||||
#- containers/Network_Drivers.md
|
||||
#- containers/Container_Network_Model.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Container_Network_Model.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
@@ -64,6 +65,5 @@ chapters:
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
#- containers/Orchestration_Overview.md
|
||||
-
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
|
||||
@@ -14,13 +14,14 @@ slides: http://container.training/
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
chapters:
|
||||
content:
|
||||
- shared/title.md
|
||||
# - shared/logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - containers/Docker_Overview.md
|
||||
- containers/Docker_History.md
|
||||
|
||||
@@ -14,13 +14,14 @@ slides: http://container.training/
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
- containers/Docker_Overview.md
|
||||
|
||||
@@ -129,3 +129,8 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
:EN:- Securely accessing internal services
|
||||
:FR:- Accès sécurisé aux services internes
|
||||
|
||||
549
slides/k8s/admission.md
Normal file
@@ -0,0 +1,549 @@
|
||||
# Dynamic Admission Control
|
||||
|
||||
- This is one of the many ways to extend the Kubernetes API
|
||||
|
||||
- High level summary: dynamic admission control relies on webhooks that are ...
|
||||
|
||||
- dynamic (can be added/removed on the fly)
|
||||
|
||||
- running inside or outside the cluster
|
||||
|
||||
- *validating* (yay/nay) or *mutating* (can change objects that are created/updated)
|
||||
|
||||
- selective (can be configured to apply only to some kinds, some selectors...)
|
||||
|
||||
- mandatory or optional (should it block operations when webhook is down?)
|
||||
|
||||
- Used on their own (e.g. policy enforcement) or as part of operators
|
||||
|
||||
---
|
||||
|
||||
## Use cases
|
||||
|
||||
Some examples ...
|
||||
|
||||
- Stand-alone admission controllers
|
||||
|
||||
*validating:* policy enforcement (e.g. quotas, naming conventions ...)
|
||||
|
||||
*mutating:* inject or provide default values (e.g. pod presets)
|
||||
|
||||
- Admission controllers part of a greater system
|
||||
|
||||
*validating:* advanced typing for operators
|
||||
|
||||
*mutating:* inject sidecars for service meshes
|
||||
|
||||
---
|
||||
|
||||
## You said *dynamic?*
|
||||
|
||||
- Some admission controllers are built in the API server
|
||||
|
||||
- They are enabled/disabled through Kubernetes API server configuration
|
||||
|
||||
(e.g. `--enable-admission-plugins`/`--disable-admission-plugins` flags)
|
||||
|
||||
- Here, we're talking about *dynamic* admission controllers
|
||||
|
||||
- They can be added/removed while the API server is running
|
||||
|
||||
(without touching the configuration files or even having access to them)
|
||||
|
||||
- This is done through two kinds of cluster-scope resources:
|
||||
|
||||
ValidatingWebhookConfiguration and MutatingWebhookConfiguration
|
||||
|
||||
---
|
||||
|
||||
## You said *webhooks?*
|
||||
|
||||
- A ValidatingWebhookConfiguration or MutatingWebhookConfiguration contains:
|
||||
|
||||
- a resource filter
|
||||
<br/>
|
||||
(e.g. "all pods", "deployments in namespace xyz", "everything"...)
|
||||
|
||||
- an operations filter
|
||||
<br/>
|
||||
(e.g. CREATE, UPDATE, DELETE)
|
||||
|
||||
- the address of the webhook server
|
||||
|
||||
- Each time an operation matches the filters, it is sent to the webhook server
|
||||
|
||||
---
|
||||
|
||||
## What gets sent exactly?
|
||||
|
||||
- The API server will `POST` a JSON object to the webhook
|
||||
|
||||
- That object will be a Kubernetes API message with `kind` `AdmissionReview`
|
||||
|
||||
- It will contain a `request` field, with, notably:
|
||||
|
||||
- `request.uid` (to be used when replying)
|
||||
|
||||
- `request.object` (the object created/deleted/changed)
|
||||
|
||||
- `request.oldObject` (when an object is modified)
|
||||
|
||||
- `request.userInfo` (who was making the request to the API in the first place)
|
||||
|
||||
(See [the documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#request) for a detailed example showing more fields.)
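---

class: extra-details

## What a request looks like (trimmed)

Here is a heavily trimmed `AdmissionReview` request, shown as a Python dict for convenience (the actual payload is JSON and has many more fields; all values below are illustrative):

```python
# Illustrative AdmissionReview request for a pod creation (trimmed sketch)
admission_request = {
    "apiVersion": "admission.k8s.io/v1",
    "kind": "AdmissionReview",
    "request": {
        "uid": "705ab4f5-6393-11e8-b7cc-42010a800002",  # example value
        "operation": "CREATE",
        "namespace": "default",
        "userInfo": {"username": "jean.doe", "groups": ["system:authenticated"]},
        "object": {   # the pod being created
            "kind": "Pod",
            "metadata": {"name": "chroma", "labels": {"color": "pink"}},
        },
        "oldObject": None,  # only set for UPDATE operations
    },
}
```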
|
||||
|
||||
---
|
||||
|
||||
## How should the webhook respond?
|
||||
|
||||
- By replying with another `AdmissionReview` in JSON
|
||||
|
||||
- It should have a `response` field, with, notably:
|
||||
|
||||
- `response.uid` (matching the `request.uid`)
|
||||
|
||||
- `response.allowed` (`true`/`false`)
|
||||
|
||||
- `response.status.message` (optional string; useful when denying requests)
|
||||
|
||||
- `response.patchType` (when a mutating webhook changes the object; e.g. `JSONPatch`)
|
||||
|
||||
- `response.patch` (the patch, encoded in base64)
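---

class: extra-details

## What a response looks like

For instance, a minimal *validating* response denying a request could look like this (again shown as a Python dict; the actual payload is JSON):

```python
# Minimal AdmissionReview response (sketch); response.uid must echo request.uid
request_uid = "705ab4f5-6393-11e8-b7cc-42010a800002"  # example value

admission_response = {
    "apiVersion": "admission.k8s.io/v1",  # echo the apiVersion of the request
    "kind": "AdmissionReview",
    "response": {
        "uid": request_uid,
        "allowed": False,
        "status": {"message": "denied by policy"},
    },
}
```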
|
||||
|
||||
---
|
||||
|
||||
## What if the webhook *does not* respond?
|
||||
|
||||
- If "something bad" happens, the API server follows the `failurePolicy` option
|
||||
|
||||
- this is a per-webhook option (specified in the webhook configuration)
|
||||
|
||||
- it can be `Fail` (the default) or `Ignore` ("allow all, unmodified")
|
||||
|
||||
- What's "something bad"?
|
||||
|
||||
- webhook responds with something invalid
|
||||
|
||||
- webhook takes more than 10 seconds to respond
|
||||
<br/>
|
||||
(this can be changed with `timeoutSeconds` field in the webhook config)
|
||||
|
||||
- webhook is down or has invalid certificates
|
||||
<br/>
|
||||
(TLS! It's not just a good idea; for admission control, it's the law!)
|
||||
|
||||
---
|
||||
|
||||
## What did you say about TLS?
|
||||
|
||||
- The webhook configuration can indicate:
|
||||
|
||||
- either `url` of the webhook server (has to begin with `https://`)
|
||||
|
||||
- or `service.name` and `service.namespace` of a Service on the cluster
|
||||
|
||||
- In the latter case, the Service has to accept TLS connections on port 443
|
||||
|
||||
- It has to use a certificate with CN `<name>.<namespace>.svc`
|
||||
|
||||
(**and** a `subjectAltName` extension with `DNS:<name>.<namespace>.svc`)
|
||||
|
||||
- The certificate needs to be valid (signed by a CA trusted by the API server)
|
||||
|
||||
... alternatively, we can pass a `caBundle` in the webhook configuration
|
||||
|
||||
---
|
||||
|
||||
## Webhook server inside or outside
|
||||
|
||||
- "Outside" webhook server is defined with `url` option
|
||||
|
||||
- convenient for external webhooks (e.g. a tamper-resistant audit trail)
|
||||
|
||||
- also great for initial development (e.g. with ngrok)
|
||||
|
||||
- requires outbound connectivity (duh) and can become a SPOF
|
||||
|
||||
- "Inside" webhook server is defined with `service` option
|
||||
|
||||
- convenient when the webhook needs to be deployed and managed on the cluster
|
||||
|
||||
- also great for air gapped clusters
|
||||
|
||||
- development can be harder (but tools like [Tilt](https://tilt.dev) can help)
|
||||
|
||||
---
|
||||
|
||||
## Developing a simple admission webhook
|
||||
|
||||
- We're going to register a custom webhook!
|
||||
|
||||
- First, we'll just dump the `AdmissionRequest` object
|
||||
|
||||
(using a little Node app)
|
||||
|
||||
- Then, we'll implement a strict policy on a specific label
|
||||
|
||||
(using a little Flask app)
|
||||
|
||||
- Development will happen in local containers, plumbed with ngrok
|
||||
|
||||
- Then we will deploy it to the cluster 🔥
|
||||
|
||||
---
|
||||
|
||||
## Running the webhook locally
|
||||
|
||||
- We prepared a Docker Compose file to start the whole stack
|
||||
|
||||
(the Node "echo" app, the Flask app, and one ngrok tunnel for each of them)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the webhook directory:
|
||||
```bash
|
||||
cd ~/container.training/webhooks/admission
|
||||
```
|
||||
|
||||
- Start the webhook in Docker containers:
|
||||
```bash
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
*Note the URL in `ngrok-echo_1` looking like `url=https://xxxx.ngrok.io`.*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's ngrok?
|
||||
|
||||
- Ngrok provides secure tunnels to access local services
|
||||
|
||||
- Example: run `ngrok http 1234`
|
||||
|
||||
- `ngrok` will display a publicly-available URL (e.g. https://xxxxyyyyzzzz.ngrok.io)
|
||||
|
||||
- Connections to https://xxxxyyyyzzzz.ngrok.io will terminate at `localhost:1234`
|
||||
|
||||
- Basic product is free; extra features (vanity domains, end-to-end TLS...) for $$$
|
||||
|
||||
- Perfect to develop our webhook!
|
||||
|
||||
- Probably not for production, though
|
||||
|
||||
(webhook requests and responses now pass through the ngrok platform)
|
||||
|
||||
---
|
||||
|
||||
## Update the webhook configuration
|
||||
|
||||
- We have a webhook configuration in `k8s/webhook-configuration.yaml`
|
||||
|
||||
- We need to update the configuration with the correct `url`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the webhook configuration manifest:
|
||||
```bash
|
||||
vim k8s/webhook-configuration.yaml
|
||||
```
|
||||
|
||||
- **Uncomment** the `url:` line
|
||||
|
||||
- **Update** the `.ngrok.io` URL with the URL shown by Compose
|
||||
|
||||
- Save and quit
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Register the webhook configuration
|
||||
|
||||
- Just after we register the webhook, it will be called for each matching request
|
||||
|
||||
(CREATE and UPDATE on Pods in all namespaces)
|
||||
|
||||
- The `failurePolicy` is `Ignore`
|
||||
|
||||
(so if the webhook server is down, we can still create pods)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Register the webhook:
|
||||
```bash
|
||||
kubectl apply -f k8s/webhook-configuration.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
It is strongly recommended to tail the logs of the API server while doing that.
|
||||
|
||||
---
|
||||
|
||||
## Create a pod
|
||||
|
||||
- Let's create a pod and try to set a `color` label
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a pod named `chroma`:
|
||||
```bash
|
||||
kubectl run --restart=Never chroma --image=nginx
|
||||
```
|
||||
|
||||
- Add a label `color` set to `pink`:
|
||||
```bash
|
||||
kubectl label pod chroma color=pink
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see the `AdmissionReview` objects in the Compose logs.
|
||||
|
||||
Note: the webhook doesn't do anything (other than printing the request payload).
|
||||
|
||||
---
|
||||
|
||||
## Use the "real" admission webhook
|
||||
|
||||
- We have a small Flask app implementing a particular policy on pod labels:
|
||||
|
||||
- if a pod sets a label `color`, it must be `blue`, `green`, `red`
|
||||
|
||||
- once that `color` label is set, it cannot be removed or changed
|
||||
|
||||
- That Flask app was started when we did `docker-compose up` earlier
|
||||
|
||||
- It is exposed through its own ngrok tunnel
|
||||
|
||||
- We are going to use that webhook instead of the other one
|
||||
|
||||
(by changing only the `url` field in the ValidatingWebhookConfiguration)
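---

class: extra-details

## What that policy could look like

Below is a simplified sketch of that label policy, written with Flask (the actual `flask/webhook.py` in the repository may be structured differently; this is only meant to illustrate the logic):

```python
from flask import Flask, request, jsonify

app = Flask(__name__)
ALLOWED_COLORS = {"blue", "green", "red"}

def get_color(obj):
    # Extract the "color" label from a pod manifest (or None if absent)
    return ((obj or {}).get("metadata", {}).get("labels") or {}).get("color")

@app.route("/", methods=["POST"])
def admission_review():
    req = request.get_json()["request"]
    new, old = get_color(req.get("object")), get_color(req.get("oldObject"))
    allowed, message = True, "OK"
    if new is not None and new not in ALLOWED_COLORS:
        allowed, message = False, "color must be blue, green, or red"
    elif old is not None and new != old:
        allowed, message = False, "the color label cannot be changed or removed"
    return jsonify({
        "apiVersion": "admission.k8s.io/v1",
        "kind": "AdmissionReview",
        "response": {"uid": req["uid"], "allowed": allowed,
                     "status": {"message": message}},
    })
```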
|
||||
|
||||
---
|
||||
|
||||
## Update the webhook configuration
|
||||
|
||||
.exercise[
|
||||
|
||||
- First, check the ngrok URL of the tunnel for the Flask app:
|
||||
```bash
|
||||
docker-compose logs ngrok-flask
|
||||
```
|
||||
|
||||
- Then, edit the webhook configuration:
|
||||
```bash
|
||||
kubectl edit validatingwebhookconfiguration admission.container.training
|
||||
```
|
||||
- Find the `url:` field with the `.ngrok.io` URL and update it
|
||||
|
||||
- Save and quit; the new configuration is applied immediately
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Verify the behavior of the webhook
|
||||
|
||||
- Try to create a few pods and/or change labels on existing pods
|
||||
|
||||
- What happens if we try to make changes to the earlier pod?
|
||||
|
||||
(the one that has the label `color=pink`)
|
||||
|
||||
---
|
||||
|
||||
## Deploying the webhook on the cluster
|
||||
|
||||
- Let's see what's needed to self-host the webhook server!
|
||||
|
||||
- The webhook needs to be reachable through a Service on our cluster
|
||||
|
||||
- The Service needs to accept TLS connections on port 443
|
||||
|
||||
- We need a proper TLS certificate:
|
||||
|
||||
- with the right `CN` and `subjectAltName` (`<servicename>.<namespace>.svc`)
|
||||
|
||||
- signed by a trusted CA
|
||||
|
||||
- We can either use a "real" CA, or use the `caBundle` option to specify the CA cert
|
||||
|
||||
(the latter makes it easy to use self-signed certs)
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We're going to generate a key pair and a self-signed certificate
|
||||
|
||||
- We will store them in a Secret
|
||||
|
||||
- We will run the webhook in a Deployment, exposed with a Service
|
||||
|
||||
- We will update the webhook configuration to use that Service
|
||||
|
||||
- The Service will be named `admission`, in Namespace `webhooks`
|
||||
|
||||
(keep in mind that the ValidatingWebhookConfiguration itself is at cluster scope)
|
||||
|
||||
---
|
||||
|
||||
## Let's get to work!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Make sure we're in the right directory:
|
||||
```bash
|
||||
cd ~/container.training/webhooks/admission
|
||||
```
|
||||
|
||||
- Create the namespace:
|
||||
```bash
|
||||
kubectl create namespace webhooks
|
||||
```
|
||||
|
||||
- Switch to the namespace:
|
||||
```bash
|
||||
kubectl config set-context --current --namespace=webhooks
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the webhook
|
||||
|
||||
- *Normally,* we would author an image for this
|
||||
|
||||
- Since our webhook is just *one* Python source file ...
|
||||
|
||||
... we'll store it in a ConfigMap, and install dependencies on the fly
|
||||
|
||||
.exercise[
|
||||
|
||||
- Load the webhook source in a ConfigMap:
|
||||
```bash
|
||||
kubectl create configmap admission --from-file=flask/webhook.py
|
||||
```
|
||||
|
||||
- Create the Deployment and Service:
|
||||
```bash
|
||||
kubectl apply -f k8s/webhook-server.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Generating the key pair and certificate
|
||||
|
||||
- Let's call OpenSSL to the rescue!
|
||||
|
||||
(of course, there are plenty of other options; e.g. `cfssl`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate a self-signed certificate:
|
||||
```bash
|
||||
NAMESPACE=webhooks
|
||||
SERVICE=admission
|
||||
CN=$SERVICE.$NAMESPACE.svc
|
||||
openssl req -x509 -newkey rsa:4096 -nodes -keyout key.pem -out cert.pem \
|
||||
-days 30 -subj /CN=$CN -addext subjectAltName=DNS:$CN
|
||||
```
|
||||
|
||||
- Load up the key and cert in a Secret:
|
||||
```bash
|
||||
kubectl create secret tls admission --cert=cert.pem --key=key.pem
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Update the webhook configuration
|
||||
|
||||
- Let's reconfigure the webhook to use our Service instead of ngrok
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the webhook configuration manifest:
|
||||
```bash
|
||||
vim k8s/webhook-configuration.yaml
|
||||
```
|
||||
|
||||
- Comment out the `url:` line
|
||||
|
||||
- Uncomment the `service:` section
|
||||
|
||||
- Save, quit
|
||||
|
||||
- Update the webhook configuration:
|
||||
```bash
|
||||
kubectl apply -f k8s/webhook-configuration.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Add our self-signed cert to the `caBundle`
|
||||
|
||||
- The API server won't accept our self-signed certificate
|
||||
|
||||
- We need to add it to the `caBundle` field in the webhook configuration
|
||||
|
||||
- The `caBundle` will be our `cert.pem` file, encoded in base64
|
||||
|
||||
---
|
||||
|
||||
Shell to the rescue!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Load up our cert and encode it in base64:
|
||||
```bash
|
||||
CA=$(base64 -w0 < cert.pem)
|
||||
```
|
||||
|
||||
- Define a patch operation to update the `caBundle`:
|
||||
```bash
|
||||
PATCH='[{
|
||||
"op": "replace",
|
||||
"path": "/webhooks/0/clientConfig/caBundle",
|
||||
"value":"'$CA'"
|
||||
}]'
|
||||
```
|
||||
|
||||
- Patch the webhook configuration:
|
||||
```bash
|
||||
kubectl patch validatingwebhookconfiguration \
|
||||
admission.webhook.container.training \
|
||||
--type='json' -p="$PATCH"
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Try it out!
|
||||
|
||||
- Keep an eye on the API server logs
|
||||
|
||||
- Tail the logs of the pod running the webhook server
|
||||
|
||||
- Create a few pods; we should see requests in the webhook server logs
|
||||
|
||||
- Check that the label `color` is enforced correctly
|
||||
|
||||
(it should only allow values of `red`, `green`, `blue`)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Dynamic admission control with webhooks
|
||||
:FR:- Contrôle d'admission dynamique (webhooks)
|
||||
394
slides/k8s/aggregation-layer.md
Normal file
@@ -0,0 +1,394 @@
|
||||
# The Aggregation Layer
|
||||
|
||||
- The aggregation layer is a way to extend the Kubernetes API
|
||||
|
||||
- It is similar to CRDs
|
||||
|
||||
- it lets us define new resource types
|
||||
|
||||
- these resources can then be used with `kubectl` and other clients
|
||||
|
||||
- The implementation is very different
|
||||
|
||||
- CRDs are handled within the API server
|
||||
|
||||
- the aggregation layer offloads requests to another process
|
||||
|
||||
- They are designed for very different use-cases
|
||||
|
||||
---
|
||||
|
||||
## CRDs vs aggregation layer
|
||||
|
||||
- The Kubernetes API is a REST-ish API with a hierarchical structure
|
||||
|
||||
- It can be extended with Custom Resource Definitions (CRDs)
|
||||
|
||||
- Custom resources are managed by the Kubernetes API server
|
||||
|
||||
- we don't need to write code
|
||||
|
||||
- the API server does all the heavy lifting
|
||||
|
||||
- these resources are persisted in Kubernetes' "standard" database
|
||||
<br/>
|
||||
(for most installations, that's `etcd`)
|
||||
|
||||
- We can also define resources that are *not* managed by the API server
|
||||
|
||||
(the API server merely proxies the requests to another server)
|
||||
|
||||
---
|
||||
|
||||
## Which one is best?
|
||||
|
||||
- For things that "map" well to objects stored in a traditional database:
|
||||
|
||||
*probably CRDs*
|
||||
|
||||
- For things that "exist" only in Kubernetes and don't represent external resources:
|
||||
|
||||
*probably CRDs*
|
||||
|
||||
- For things that are read-only, at least from Kubernetes' perspective:
|
||||
|
||||
*probably aggregation layer*
|
||||
|
||||
- For things that can't be stored in etcd because of size or access patterns:
|
||||
|
||||
*probably aggregation layer*
|
||||
|
||||
---
|
||||
|
||||
## How are resources organized?
|
||||
|
||||
- Let's have a look at the Kubernetes API hierarchical structure
|
||||
|
||||
- We'll ask `kubectl` to show us the exact requests that it's making
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the URI for a cluster-scope, "core" resource, e.g. a Node:
|
||||
```bash
|
||||
kubectl -v6 get node node1
|
||||
```
|
||||
|
||||
- Check the URI for a cluster-scope, "non-core" resource, e.g. a ClusterRole:
|
||||
```bash
|
||||
kubectl -v6 get clusterrole view
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Core vs non-core
|
||||
|
||||
- This is the structure of the URIs that we just checked:
|
||||
|
||||
```
|
||||
/api/v1/nodes/node1
|
||||
↑ ↑ ↑
|
||||
`version` `kind` `name`
|
||||
|
||||
/apis/rbac.authorization.k8s.io/v1/clusterroles/view
|
||||
↑ ↑ ↑ ↑
|
||||
`group` `version` `kind` `name`
|
||||
```
|
||||
|
||||
- There is no group for "core" resources
|
||||
|
||||
- Or, we could say that the group, `core`, is implied
|
||||
|
||||
---
|
||||
|
||||
## Group-Version-Kind
|
||||
|
||||
- In the API server, the Group-Version-Kind triple maps to a Go type
|
||||
|
||||
(look for all the "GVK" occurrences in the source code!)
|
||||
|
||||
- In the API server URI router, the GVK is parsed "relatively early"
|
||||
|
||||
(so that the server can know which resource we're talking about)
|
||||
|
||||
- "Well, actually ..." Things are a bit more complicated, see next slides!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Namespaced resources
|
||||
|
||||
- What about namespaced resources?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the URI for a namespaced, "core" resource, e.g. a Service:
|
||||
```bash
|
||||
kubectl -v6 get service kubernetes --namespace default
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- Here is what namespaced resource URIs look like:
|
||||
|
||||
```
|
||||
/api/v1/namespaces/default/services/kubernetes
|
||||
↑ ↑ ↑ ↑
|
||||
`version` `namespace` `kind` `name`
|
||||
|
||||
/apis/apps/v1/namespaces/kube-system/daemonsets/kube-proxy
|
||||
↑ ↑ ↑ ↑ ↑
|
||||
`group` `version` `namespace` `kind` `name`
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Subresources
|
||||
|
||||
- Many resources have *subresources*, for instance:
|
||||
|
||||
- `/status` (decouples status updates from other updates)
|
||||
|
||||
- `/scale` (exposes a consistent interface for autoscalers)
|
||||
|
||||
- `/proxy` (allows access to HTTP resources)
|
||||
|
||||
- `/portforward` (used by `kubectl port-forward`)
|
||||
|
||||
- `/logs` (access pod logs)
|
||||
|
||||
- These are added at the end of the URI
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Accessing a subresource
|
||||
|
||||
.exercise[
|
||||
|
||||
- List `kube-proxy` pods:
|
||||
```bash
|
||||
kubectl get pods --namespace=kube-system --selector=k8s-app=kube-proxy
|
||||
PODNAME=$(
|
||||
kubectl get pods --namespace=kube-system --selector=k8s-app=kube-proxy \
|
||||
-o json | jq -r .items[0].metadata.name)
|
||||
```
|
||||
|
||||
- Execute a command in a pod, showing the API requests:
|
||||
```bash
|
||||
kubectl -v6 exec --namespace=kube-system $PODNAME -- echo hello world
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
The full request looks like:
|
||||
```
|
||||
POST https://.../api/v1/namespaces/kube-system/pods/kube-proxy-c7rlw/exec?
|
||||
command=echo&command=hello&command=world&container=kube-proxy&stderr=true&stdout=true
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Listing what's supported on the server
|
||||
|
||||
- There are at least three useful commands to introspect the API server
|
||||
|
||||
.exercise[
|
||||
|
||||
- List resource types, their group, kind, short names, and scope:
|
||||
```bash
|
||||
kubectl api-resources
|
||||
```
|
||||
|
||||
- List API groups + versions:
|
||||
```bash
|
||||
kubectl api-versions
|
||||
```
|
||||
|
||||
- List APIServices:
|
||||
```bash
|
||||
kubectl get apiservices
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
🤔 What's the difference between the last two?
|
||||
|
||||
---
|
||||
|
||||
## API registration
|
||||
|
||||
- `kubectl api-versions` shows all API groups, including `apiregistration.k8s.io`
|
||||
|
||||
- `kubectl get apiservices` shows the "routing table" for API requests
|
||||
|
||||
- The latter doesn't show `apiregistration.k8s.io`
|
||||
|
||||
(APIServices belong to `apiregistration.k8s.io`)
|
||||
|
||||
- Most API groups are `Local` (handled internally by the API server)
|
||||
|
||||
- If we're running the `metrics-server`, it should handle `metrics.k8s.io`
|
||||
|
||||
- This is an API group handled *outside* of the API server
|
||||
|
||||
- This is the *aggregation layer!*
|
||||
|
||||
---
|
||||
|
||||
## Finding resources
|
||||
|
||||
The following assumes that `metrics-server` is deployed on your cluster.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that the metrics.k8s.io group is registered with `metrics-server`:
|
||||
```bash
|
||||
kubectl get apiservices | grep metrics.k8s.io
|
||||
```
|
||||
|
||||
- Check the resource kinds registered in the metrics.k8s.io group:
|
||||
```bash
|
||||
kubectl api-resources --api-group=metrics.k8s.io
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(If the output of either command is empty, install `metrics-server` first.)
|
||||
|
||||
---
|
||||
|
||||
## `nodes` vs `nodes`
|
||||
|
||||
- We can have multiple resources with the same name
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look for resources named `node`:
|
||||
```bash
|
||||
kubectl api-resources | grep -w nodes
|
||||
```
|
||||
|
||||
- Compare the output of both commands:
|
||||
```bash
|
||||
kubectl get nodes
|
||||
kubectl get nodes.metrics.k8s.io
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
🤔 What is that second kind of `nodes`? How can we see what's really in them?
|
||||
|
||||
---
|
||||
|
||||
## Node vs NodeMetrics
|
||||
|
||||
- `nodes.metrics.k8s.io` (aka NodeMetrics) don't have fancy *printer columns*
|
||||
|
||||
- But we can look at the raw data (with `-o json` or `-o yaml`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at NodeMetrics objects with one of these commands:
|
||||
```bash
|
||||
kubectl get -o yaml nodes.metrics.k8s.io
|
||||
kubectl get -o yaml NodeMetrics
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
💡 Alright, these are the live metrics (CPU, RAM) for our nodes.
|
||||
|
||||
---
|
||||
|
||||
## An easier way to consume metrics
|
||||
|
||||
- We might have seen these metrics before ... With an easier command!
|
||||
|
||||
--
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display node metrics:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
- Check which API requests happen behind the scenes:
|
||||
```bash
|
||||
kubectl top nodes -v6
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Aggregation layer in practice
|
||||
|
||||
- We can write an API server to handle a subset of the Kubernetes API
|
||||
|
||||
- Then we can register that server by creating an APIService resource
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the definition used for the `metrics-server`:
|
||||
```bash
|
||||
kubectl describe apiservices v1beta1.metrics.k8s.io
|
||||
```
|
||||
]
|
||||
|
||||
- Group priority is used when multiple API groups provide similar kinds
|
||||
|
||||
(e.g. `nodes` and `nodes.metrics.k8s.io` as seen earlier)
|
||||
|
||||
---
|
||||
|
||||
## Authentication flow
|
||||
|
||||
- We have two Kubernetes API servers:
|
||||
|
||||
- "aggregator" (the main one; clients connect to it)
|
||||
|
||||
- "aggregated" (the one providing the extra API; aggregator connects to it)
|
||||
|
||||
- Aggregator deals with client authentication
|
||||
|
||||
- Aggregator authenticates with aggregated using mutual TLS
|
||||
|
||||
- Aggregator passes (/forwards/proxies/...) requests to aggregated
|
||||
|
||||
- Aggregated performs authorization by calling back aggregator
|
||||
|
||||
("can subject X perform action Y on resource Z?")
|
||||
|
||||
[This doc page](https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#authentication-flow) has very nice swim lanes showing that flow.
|
||||
|
||||
---
|
||||
|
||||
## Discussion
|
||||
|
||||
- Aggregation layer is great for metrics
|
||||
|
||||
(fast-changing, ephemeral data, that would be outrageously bad for etcd)
|
||||
|
||||
- It *could* be a good fit to expose other REST APIs as a pass-thru
|
||||
|
||||
(but it's more common to see CRDs instead)
|
||||
|
||||
???
|
||||
|
||||
:EN:- The aggregation layer
|
||||
:FR:- Étendre l'API avec le *aggregation layer*
|
||||
@@ -87,3 +87,8 @@
|
||||
- Tunnels are also fine
|
||||
|
||||
(e.g. [k3s](https://k3s.io/) uses a tunnel to allow each node to contact the API server)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Ensuring API server availability
|
||||
:FR:- Assurer la disponibilité du serveur API
|
||||
|
||||
179
slides/k8s/apiserver-deepdive.md
Normal file
@@ -0,0 +1,179 @@
|
||||
# API server internals
|
||||
|
||||
- Understanding the internals of the API server is useful.red[¹]:
|
||||
|
||||
- when extending the Kubernetes API server (CRDs, webhooks...)
|
||||
|
||||
- when running Kubernetes at scale
|
||||
|
||||
- Let's dive into a bit of code!
|
||||
|
||||
.footnote[.red[¹]And by *useful*, we mean *strongly recommended or else...*]
|
||||
|
||||
---
|
||||
|
||||
## The main handler
|
||||
|
||||
- The API server parses its configuration, and builds a `GenericAPIServer`
|
||||
|
||||
- ... which contains an `APIServerHandler` ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/handler.go#L37))
|
||||
|
||||
- ... which contains a couple of `http.Handler` fields
|
||||
|
||||
- Requests go through:
|
||||
|
||||
- `FullHandlerChain` (a series of HTTP filters, see next slide)
|
||||
|
||||
- `Director` (switches the request to `GoRestfulContainer` or `NonGoRestfulMux`)
|
||||
|
||||
- `GoRestfulContainer` is for "normal" APIs; integrates nicely with OpenAPI
|
||||
|
||||
- `NonGoRestfulMux` is for everything else (e.g. proxy, delegation)
|
||||
|
||||
---
|
||||
|
||||
## The chain of handlers
|
||||
|
||||
- API requests go through a complex chain of filters ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/config.go#L671))
|
||||
|
||||
(note when reading that code: requests start at the bottom and go up)
|
||||
|
||||
- This is where authentication, authorization, and admission happen
|
||||
|
||||
(as well as a few other things!)
|
||||
|
||||
- Let's review an arbitrary selection of some of these handlers!
|
||||
|
||||
*In the following slides, the handlers are in chronological order.*
|
||||
|
||||
*Note: handlers are nested, so they can act at the beginning and at the end of a request.*
|
||||
|
||||
---
|
||||
|
||||
## `WithPanicRecovery`
|
||||
|
||||
- Reminder about Go: there is no exception handling in Go; instead:
|
||||
|
||||
- functions typically return a composite `(SomeType, error)` type
|
||||
|
||||
- when things go really bad, the code can call `panic()`
|
||||
|
||||
- `panic()` can be caught with `recover()`
|
||||
<br/>
|
||||
(but this is almost never used like an exception handler!)
|
||||
|
||||
- The API server code is not supposed to `panic()`
|
||||
|
||||
- But just in case, we have that handler to prevent (some) crashes
|
||||
|
||||
---
|
||||
|
||||
## `WithRequestInfo` ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/request/requestinfo.go#L163))
|
||||
|
||||
|
||||
- Parse out essential information:
|
||||
|
||||
API group, version, namespace, resource, subresource, verb ...
|
||||
|
||||
|
||||
|
||||
- Maps HTTP verbs (GET, PUT, ...) to Kubernetes verbs (list, get, watch, ...)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## HTTP verb mapping
|
||||
|
||||
- POST → create
|
||||
|
||||
- PUT → update
|
||||
|
||||
- PATCH → patch
|
||||
|
||||
- DELETE
|
||||
<br/> → delete (if a resource name is specified)
|
||||
<br/> → deletecollection (otherwise)
|
||||
|
||||
- GET, HEAD
|
||||
<br/> → get (if a resource name is specified)
|
||||
<br/> → list (otherwise)
|
||||
<br/> → watch (if the `?watch=true` option is specified)
|
||||
|
||||
---
|
||||
|
||||
## `WithWaitGroup`
|
||||
|
||||
- When we shut down, it tells clients (with in-flight requests) to retry
|
||||
|
||||
- only for "short" requests
|
||||
|
||||
- for long running requests, the client needs to do more
|
||||
|
||||
- Long running requests include `watch` verb, `proxy` sub-resource
|
||||
|
||||
(See also `WithTimeoutForNonLongRunningRequests`)
|
||||
|
||||
---
|
||||
|
||||
## AuthN and AuthZ
|
||||
|
||||
- `WithAuthentication`:
|
||||
the request goes through a *chain* of authenticators
|
||||
([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/filters/authentication.go#L38))
|
||||
|
||||
- WithAudit
|
||||
|
||||
- WithImpersonation: used for e.g. `kubectl ... --as another.user`
|
||||
|
||||
- WithPriorityAndFairness or WithMaxInFlightLimit
|
||||
|
||||
(`system:masters` can bypass these)
|
||||
|
||||
- WithAuthorization
|
||||
|
||||
---
|
||||
|
||||
## After all these handlers ...
|
||||
|
||||
- We get to the "director" mentioned above
|
||||
|
||||
- API groups get installed into the `GoRestfulContainer`
|
||||
([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/genericapiserver.go#L423))
|
||||
|
||||
- REST-ish resources are managed by various handlers
|
||||
(in [this directory](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/))
|
||||
|
||||
- These files show us the code path for each type of request
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Request code path
|
||||
|
||||
- [create.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/create.go):
|
||||
decode to HubGroupVersion; admission; mutating admission; store
|
||||
|
||||
- [delete.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/delete.go):
|
||||
validating admission only; deletion
|
||||
|
||||
- [get.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/get.go) (get, list):
|
||||
directly fetch from rest storage abstraction
|
||||
|
||||
- [patch.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/patch.go):
|
||||
admission; mutating admission; patch
|
||||
|
||||
- [update.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/update.go):
|
||||
decode to HubGroupVersion; admission; mutating admission; store
|
||||
|
||||
- [watch.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/watch.go):
|
||||
similar to get.go, but with watch logic
|
||||
|
||||
(HubGroupVersion = in-memory, "canonical" version.)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes API server internals
|
||||
:FR:- Fonctionnement interne du serveur API
|
||||
@@ -273,6 +273,26 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Group-Version-Kind, or GVK
|
||||
|
||||
- A particular type will be identified by the combination of:
|
||||
|
||||
- the API group it belongs to (core, `apps`, `metrics.k8s.io`, ...)
|
||||
|
||||
- the version of this API group (`v1`, `v1beta1`, ...)
|
||||
|
||||
- the "Kind" itself (Pod, Role, Job, ...)
|
||||
|
||||
- "GVK" appears a lot in the API machinery code
|
||||
|
||||
- Conversions are possible between different versions and even between API groups
|
||||
|
||||
(e.g. when Deployments moved from `extensions` to `apps`)
|
||||
|
||||
---
|
||||
|
||||
## Update
|
||||
|
||||
- Let's update our namespace object
|
||||
@@ -334,6 +354,34 @@ We demonstrated *update* and *watch* semantics.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Watch events
|
||||
|
||||
- `kubectl get --watch` shows changes
|
||||
|
||||
- If we add `--output-watch-events`, we can also see:
|
||||
|
||||
- the difference between ADDED and MODIFIED resources
|
||||
|
||||
- DELETED resources
|
||||
|
||||
.exercise[
|
||||
|
||||
- In one terminal, watch pods, displaying full events:
|
||||
```bash
|
||||
kubectl get pods --watch --output-watch-events
|
||||
```
|
||||
|
||||
- In another, run a short-lived pod:
|
||||
```bash
|
||||
kubectl run pause --image=alpine --rm -ti --restart=Never -- sleep 5
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
# Other control plane components
|
||||
|
||||
- API server ✔️
|
||||
@@ -381,3 +429,8 @@ We demonstrated *update* and *watch* semantics.
|
||||
- if the pod has special constraints that can't be met
|
||||
|
||||
- if the scheduler is not running (!)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes architecture review
|
||||
:FR:- Passage en revue de l'architecture de Kubernetes
|
||||
|
||||