Compare commits


1 Commit

Author             SHA1        Message      Date
Jerome Petazzoni   8dacf00bf3  Test gitpod  2020-04-03 10:56:39 -05:00
199 changed files with 2762 additions and 12413 deletions

.gitpod.yml (new file, 1 line)

@@ -0,0 +1 @@
image: jpetazzo/shpod


@@ -9,21 +9,21 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
image: k8s.gcr.io/etcd:3.4.3
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
"Edit the CLUSTER placeholder first. Then, remove this line.":
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-scheduler --master http://localhost:8080
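
The CLUSTER placeholder in the kube-controller-manager line above must be filled in before this Compose file will start (the extra quoted key exists only to force that edit). A minimal sketch, assuming the file is saved as kube.yml and you pick cluster number 99:

sed -i -e 's/10\.CLUSTER\.0\.0/10.99.0.0/' \
       -e '/Edit the CLUSTER placeholder/d' kube.yml
docker-compose -f kube.yml up -d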


@@ -9,20 +9,20 @@ services:
etcd:
network_mode: "service:pause"
image: k8s.gcr.io/etcd:3.4.9
image: k8s.gcr.io/etcd:3.4.3
command: etcd
kube-apiserver:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
kube-controller-manager:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-controller-manager --master http://localhost:8080
kube-scheduler:
network_mode: "service:pause"
image: k8s.gcr.io/hyperkube:v1.18.8
image: k8s.gcr.io/hyperkube:v1.17.2
command: kube-scheduler --master http://localhost:8080


@@ -1,33 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: certbot
spec:
ports:
- port: 80
protocol: TCP
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: certbot
spec:
rules:
- http:
paths:
- path: /.well-known/acme-challenge/
backend:
serviceName: certbot
servicePort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: certbot
subsets:
- addresses:
- ip: A.B.C.D
ports:
- port: 8000
protocol: TCP


@@ -1,11 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: xyz.A.B.C.D.nip.io
spec:
secretName: xyz.A.B.C.D.nip.io
dnsNames:
- xyz.A.B.C.D.nip.io
issuerRef:
name: letsencrypt-staging
kind: ClusterIssuer


@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# Remember to update this if you use this manifest to obtain real certificates :)
email: hello@example.com
server: https://acme-staging-v02.api.letsencrypt.org/directory
# To use the production environment, use the following line instead:
#server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: issuer-letsencrypt-staging
solvers:
- http01:
ingress:
class: traefik
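
As the comments above note, this issuer points at the Let's Encrypt staging environment. A sketch of flipping it to production once staging works (letsencrypt-issuer.yaml is an assumed file name; remember to set a real email address first, per the comment in the manifest):

sed -i 's,acme-staging-v02.api.letsencrypt.org,acme-v02.api.letsencrypt.org,' letsencrypt-issuer.yaml
kubectl apply -f letsencrypt-issuer.yaml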


@@ -4,10 +4,6 @@ metadata:
name: coffees.container.training
spec:
group: container.training
versions:
- name: v1alpha1
served: true
storage: true
scope: Namespaced
names:
plural: coffees
@@ -15,4 +11,25 @@ spec:
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
properties:
spec:
required:
- taste
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date


@@ -1,37 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: coffees.container.training
spec:
group: container.training
scope: Namespaced
names:
plural: coffees
singular: coffee
kind: Coffee
shortNames:
- cof
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
required: [ spec ]
properties:
spec:
type: object
properties:
taste:
description: Subjective taste of that kind of coffee bean
type: string
required: [ taste ]
additionalPrinterColumns:
- jsonPath: .spec.taste
description: Subjective taste of that kind of coffee bean
name: Taste
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date


@@ -9,9 +9,9 @@ spec:
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: excelsa
name: robusta
spec:
taste: fruity
taste: stronger
---
kind: Coffee
apiVersion: container.training/v1alpha1
@@ -23,12 +23,7 @@ spec:
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: robusta
name: excelsa
spec:
taste: stronger
bitterness: high
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
name: java
taste: fruity
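
Once the CRD above is applied, the additionalPrinterColumns drive the output of kubectl get. A usage sketch (the file names are assumptions):

kubectl apply -f coffee-crd.yaml -f coffees.yaml
kubectl get coffees                                    # shows the TASTE and AGE columns
kubectl get cof robusta -o jsonpath='{.spec.taste}'    # the shortName "cof" works too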


@@ -1,77 +0,0 @@
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
containers:
- name: consul
image: "consul:1.8"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
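
The caveats at the top of this manifest are easy to demonstrate. A quick sketch (consul.yaml is an assumed file name):

kubectl apply -f consul.yaml
kubectl rollout status statefulset consul
kubectl scale statefulset consul --replicas=5   # growing the cluster is fine
kubectl scale statefulset consul --replicas=1   # per the caveats, this loses quorum and breaks the cluster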


@@ -1,104 +0,0 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
subjects:
- kind: ServiceAccount
name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
spec:
serviceName: consul
replicas: 3
selector:
matchLabels:
app: consul
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
template:
metadata:
labels:
app: consul
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.8"
volumeMounts:
- name: data
mountPath: /consul/data
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"
- "-ui"
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave
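
Each replica of the StatefulSet above gets its own PersistentVolumeClaim from the volumeClaimTemplate, which is what provides the "true persistence" promised in the header comment. A quick check, assuming the manifest has been applied:

kubectl get pvc          # one claim per replica: data-consul-0, data-consul-1, data-consul-2
kubectl delete pod consul-0
kubectl wait --for=condition=Ready pod/consul-0   # the replacement consul-0 reattaches data-consul-0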


@@ -1,9 +1,5 @@
# Better Consul cluster.
# There is still no actual persistence, but:
# - podAntiAffinity prevents pod colocation
# - cluster works when scaling down to 1 (thanks to the lifecycle hook)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
kind: ClusterRole
metadata:
name: consul
rules:
@@ -15,16 +11,17 @@ rules:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
@@ -71,16 +68,11 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.8"
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "consul:1.6"
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
- "-retry-join=provider=k8s label_selector=\"app=consul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"


@@ -1,305 +0,0 @@
# This is a copy of the following file:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}


@@ -1,336 +0,0 @@
# This file is based on the following manifest:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# It adds a ServiceAccount that has cluster-admin privileges on the cluster,
# and exposes the dashboard on a NodePort. It makes it easier to do quick demos
# of the Kubernetes dashboard, without compromising the security too much.
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cluster-admin
namespace: kubernetes-dashboard
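
Since this variant binds the cluster-admin ClusterRole to a ServiceAccount and publishes the dashboard on a NodePort, logging in is a matter of finding the port and minting a token. A sketch (the create token subcommand assumes Kubernetes 1.24 or later; on older clusters, read the token from the ServiceAccount's secret instead):

kubectl -n kubernetes-dashboard get service kubernetes-dashboard   # note the NodePort (HTTPS, backend 8443)
kubectl -n kubernetes-dashboard create token cluster-admin         # paste this into the dashboard login screen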


@@ -1,30 +0,0 @@
kind: Event
apiVersion: v1
metadata:
generateName: hello-
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
kind: Node
apiVersion: v1
name: kind-control-plane
# Note: the uid should be the Node name (not the uid of the Node).
# This might be specific to global objects.
uid: kind-control-plane
type: Warning
reason: NodeOverheat
message: "Node temperature exceeds critical threshold"
action: Hello
source:
component: thermal-probe
#host: node1
#reportingComponent: ""
#reportingInstance: ""


@@ -1,36 +0,0 @@
kind: Event
apiVersion: v1
metadata:
# One convention is to use <objectname>.<timestamp>,
# where the timestamp is taken with a nanosecond
# precision and expressed in hexadecimal.
# Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
name: hello.1234567890
# The label doesn't serve any purpose, except making
# it easier to identify or delete that specific event.
labels:
container.training/test: ""
#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42
involvedObject:
### These 5 lines should be updated to refer to an object.
### Make sure to put the correct "uid", because it is what
### "kubectl describe" is using to gather relevant events.
#apiVersion: v1
#kind: Pod
#name: magic-bean
#namespace: blue
#uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30
type: Normal
reason: BackupSuccessful
message: "Object successfully dumped to gitops repository"
source:
component: gitops-sync
#reportingComponent: ""
#reportingInstance: ""
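
The naming convention described in the comments (object name, then the timestamp in nanoseconds rendered in hexadecimal) is easy to reproduce in shell. A sketch, assuming GNU date (which supports %N):

printf 'hello.%x\n' "$(date +%s%N)"   # e.g. hello.164689730a36ec3d, like the example above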


@@ -52,7 +52,7 @@ data:
- add_kubernetes_metadata:
in_cluster: true
---
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: filebeat
@@ -60,9 +60,6 @@ metadata:
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:


@@ -27,7 +27,7 @@ spec:
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
- "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx


@@ -1,29 +0,0 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2beta2
metadata:
name: rng
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: rng
minReplicas: 1
maxReplicas: 20
behavior:
scaleUp:
stabilizationWindowSeconds: 60
scaleDown:
stabilizationWindowSeconds: 180
metrics:
- type: Object
object:
describedObject:
apiVersion: v1
kind: Service
name: httplat
metric:
name: httplat_latency_seconds
target:
type: Value
value: 0.1
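
This autoscaler relies on an Object metric (httplat_latency_seconds, read from the httplat Service), so it only functions if a custom-metrics adapter such as prometheus-adapter is serving that metric. A usage sketch (hpa.yaml is an assumed file name):

kubectl apply -f hpa.yaml
kubectl describe hpa rng    # shows the observed latency against the 0.1 (100 ms) target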


@@ -3,10 +3,6 @@ kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.A.B.C.D.nip.io
http:


@@ -1,10 +1,3 @@
# This file is based on the following manifest:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# It adds the "skip login" flag, as well as an insecure hack to defeat SSL.
# As its name implies, it is INSECURE and you should not use it in production,
# or on clusters that contain any kind of important or sensitive data, or on
# clusters that have a life span of more than a few hours.
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -194,7 +187,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0
image: kubernetesui/dashboard:v2.0.0-rc2
imagePullPolicy: Always
ports:
- containerPort: 8443
@@ -233,7 +226,7 @@ spec:
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
@@ -279,7 +272,7 @@ spec:
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
image: kubernetesui/metrics-scraper:v1.0.2
ports:
- containerPort: 8000
protocol: TCP
@@ -300,7 +293,7 @@ spec:
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master


@@ -0,0 +1,162 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard


@@ -1,63 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: setup-namespace
spec:
rules:
- name: setup-limitrange
match:
resources:
kinds:
- Namespace
generate:
kind: LimitRange
name: default-limitrange
namespace: "{{request.object.metadata.name}}"
data:
spec:
limits:
- type: Container
min:
cpu: 0.1
memory: 0.1
max:
cpu: 2
memory: 2Gi
default:
cpu: 0.25
memory: 500Mi
defaultRequest:
cpu: 0.25
memory: 250Mi
- name: setup-resourcequota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: default-resourcequota
namespace: "{{request.object.metadata.name}}"
data:
spec:
hard:
requests.cpu: "10"
requests.memory: 10Gi
limits.cpu: "20"
limits.memory: 20Gi
- name: setup-networkpolicy
match:
resources:
kinds:
- Namespace
generate:
kind: NetworkPolicy
name: default-networkpolicy
namespace: "{{request.object.metadata.name}}"
data:
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
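
Each of the three generate rules above fires when a Namespace is created. A quick way to check, assuming the policy is installed:

kubectl create namespace test-setup
kubectl -n test-setup get limitrange,resourcequota,networkpolicy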


@@ -1,22 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
validationFailureAction: enforce
rules:
- name: ensure-pod-color-is-valid
match:
resources:
kinds:
- Pod
selector:
matchExpressions:
- key: color
operator: Exists
- key: color
operator: NotIn
values: [ red, green, blue ]
validate:
message: "If it exists, the label color must be red, green, or blue."
deny: {}


@@ -1,21 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
match:
resources:
kinds:
- Pod
validate:
message: "Once label color has been added, it cannot be changed."
deny:
conditions:
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEqual
value: "{{ request.object.metadata.labels.color }}"


@@ -1,25 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-removal
match:
resources:
kinds:
- Pod
selector:
matchExpressions:
- key: color
operator: DoesNotExist
validate:
message: "Once label color has been added, it cannot be removed."
deny:
conditions:
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotIn
value: []
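
Together, the three policies enforce that the color label, if present, is one of the valid values, and can neither be changed nor removed afterwards. A sketch of exercising them:

kubectl run testpod --image=nginx
kubectl label pod testpod color=purple            # denied by pod-color-policy-1
kubectl label pod testpod color=blue              # accepted
kubectl label pod testpod color=red --overwrite   # denied by pod-color-policy-2
kubectl label pod testpod color-                  # denied by pod-color-policy-3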


@@ -14,7 +14,7 @@ spec:
initContainers:
- name: git
image: alpine
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
volumeMounts:
- name: www
mountPath: /www/

File diff suppressed because it is too large.


@@ -22,10 +22,7 @@ spec:
command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
containers:
- name: postgres
image: postgres:12
env:
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
image: postgres:11
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres


@@ -1,12 +1,12 @@
---
apiVersion: policy/v1beta1
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
name: restricted
spec:
allowPrivilegeEscalation: false


@@ -1,17 +1,28 @@
apiVersion: apps/v1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "2"
creationTimestamp: null
generation: 1
labels:
app: socat
name: socat
namespace: kube-system
selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
replicas: 1
selector:
matchLabels:
app: socat
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: socat
spec:
@@ -23,19 +34,34 @@ spec:
image: alpine
imagePullPolicy: Always
name: socat
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: socat
name: socat
namespace: kube-system
selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
externalTrafficPolicy: Cluster
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: socat
sessionAffinity: None
type: NodePort
status:
loadBalancer: {}


@@ -1,17 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: whatever
spec:
#tls:
#- secretName: whatever.A.B.C.D.nip.io
# hosts:
# - whatever.A.B.C.D.nip.io
rules:
- host: whatever.nip.io
http:
paths:
- path: /
backend:
serviceName: whatever
servicePort: 1234


@@ -1,103 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system


@@ -1,122 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --accesslog
- --api
- --api.insecure
- --log.level=INFO
- --metrics.prometheus
- --providers.kubernetesingress
- --entrypoints.http.Address=:80
- --entrypoints.https.Address=:443
- --entrypoints.https.http.tls.certResolver=default
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
prometheus.io/path: "/metrics"
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system


@@ -1 +0,0 @@
traefik-v2.yaml

k8s/traefik.yaml (new file, 103 lines)

@@ -0,0 +1,103 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
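
This manifest runs Traefik 1.7 as a DaemonSet on the host network, so port 80 (ingress traffic) and port 8080 (the API/dashboard enabled by --api) are bound directly on every node. A smoke-test sketch, where NODE stands for any node address (an assumption for this sketch):

kubectl -n kube-system get daemonset traefik-ingress-controller
curl http://NODE/                   # the ingress entrypoint
curl http://NODE:8080/dashboard/    # the Traefik 1.7 web UI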


@@ -8,24 +8,24 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: user=jean.doe
name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
resources: [ certificatesigningrequests ]
verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
resourceNames: [ user=jean.doe ]
resourceNames: [ users:jean.doe ]
resources: [ certificatesigningrequests ]
verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: user=jean.doe
name: users:jean.doe
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: user=jean.doe
name: users:jean.doe
subjects:
- kind: ServiceAccount
name: jean.doe


@@ -1,5 +1,4 @@
INFRACLASS=openstack-tf
INFRACLASS=openstack
# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
# and customize the variables below.
export TF_VAR_user="jpetazzo"
@@ -7,4 +6,4 @@ export TF_VAR_tenant="training"
export TF_VAR_domain="Default"
export TF_VAR_password="..."
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
export TF_VAR_flavor="GP1.S"
export TF_VAR_flavor="GP1.S"


@@ -1,24 +0,0 @@
INFRACLASS=openstack-cli
# Copy that file to e.g. openstack or ovh, then customize it.
# Some Openstack providers (like OVHcloud) will let you download
# a file containing credentials. That's what you need to use.
# The file below contains some example values.
export OS_AUTH_URL=https://auth.cloud.ovh.net/v3/
export OS_IDENTITY_API_VERSION=3
export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-"Default"}
export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME:-"Default"}
export OS_TENANT_ID=abcd1234
export OS_TENANT_NAME="0123456"
export OS_USERNAME="user-xyz123"
export OS_PASSWORD=AbCd1234
export OS_REGION_NAME="GRA7"
# And then some values to indicate server type, image, etc.
# You can see available flavors with `openstack flavor list`
export OS_FLAVOR=s1-4
# You can see available images with `openstack image list`
export OS_IMAGE=896c5f54-51dc-44f0-8c22-ce99ba7164df
# You can create a key with `openstack keypair create --public-key ~/.ssh/id_rsa.pub containertraining`
export OS_KEY=containertraining


@@ -1,5 +0,0 @@
INFRACLASS=hetzner
if ! [ -f ~/.config/hcloud/cli.toml ]; then
warn "~/.config/hcloud/cli.toml not found."
warn "Make sure that the Hetzner CLI (hcloud) is installed and configured."
fi


@@ -1 +0,0 @@
INFRACLASS=scaleway


@@ -43,16 +43,6 @@ _cmd_cards() {
info "$0 www"
}
_cmd clean "Remove information about stopped clusters"
_cmd_clean() {
for TAG in tags/*; do
if grep -q ^stopped$ "$TAG/status"; then
info "Removing $TAG..."
rm -rf "$TAG"
fi
done
}
_cmd deploy "Install Docker on a bunch of running VMs"
_cmd_deploy() {
TAG=$1
@@ -75,27 +65,6 @@ _cmd_deploy() {
sleep 1
done"
# Special case for scaleway since it doesn't come with sudo
if [ "$INFRACLASS" = "scaleway" ]; then
pssh -l root "
grep DEBIAN_FRONTEND /etc/environment || echo DEBIAN_FRONTEND=noninteractive >> /etc/environment
grep cloud-init /etc/sudoers && rm /etc/sudoers
apt-get update && apt-get install sudo -y"
fi
# FIXME
# Special case for hetzner since it doesn't have an ubuntu user
#if [ "$INFRACLASS" = "hetzner" ]; then
# pssh -l root "
#[ -d /home/ubuntu ] ||
# useradd ubuntu -m -s /bin/bash
#echo 'ubuntu ALL=(ALL:ALL) NOPASSWD:ALL' > /etc/sudoers.d/ubuntu
#[ -d /home/ubuntu/.ssh ] ||
# install --owner=ubuntu --mode=700 --directory /home/ubuntu/.ssh
#[ -f /home/ubuntu/.ssh/authorized_keys ] ||
# install --owner=ubuntu --mode=600 /root/.ssh/authorized_keys --target-directory /home/ubuntu/.ssh"
#fi
# Copy settings and install Python YAML parser
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
pssh "
@@ -162,19 +131,19 @@ _cmd_kubebins() {
cd /usr/local/bin
if ! [ -x etcd ]; then
##VERSION##
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz \
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
if ! [ -x hyperkube ]; then
##VERSION##
curl -L https://dl.k8s.io/v1.18.10/kubernetes-server-linux-amd64.tar.gz \
curl -L https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz \
| sudo tar --strip-components=3 -zx \
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
fi
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
if ! [ -x bridge ]; then
curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz \
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
| sudo tar -zx
fi
"
@@ -204,15 +173,13 @@ _cmd_kube() {
pssh --timeout 200 "
sudo apt-get update -q &&
sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
# Initialize kube master
pssh --timeout 200 "
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
kubeadm token generate > /tmp/token &&
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4) --ignore-preflight-errors=NumCPU
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4)
fi"
# Put kubeconfig in ubuntu's and docker's accounts
@@ -245,23 +212,17 @@ _cmd_kube() {
if i_am_first_node; then
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
fi"
}
_cmd kubetools "Install a bunch of CLI tools for Kubernetes"
_cmd_kubetools() {
TAG=$1
need_tag
# Install kubectx and kubens
pssh "
[ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
sudo ln -sf \$HOME/kubectx/kubectx /usr/local/bin/kctx &&
sudo ln -sf \$HOME/kubectx/kubens /usr/local/bin/kns &&
sudo cp \$HOME/kubectx/completion/*.bash /etc/bash_completion.d &&
sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx &&
sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns &&
sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d &&
[ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
sudo -u docker tee -a /home/docker/.bashrc <<EOF
. \$HOME/kube-ps1/kube-ps1.sh
. /home/ubuntu/kube-ps1/kube-ps1.sh
KUBE_PS1_PREFIX=""
KUBE_PS1_SUFFIX=""
KUBE_PS1_SYMBOL_ENABLE="false"
@@ -285,22 +246,11 @@ EOF"
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
# Install kustomize
pssh "
if [ ! -x /usr/local/bin/kustomize ]; then
##VERSION##
curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v3.6.1/kustomize_v3.6.1_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx kustomize
echo complete -C /usr/local/bin/kustomize kustomize | sudo tee /etc/bash_completion.d/kustomize
fi"
# Install ship
# Note: 0.51.3 is the last version that doesn't display GIN-debug messages
# (don't want to get folks confused by that!)
pssh "
if [ ! -x /usr/local/bin/ship ]; then
##VERSION##
curl -L https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_amd64.tar.gz |
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx ship
fi"
@@ -312,54 +262,7 @@ EOF"
sudo chmod +x /usr/local/bin/aws-iam-authenticator
fi"
# Install the krew package manager
pssh "
if [ ! -d /home/docker/.krew ]; then
cd /tmp &&
curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz |
tar -zxf- &&
sudo -u docker -H ./krew-linux_amd64 install krew &&
echo export PATH=/home/docker/.krew/bin:\\\$PATH | sudo -u docker tee -a /home/docker/.bashrc
fi"
# Install k9s and popeye
pssh "
if [ ! -x /usr/local/bin/k9s ]; then
FILENAME=k9s_\$(uname -s)_\$(uname -m).tar.gz &&
curl -sSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
sudo tar -zxvf- -C /usr/local/bin k9s
fi
if [ ! -x /usr/local/bin/popeye ]; then
FILENAME=popeye_\$(uname -s)_\$(uname -m).tar.gz &&
curl -sSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
sudo tar -zxvf- -C /usr/local/bin popeye
fi"
# Install Tilt
pssh "
if [ ! -x /usr/local/bin/tilt ]; then
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
fi"
# Install Skaffold
pssh "
if [ ! -x /usr/local/bin/skaffold ]; then
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 &&
sudo install skaffold /usr/local/bin/
fi"
# Install Kompose
pssh "
if [ ! -x /usr/local/bin/kompose ]; then
curl -Lo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-amd64 &&
sudo install kompose /usr/local/bin
fi"
pssh "
if [ ! -x /usr/local/bin/kubeseal ]; then
curl -Lo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.13.1/kubeseal-linux-amd64 &&
sudo install kubeseal /usr/local/bin
fi"
sep "Done"
}
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
@@ -381,44 +284,29 @@ _cmd_kubetest() {
set -e
if i_am_first_node; then
which kubectl
for NODE in \$(grep [0-9]\$ /etc/hosts | grep -v ^127 | awk {print\ \\\$2}); do
for NODE in \$(awk /[0-9]\$/\ {print\ \\\$2} /etc/hosts); do
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
done
fi"
}
_cmd ips "Show the IP addresses for a given tag"
_cmd_ips() {
_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token"
_cmd_ids() {
TAG=$1
need_tag $TAG
SETTINGS=tags/$TAG/settings.yaml
CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
while true; do
for I in $(seq $CLUSTERSIZE); do
read ip || return 0
printf "%s\t" "$ip"
done
printf "\n"
done < tags/$TAG/ips.txt
info "Looking up by tag:"
aws_get_instance_ids_by_tag $TAG
# Just in case we managed to create instances but weren't able to tag them
info "Looking up by token:"
aws_get_instance_ids_by_client_token $TAG
}
_cmd list "List all VMs on a given infrastructure (or all infras if no arg given)"
_cmd list "List available groups for a given infrastructure"
_cmd_list() {
case "$1" in
"")
for INFRA in infra/*; do
$0 list $INFRA
done
;;
*/example.*)
;;
*)
need_infra $1
sep "Listing instances for $1"
infra_list
;;
esac
need_infra $1
infra_list
}
_cmd listall "List VMs running on all configured infrastructures"
@@ -441,7 +329,7 @@ _cmd_maketag() {
if [ -z $USER ]; then
export USER=anonymous
fi
MS=$(($(date +%N | tr -d 0)/1000000))
MS=$(($(date +%N)/1000000))
date +%Y-%m-%d-%H-%M-$MS-$USER
}
@@ -512,6 +400,16 @@ _cmd_opensg() {
infra_opensg
}
_cmd portworx "Prepare the nodes for Portworx deployment"
_cmd_portworx() {
TAG=$1
need_tag
pssh "
sudo truncate --size 10G /portworx.blk &&
sudo losetup /dev/loop4 /portworx.blk"
}
_cmd disableaddrchecks "Disable source/destination IP address checks"
_cmd_disableaddrchecks() {
TAG=$1
@@ -548,17 +446,6 @@ _cmd_remap_nodeports() {
if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
fi"
info "If you have manifests hard-coding nodePort values,"
info "you might want to patch them with a command like:"
info "
if i_am_first_node; then
kubectl -n kube-system patch svc prometheus-server \\
-p 'spec: { ports: [ {port: 80, nodePort: 10101} ]}'
fi
"
}
_cmd quotas "Check our infrastructure quotas (max instances)"
@@ -567,6 +454,18 @@ _cmd_quotas() {
infra_quotas
}
_cmd retag "(FIXME) Apply a new tag to a group of VMs"
_cmd_retag() {
OLDTAG=$1
NEWTAG=$2
TAG=$OLDTAG
need_tag
if [[ -z "$NEWTAG" ]]; then
die "You must specify a new tag to apply."
fi
aws_tag_instances $OLDTAG $NEWTAG
}
_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
TAG=$1
@@ -584,7 +483,6 @@ _cmd_start() {
--settings) SETTINGS=$2; shift 2;;
--count) COUNT=$2; shift 2;;
--tag) TAG=$2; shift 2;;
--students) STUDENTS=$2; shift 2;;
*) die "Unrecognized parameter: $1."
esac
done
@@ -596,14 +494,8 @@ _cmd_start() {
die "Please add --settings flag to specify which settings file to use."
fi
if [ -z "$COUNT" ]; then
CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
if [ -z "$STUDENTS" ]; then
warning "Neither --count nor --students was specified."
warning "According to the settings file, the cluster size is $CLUSTERSIZE."
warning "Deploying one cluster of $CLUSTERSIZE nodes."
STUDENTS=1
fi
COUNT=$(($STUDENTS*$CLUSTERSIZE))
COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
warning "No --count option was specified. Using value from settings file ($COUNT)."
fi
# Check that the specified settings and infrastructure are valid.
@@ -621,43 +513,11 @@ _cmd_start() {
infra_start $COUNT
sep
info "Successfully created $COUNT instances with tag $TAG"
sep
echo created > tags/$TAG/status
# If the settings.yaml file has a "steps" field,
# automatically execute all the actions listed in that field.
# If an action fails, retry it up to 10 times.
python -c 'if True: # hack to deal with indentation
import sys, yaml
settings = yaml.safe_load(sys.stdin)
print ("\n".join(settings.get("steps", [])))
' < tags/$TAG/settings.yaml \
| while read step; do
if [ -z "$step" ]; then
break
fi
sep
info "Automatically executing step '$step'."
TRY=1
MAXTRY=10
while ! $0 $step $TAG ; do
TRY=$(($TRY+1))
if [ $TRY -gt $MAXTRY ]; then
error "This step ($step) failed after $MAXTRY attempts."
info "You can troubleshoot the situation manually, or terminate these instances with:"
info "$0 stop $TAG"
die "Giving up."
else
sep
info "Step '$step' failed. Let's wait 10 seconds and try again."
info "(Attempt $TRY out of $MAXTRY.)"
sleep 10
fi
done
done
sep
info "Deployment successful."
info "To log into the first machine of that batch, you can run:"
info "$0 ssh $TAG"
info "To deploy Docker on these instances, you can run:"
info "$0 deploy $TAG"
info "To terminate these instances, you can run:"
info "$0 stop $TAG"
}
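
The loop above reads an optional steps list from the tag's settings file and replays each entry as a subcommand of this script, retrying each step up to 10 times. A hypothetical settings file (with TAG set to an existing tag; deploy, kube, and kubetest are commands defined in this script):

cat > tags/$TAG/settings.yaml <<EOF
clustersize: 3
steps:
  - deploy
  - kube
  - kubetest
EOF
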
@@ -721,8 +581,8 @@ _cmd_helmprom() {
need_tag
pssh "
if i_am_first_node; then
sudo -u docker -H helm helm repo add prometheus-community https://prometheus-community.github.io/helm-charts/
sudo -u docker -H helm install prometheus prometheus-community/prometheus \
sudo -u docker -H helm repo add stable https://kubernetes-charts.storage.googleapis.com/
sudo -u docker -H helm install prometheus stable/prometheus \
--namespace kube-system \
--set server.service.type=NodePort \
--set server.service.nodePort=30090 \
@@ -755,12 +615,11 @@ _cmd_webssh() {
sudo apt-get update &&
sudo apt-get install python-tornado python-paramiko -y"
pssh "
cd /opt
[ -d webssh ] || sudo git clone https://github.com/jpetazzo/webssh"
[ -d webssh ] || git clone https://github.com/jpetazzo/webssh"
pssh "
for KEYFILE in /etc/ssh/*.pub; do
read a b c < \$KEYFILE; echo localhost \$a \$b
done | sudo tee /opt/webssh/known_hosts"
done > webssh/known_hosts"
pssh "cat >webssh.service <<EOF
[Unit]
Description=webssh
@@ -769,7 +628,7 @@ Description=webssh
WantedBy=multi-user.target
[Service]
WorkingDirectory=/opt/webssh
WorkingDirectory=/home/ubuntu/webssh
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
@@ -792,6 +651,11 @@ _cmd_www() {
python3 -m http.server
}
greet() {
IAMUSER=$(aws iam get-user --query 'User.UserName')
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
}
pull_tag() {
# Pre-pull a bunch of images
pssh --timeout 900 'for I in \
@@ -881,3 +745,27 @@ make_key_name() {
SHORT_FINGERPRINT=$(ssh-add -l | grep RSA | head -n1 | cut -d " " -f 2 | tr -d : | cut -c 1-8)
echo "${SHORT_FINGERPRINT}-${USER}"
}
sync_keys() {
# make sure ssh-add -l contains "RSA"
ssh-add -l | grep -q RSA \
|| die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent, add your keys?"
AWS_KEY_NAME=$(make_key_name)
info "Syncing keys... "
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
--public-key-material "$(ssh-add -L \
| grep -i RSA \
| head -n1 \
| cut -d " " -f 1-2)" &>/dev/null
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
else
info "Imported new key $AWS_KEY_NAME."
fi
else
info "Using existing key $AWS_KEY_NAME."
fi
}

View File

@@ -1,14 +1,9 @@
if ! command -v aws >/dev/null; then
warn "AWS CLI (aws) not found."
fi
infra_list() {
aws ec2 describe-instances --output json |
jq -r '.Reservations[].Instances[] | [.InstanceId, .ClientToken, .State.Name, .InstanceType ] | @tsv'
aws_display_tags
}
infra_quotas() {
aws_greet
greet
max_instances=$(aws ec2 describe-account-attributes \
--attribute-names max-instances \
@@ -26,10 +21,10 @@ infra_start() {
COUNT=$1
# Print our AWS username, to ease the pain of credential-juggling
aws_greet
greet
# Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
key_name=$(aws_sync_keys)
key_name=$(sync_keys)
AMI=$(aws_get_ami) # Retrieve the AWS image ID
if [ -z "$AMI" ]; then
@@ -66,7 +61,7 @@ infra_start() {
aws_tag_instances $TAG $TAG
# Wait until EC2 API tells us that the instances are running
aws_wait_until_tag_is_running $TAG $COUNT
wait_until_tag_is_running $TAG $COUNT
aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt
}
@@ -103,7 +98,7 @@ infra_disableaddrchecks() {
done
}
aws_wait_until_tag_is_running() {
wait_until_tag_is_running() {
max_retry=100
i=0
done_count=0
@@ -219,32 +214,3 @@ aws_get_ami() {
##VERSION##
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
}
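# (find_ubuntu_ami prints a single AMI ID for the current region, e.g.
# something like 'ami-0123456789abcdef0'; the exact value varies over time.)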
aws_greet() {
IAMUSER=$(aws iam get-user --query 'User.UserName')
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
}
aws_sync_keys() {
# make sure ssh-add -l contains "RSA"
ssh-add -l | grep -q RSA \
|| die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent, add your keys?"
AWS_KEY_NAME=$(make_key_name)
info "Syncing keys... "
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
--public-key-material "$(ssh-add -L \
| grep -i RSA \
| head -n1 \
| cut -d " " -f 1-2)" &>/dev/null
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
else
info "Imported new key $AWS_KEY_NAME."
fi
else
info "Using existing key $AWS_KEY_NAME."
fi
}

View File

@@ -1,57 +0,0 @@
if ! command -v hcloud >/dev/null; then
warn "Hetzner CLI (hcloud) not found."
fi
if ! [ -f ~/.config/hcloud/cli.toml ]; then
warn "~/.config/hcloud/cli.toml not found."
fi
infra_list() {
[ "$(hcloud server list -o json)" = "null" ] && return
hcloud server list -o json |
jq -r '.[] | [.id, .name , .status, .server_type.name] | @tsv'
}
infra_start() {
COUNT=$1
HETZNER_INSTANCE_TYPE=${HETZNER_INSTANCE_TYPE-cx21}
HETZNER_DATACENTER=${HETZNER_DATACENTER-nbg1-dc3}
HETZNER_IMAGE=${HETZNER_IMAGE-168855}
for I in $(seq 1 $COUNT); do
NAME=$(printf "%s-%03d" $TAG $I)
sep "Starting instance $I/$COUNT"
info " Datacenter: $HETZNER_DATACENTER"
info " Name: $NAME"
info " Instance type: $HETZNER_INSTANCE_TYPE"
hcloud server create \
--type=${HETZNER_INSTANCE_TYPE} \
--datacenter=${HETZNER_DATACENTER} \
--image=${HETZNER_IMAGE} \
--name=$NAME \
--label=tag=$TAG \
--ssh-key ~/.ssh/id_rsa.pub
done
hetzner_get_ips_by_tag $TAG > tags/$TAG/ips.txt
}
infra_stop() {
for ID in $(hetzner_get_ids_by_tag $TAG); do
info "Scheduling deletion of instance $ID..."
hcloud server delete $ID &
done
info "Waiting for deletion to complete..."
wait
}
hetzner_get_ids_by_tag() {
TAG=$1
hcloud server list --selector=tag=$TAG -o json | jq -r .[].name
}
hetzner_get_ips_by_tag() {
TAG=$1
hcloud server list --selector=tag=$TAG -o json | jq -r .[].public_net.ipv4.ip
}

View File

@@ -1,53 +0,0 @@
infra_list() {
openstack server list -f json |
jq -r '.[] | [.ID, .Name , .Status, .Flavor] | @tsv'
}
infra_start() {
COUNT=$1
sep "Starting $COUNT instances"
info " Region: $OS_REGION_NAME"
info " User: $OS_USERNAME"
info " Flavor: $OS_FLAVOR"
info " Image: $OS_IMAGE"
openstack server create \
--flavor $OS_FLAVOR \
--image $OS_IMAGE \
--key-name $OS_KEY \
--min $COUNT --max $COUNT \
--property workshopctl=$TAG \
$TAG
sep "Waiting for IP addresses to be available"
GOT=0
while [ "$GOT" != "$COUNT" ]; do
echo "Got $GOT/$COUNT IP addresses."
oscli_get_ips_by_tag $TAG > tags/$TAG/ips.txt
GOT="$(wc -l < tags/$TAG/ips.txt)"
done
}
infra_stop() {
info "Counting instances..."
oscli_get_instances_json $TAG |
jq -r .[].Name |
wc -l
info "Deleting instances..."
oscli_get_instances_json $TAG |
jq -r .[].Name |
xargs -P10 -n1 openstack server delete
info "Done."
}
oscli_get_instances_json() {
TAG=$1
openstack server list -f json --name "${TAG}-[0-9]*"
}
oscli_get_ips_by_tag() {
TAG=$1
oscli_get_instances_json $TAG |
jq -r .[].Networks | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' || true
}

View File

@@ -1,51 +0,0 @@
if ! command -v scw >/dev/null; then
warn "Scaleway CLI (scw) not found."
fi
if ! [ -f ~/.config/scw/config.yaml ]; then
warn "~/.config/scw/config.yaml not found."
fi
infra_list() {
scw instance server list -o json |
jq -r '.[] | [.id, .name, .state, .commercial_type] | @tsv'
}
infra_start() {
COUNT=$1
SCW_INSTANCE_TYPE=${SCW_INSTANCE_TYPE-DEV1-M}
SCW_ZONE=${SCW_ZONE-fr-par-1}
for I in $(seq 1 $COUNT); do
NAME=$(printf "%s-%03d" $TAG $I)
sep "Starting instance $I/$COUNT"
info " Zone: $SCW_ZONE"
info " Name: $NAME"
info " Instance type: $SCW_INSTANCE_TYPE"
scw instance server create \
type=${SCW_INSTANCE_TYPE} zone=${SCW_ZONE} \
image=ubuntu_bionic name=${NAME}
done
sep
scw_get_ips_by_tag $TAG > tags/$TAG/ips.txt
}
infra_stop() {
info "Counting instances..."
scw_get_ids_by_tag $TAG | wc -l
info "Deleting instances..."
scw_get_ids_by_tag $TAG |
xargs -n1 -P10 -I@@ \
scw instance server delete force-shutdown=true server-id=@@
}
scw_get_ids_by_tag() {
TAG=$1
scw instance server list name=$TAG -o json | jq -r .[].id
}
scw_get_ips_by_tag() {
TAG=$1
scw instance server list name=$TAG -o json | jq -r .[].public_ip.address
}

View File

@@ -1,23 +0,0 @@
infra_disableaddrchecks() {
die "unimplemented"
}
infra_list() {
die "unimplemented"
}
infra_opensg() {
die "unimplemented"
}
infra_quotas() {
die "unimplemented"
}
infra_start() {
die "unimplemented"
}
infra_stop() {
die "unimplemented"
}

View File

@@ -37,7 +37,7 @@ def system(cmd):
td = str(t2-t1)[:5]
f.write(bold("[{}] in {}s\n".format(retcode, td)))
STEP += 1
with open(os.environ["HOME"] + "/.bash_history", "a") as f:
with open("/home/ubuntu/.bash_history", "a") as f:
f.write("{}\n".format(cmd))
if retcode != 0:
msg = "The following command failed with exit code {}:\n".format(retcode)
@@ -114,7 +114,7 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
system("sudo service ssh restart")
system("sudo apt-get -q update")
system("sudo apt-get -qy install git jid jq")
system("sudo apt-get -qy install git jq")
system("sudo apt-get -qy install emacs-nox joe")
#######################

View File

@@ -18,13 +18,7 @@ pssh() {
echo "[parallel-ssh] $@"
export PSSH=$(which pssh || which parallel-ssh)
if [ "$INFRACLASS" = hetzner ]; then
LOGIN=root
else
LOGIN=ubuntu
fi
$PSSH -h $HOSTFILE -l $LOGIN \
$PSSH -h $HOSTFILE -l ubuntu \
--par 100 \
-O LogLevel=ERROR \
-O UserKnownHostsFile=/dev/null \

View File

@@ -1,45 +1,21 @@
#!/usr/bin/env python
"""
There are two ways to use this script:
1. Pass a tag name as a single argument.
It will then take the clusters corresponding to that tag, and assign one
domain name per cluster. Currently it gets the domains from a hard-coded
path. There should be more domains than clusters.
Example: ./map-dns.py 2020-08-15-jp
2. Pass a domain as the 1st argument, and IP addresses then.
It will configure the domain with the listed IP addresses.
Example: ./map-dns.py open-duck.site 1.2.3.4 2.3.4.5 3.4.5.6
In both cases, the domains should be configured to use GANDI LiveDNS.
"""
import os
import requests
import sys
import yaml
# configurable stuff
domains_file = "../../plentydomains/domains.txt"
config_file = os.path.join(
os.environ["HOME"], ".config/gandi/config.yaml")
tag = None
tag = "test"
apiurl = "https://dns.api.gandi.net/api/v5/domains"
if len(sys.argv) == 2:
tag = sys.argv[1]
domains = open(domains_file).read().split()
domains = [ d for d in domains if not d.startswith('#') ]
ips = open(f"tags/{tag}/ips.txt").read().split()
settings_file = f"tags/{tag}/settings.yaml"
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
else:
domains = [sys.argv[1]]
ips = sys.argv[2:]
clustersize = len(ips)
# inferred stuff
domains = open(domains_file).read().split()
apikey = yaml.safe_load(open(config_file))["apirest"]["key"]
ips = open(f"tags/{tag}/ips.txt").read().split()
settings_file = f"tags/{tag}/settings.yaml"
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
# now do the actual work
while domains and ips:

View File

@@ -21,9 +21,3 @@ machine_version: 0.15.0
# Password used to connect with the "docker user"
docker_user_password: training
steps:
- deploy
- webssh
- tailhist
- cards

View File

@@ -20,11 +20,3 @@ machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training
steps:
- deploy
- webssh
- tailhist
- kube
- kubetools
- cards
- kubetest

View File

@@ -35,8 +35,6 @@ TAG=$PREFIX-$SETTINGS
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl disabledocker $TAG
retry 5 ./workshopctl kubebins $TAG
retry 5 ./workshopctl webssh $TAG
retry 5 ./workshopctl tailhist $TAG
./workshopctl cards $TAG
SETTINGS=admin-kubenet
@@ -50,8 +48,6 @@ TAG=$PREFIX-$SETTINGS
retry 5 ./workshopctl disableaddrchecks $TAG
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kubebins $TAG
retry 5 ./workshopctl webssh $TAG
retry 5 ./workshopctl tailhist $TAG
./workshopctl cards $TAG
SETTINGS=admin-kuberouter
@@ -65,8 +61,6 @@ TAG=$PREFIX-$SETTINGS
retry 5 ./workshopctl disableaddrchecks $TAG
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kubebins $TAG
retry 5 ./workshopctl webssh $TAG
retry 5 ./workshopctl tailhist $TAG
./workshopctl cards $TAG
#INFRA=infra/aws-us-west-1
@@ -82,7 +76,5 @@ TAG=$PREFIX-$SETTINGS
--count $((3*$STUDENTS))
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kube $TAG 1.17.13
retry 5 ./workshopctl webssh $TAG
retry 5 ./workshopctl tailhist $TAG
retry 5 ./workshopctl kube $TAG 1.15.9
./workshopctl cards $TAG

View File

@@ -1,7 +1,7 @@
resource "openstack_compute_instance_v2" "machine" {
count = "${var.count}"
name = "${format("%s-%04d", "${var.prefix}", count.index+1)}"
image_name = "Ubuntu 18.04.4 20200324"
image_name = "Ubuntu 16.04.5 (Xenial Xerus)"
flavor_name = "${var.flavor}"
security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"]
key_pair = "${openstack_compute_keypair_v2.ssh_deploy_key.name}"

View File

@@ -15,6 +15,7 @@ for lib in lib/*.sh; do
done
DEPENDENCIES="
aws
ssh
curl
jq

View File

@@ -1,24 +1,15 @@
# Uncomment and/or edit one of the following lines if necessary.
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /kube-adv.yml.html 200!
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
# And this allows us to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
#/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=DOCKERALLDAY
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=DOCKERALLDAY
#/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=SWEETFEBSALEC1
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=SWEETFEBSALEC4
# Shortlink for the QRCode
/q /qrcode.html 200
# Shortlinks for next training in English and French
#/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
/hi5 https://enix.io/fr/services/formation/online/
# Survey form
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform

View File

@@ -233,7 +233,7 @@ def setup_tmux_and_ssh():
ipaddr = "$IPADDR"
uid = os.getuid()
raise Exception(r"""
raise Exception("""
1. If you're running this directly from a node:
tmux
@@ -247,16 +247,6 @@ rm -f /tmp/tmux-{uid}/default && ssh -t -L /tmp/tmux-{uid}/default:/tmp/tmux-100
3. If you cannot control a remote tmux:
tmux new-session ssh docker@{ipaddr}
4. If you are running this locally with a remote cluster, make sure your prompt has the expected format:
tmux
IPADDR=$(
kubectl get nodes -o json |
jq -r '.items[0].status.addresses[] | select(.type=="ExternalIP") | .address'
)
export PS1="\n[{ipaddr}] \u@\h:\w\n\$ "
""".format(uid=uid, ipaddr=ipaddr))
else:
logging.info("Found tmux session. Trying to acquire shell prompt.")

View File

@@ -1,7 +1,7 @@
class: title
# Advanced Dockerfile Syntax
# Advanced Dockerfiles
![construction](images/title-advanced-dockerfiles.jpg)
@@ -12,10 +12,7 @@ class: title
We have seen simple Dockerfiles to illustrate how Docker builds
container images.
In this section, we will give a recap of the Dockerfile syntax,
and introduce advanced Dockerfile commands that we might
come across sometimes; or that we might want to use in some
specific scenarios.
In this section, we will see more Dockerfile commands.
---
@@ -423,8 +420,3 @@ ONBUILD COPY . /src
* You can't chain `ONBUILD` instructions with `ONBUILD`.
* `ONBUILD` can't be used to trigger `FROM` instructions.
???
:EN:- Advanced Dockerfile syntax
:FR:- Dockerfile niveau expert

View File

@@ -280,8 +280,3 @@ CONTAINER ID IMAGE ... CREATED STATUS
5c1dfd4d81f1 jpetazzo/clock ... 40 min. ago Exited (0) 40 min. ago
b13c164401fb ubuntu ... 55 min. ago Exited (130) 53 min. ago
```
???
:EN:- Foreground and background containers
:FR:- Exécution interactive ou en arrière-plan

View File

@@ -167,8 +167,3 @@ Automated process = good.
In the next chapter, we will learn how to automate the build
process by writing a `Dockerfile`.
???
:EN:- Building our first images interactively
:FR:- Fabriquer nos premières images à la main

View File

In this example, `sh -c` will still be used, but
the shell gets replaced by `figlet` when `figlet` starts execution.
This allows running processes as PID 1 without using the JSON syntax.
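To see that mechanism outside of Docker, here is a minimal sketch in plain
shell (assuming `figlet` is installed): the `exec` built-in makes the shell
replace itself with `figlet` instead of spawning it as a child process.

```bash
sh -c 'echo "shell PID: $$"; exec figlet hello'
```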
???
:EN:- Towards automated, reproducible builds
:EN:- Writing our first Dockerfile
:FR:- Rendre le processus automatique et reproductible
:FR:- Écrire son premier Dockerfile

View File

@@ -272,7 +272,3 @@ $ docker run -it --entrypoint bash myfiglet
root@6027e44e2955:/#
```
???
:EN:- CMD and ENTRYPOINT
:FR:- CMD et ENTRYPOINT

View File

@@ -322,11 +322,3 @@ You can:
Each copy will run in a different network, totally isolated from the other.
This is ideal to debug regressions, do side-by-side comparisons, etc.
???
:EN:- Using compose to describe an environment
:EN:- Connecting services together with a *Compose file*
:FR:- Utiliser Compose pour décrire son environnement
:FR:- Écrire un *Compose file* pour connecter les services entre eux

View File

@@ -307,8 +307,6 @@ Let's remove the `redis` container:
$ docker rm -f redis
```
* `-f`: Force the removal of a running container (uses SIGKILL)
And create one that doesn't block the `redis` name:
```bash

View File

@@ -226,13 +226,3 @@ We've learned how to:
In the next chapter, we will see how to connect
containers together without exposing their ports.
???
:EN:Connecting containers
:EN:- Container networking basics
:EN:- Exposing a container
:FR:Connecter les conteneurs
:FR:- Description du modèle réseau des conteneurs
:FR:- Exposer un conteneur

View File

@@ -98,8 +98,3 @@ Success!
* Place it in a different directory, with the `WORKDIR` instruction.
* Even better, use the `gcc` official image.
???
:EN:- The build cache
:FR:- Tirer parti du cache afin d'optimiser la vitesse de *build*

View File

@@ -431,8 +431,3 @@ services:
- It's OK (and even encouraged) to start simple and evolve as needed.
- Feel free to review this chapter later (after writing a few Dockerfiles) for inspiration!
???
:EN:- Dockerfile tips, tricks, and best practices
:FR:- Bonnes pratiques pour la construction des images

View File

@@ -290,8 +290,3 @@ bash: figlet: command not found
* We have a clear definition of our environment, and can share it reliably with others.
* Let's see in the next chapters how to bake a custom image with `figlet`!
???
:EN:- Running our first container
:FR:- Lancer nos premiers conteneurs

View File

@@ -226,8 +226,3 @@ docker export <container_id> | tar tv
```
This will give a detailed listing of the content of the container.
???
:EN:- Troubleshooting and getting inside a container
:FR:- Inspecter un conteneur en détail, en *live* ou *post-mortem*

View File

@@ -375,13 +375,3 @@ We've learned how to:
* Understand Docker image namespacing.
* Search and download images.
???
:EN:Building images
:EN:- Containers, images, and layers
:EN:- Image addresses and tags
:EN:- Finding and transferring images
:FR:Construire des images
:FR:- La différence entre un conteneur et une image
:FR:- La notion de *layer* partagé entre images

View File

@@ -80,8 +80,3 @@ $ docker ps --filter label=owner=alice
(To determine internal cross-billing, or who to page in case of outage.)
* etc.
???
:EN:- Using labels to identify containers
:FR:- Étiqueter ses conteneurs avec des méta-données

View File

@@ -391,10 +391,3 @@ We've learned how to:
* Use a simple local development workflow.
???
:EN:Developing with containers
:EN:- “Containerize” a development environment
:FR:Développer au jour le jour
:FR:- « Containeriser » son environnement de développement

View File

@@ -313,11 +313,3 @@ virtually "free."
* Sometimes, we want to inspect a specific intermediary build stage.
* Or, we want to describe multiple images using a single Dockerfile.
???
:EN:Optimizing our images and their build process
:EN:- Leveraging multi-stage builds
:FR:Optimiser les images et leur construction
:FR:- Utilisation d'un *multi-stage build*

View File

@@ -130,12 +130,3 @@ $ docker inspect --format '{{ json .Created }}' <containerID>
* The optional `json` keyword asks for valid JSON output.
<br/>(e.g. here it adds the surrounding double-quotes.)
???
:EN:Managing container lifecycle
:EN:- Naming and inspecting containers
:FR:Suivre ses conteneurs à la loupe
:FR:- Obtenir des informations détaillées sur un conteneur
:FR:- Associer un identifiant unique à un conteneur

View File

@@ -175,10 +175,3 @@ class: extra-details
* This will cause some CLI and TUI programs to redraw the screen.
* But not all of them.
???
:EN:- Restarting old containers
:EN:- Detaching and reattaching to container
:FR:- Redémarrer des anciens conteneurs
:FR:- Se détacher et rattacher à des conteneurs

View File

@@ -95,24 +95,6 @@ $ ssh <login>@<ip-address>
---
class: in-person
## `tailhist`
The shell history of the instructor is available online in real time.
Note the IP address of the instructor's virtual machine (A.B.C.D).
Open http://A.B.C.D:1088 in your browser and you should see the history.
The history is updated in real time (using a WebSocket connection).
It should be green when the WebSocket is connected.
If it turns red, reloading the page should fix it.
---
## Checking your Virtual Machine
Once logged in, make sure that you can run a basic Docker command:
@@ -143,11 +125,3 @@ Server:
]
If this doesn't work, raise your hand so that an instructor can assist you!
???
:EN:Container concepts
:FR:Premier contact avec les conteneurs
:EN:- What's a container engine?
:FR:- Qu'est-ce qu'un *container engine* ?

View File

@@ -119,7 +119,7 @@ Nano and LinuxKit VMs in Hyper-V!)
- golang, mongo, python, redis, hello-world ... and more being added
- you should still use `--platform` with multi-os images to be certain
- Windows Containers now support `localhost` accessible containers (July 2018)

View File

@@ -11,10 +11,10 @@ class State(object):
self.section_title = None
self.section_start = 0
self.section_slides = 0
self.modules = {}
self.chapters = {}
self.sections = {}
def show(self):
if self.section_title.startswith("module-"):
if self.section_title.startswith("chapter-"):
return
print("{0.section_title}\t{0.section_start}\t{0.section_slides}".format(self))
self.sections[self.section_title] = self.section_slides
@@ -38,10 +38,10 @@ for line in open(sys.argv[1]):
if line == "--":
state.current_slide += 1
toc_links = re.findall("\(#toc-(.*)\)", line)
if toc_links and state.section_title.startswith("module-"):
if state.section_title not in state.modules:
state.modules[state.section_title] = []
state.modules[state.section_title].append(toc_links[0])
if toc_links and state.section_title.startswith("chapter-"):
if state.section_title not in state.chapters:
state.chapters[state.section_title] = []
state.chapters[state.section_title].append(toc_links[0])
# This is really hackish
if line.startswith("class:"):
for klass in EXCLUDED:
@@ -51,7 +51,7 @@ for line in open(sys.argv[1]):
state.show()
for module in sorted(state.modules, key=lambda f: int(f.split("-")[1])):
module_size = sum(state.sections[s] for s in state.modules[module])
print("{}\t{}\t{}".format("total size for", module, module_size))
for chapter in sorted(state.chapters, key=lambda f: int(f.split("-")[1])):
chapter_size = sum(state.sections[s] for s in state.chapters[chapter])
print("{}\t{}\t{}".format("total size for", chapter, chapter_size))

View File

@@ -1,118 +0,0 @@
#!/bin/sh
# This script helps to add "force-redirects" where needed.
# This might replace your entire git repos with Vogon poetry.
# Use at your own peril!
set -eu
# The easiest way to set this variable is by copy-pasting from
# the Netlify web dashboard, then doctoring the output a bit.
# Yeah, that's gross, but after spending 10 minutes with the
# API and the CLI and OAuth, it took about 10 seconds to do it
# with good old copy-paste, so ... :)
SITES="
2020-01-caen
2020-01-zr
2020-02-caen
2020-02-enix
2020-02-outreach
2020-02-vmware
2020-03-ardan
2020-03-qcon
alfun-2019-06
boosterconf2018
clt-2019-10
dc17eu
decembre2018
devopsdaysams2018
devopsdaysmsp2018
gotochgo2018
gotochgo2019
indexconf2018
intro-2019-01
intro-2019-04
intro-2019-06
intro-2019-08
intro-2019-09
intro-2019-11
intro-2019-12
k8s2d
kadm-2019-04
kadm-2019-06
kube
kube-2019-01
kube-2019-02
kube-2019-03
kube-2019-04
kube-2019-06
kube-2019-08
kube-2019-09
kube-2019-10
kube-2019-11
lisa-2019-10
lisa16t1
lisa17m7
lisa17t9
maersk-2019-07
maersk-2019-08
ndcminnesota2018
nr-2019-08
oscon2018
oscon2019
osseu17
pycon2019
qconsf18wkshp
qconsf2017intro
qconsf2017swarm
qconsf2018
qconuk2019
septembre2018
sfsf-2019-06
srecon2018
swarm2017
velny-k8s101-2018
velocity-2019-11
velocityeu2018
velocitysj2018
vmware-2019-11
weka
wwc-2019-10
wwrk-2019-05
wwrk-2019-06
"
for SITE in $SITES; do
echo "##### $SITE"
git checkout -q origin/$SITE
# No _redirects? No problem.
if ! [ -f _redirects ]; then
continue
fi
# If there is already a force redirect on /, we're good.
if grep '^/ .* 200!' _redirects; then
continue
fi
# If there is a redirect on / ... and it's not forced ... do something.
if grep "^/ .* 200$" _redirects; then
echo "##### $SITE needs to be patched"
sed -i 's,^/ \(.*\) 200$,/ \1 200!,' _redirects
git add _redirects
git commit -m "fix-redirects.sh: adding forced redirect"
git push origin HEAD:$SITE
continue
fi
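# For example, the sed above turns a non-forced redirect like:
#   / /kube-fullday.yml.html 200
# into a forced one:
#   / /kube-fullday.yml.html 200!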
if grep "^/ " _redirects; then
echo "##### $SITE with / but no status code"
echo "##### Should I add '200!' ?"
read foo
sed -i 's,^/ \(.*\)$,/ \1 200!,' _redirects
git add _redirects
git commit -m "fix-redirects.sh: adding status code and forced redirect"
git push origin HEAD:$SITE
continue
fi
echo "##### $SITE without / ?"
cat _redirects
done

Binary file not shown. (Removed image; 66 KiB.)

Binary file not shown. (Removed image; 53 KiB.)

View File

@@ -7,7 +7,6 @@ FLAGS=dict(
fr=u"🇫🇷",
uk=u"🇬🇧",
us=u"🇺🇸",
www=u"🌐",
)
TEMPLATE="""<html>
@@ -20,9 +19,9 @@ TEMPLATE="""<html>
<div class="main">
<table>
<tr><td class="header" colspan="3">{{ title }}</td></tr>
<tr><td class="details" colspan="3">Note: while some workshops are delivered in other languages, slides are always in English.</td></tr>
<tr><td class="details" colspan="3">Note: while some workshops are delivered in French, slides are always in English.</td></tr>
<tr><td class="title" colspan="3">Free Kubernetes intro course</td></tr>
<tr><td class="title" colspan="3">Free video of our latest workshop</td></tr>
<tr>
<td>Getting Started With Kubernetes and Container Orchestration</td>
@@ -36,11 +35,11 @@ TEMPLATE="""<html>
<td class="details">If you're interested, we can deliver that workshop (or longer courses) to your team or organization.</td>
</tr>
<tr>
<td class="details">Contact <a href="mailto:jerome.petazzoni@gmail.com">Jérôme Petazzoni</a> to make that happen!</td>
<td class="details">Contact <a href="mailto:jerome.petazzoni@gmail.com">Jérôme Petazzoni</a> to make that happen!</a></td>
</tr>
{% if coming_soon %}
<tr><td class="title" colspan="3">Coming soon</td></tr>
<tr><td class="title" colspan="3">Coming soon near you</td></tr>
{% for item in coming_soon %}
<tr>
@@ -141,26 +140,13 @@ import yaml
items = yaml.safe_load(open("index.yaml"))
def prettyparse(date):
months = [
"January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"
]
month = months[date.month-1]
suffix = {
1: "st", 2: "nd", 3: "rd",
21: "st", 22: "nd", 23: "rd",
31: "st"}.get(date.day, "th")
return date.year, month, "{}{}".format(date.day, suffix)
# Items with a date correspond to scheduled sessions.
# Items without a date correspond to self-paced content.
# The date should be specified as a string (e.g. 2018-11-26).
# It can also be a list of two elements (e.g. [2018-11-26, 2018-11-28]).
# The latter indicates an event spanning multiple dates.
# The event will be considered "current" (shown in the list of
# The first date will be used in the generated page, but the event
# will be considered "current" (and therefore, shown in the list of
# upcoming events) until the second date.
for item in items:
@@ -170,23 +156,19 @@ for item in items:
date_begin, date_end = date
else:
date_begin, date_end = date, date
y1, m1, d1 = prettyparse(date_begin)
y2, m2, d2 = prettyparse(date_end)
if (y1, m1, d1) == (y2, m2, d2):
# Single day event
pretty_date = "{} {}, {}".format(m1, d1, y1)
elif (y1, m1) == (y2, m2):
# Multi-day event within a single month
pretty_date = "{} {}-{}, {}".format(m1, d1, d2, y1)
elif y1 == y2:
# Multi-day event spanning more than a month
pretty_date = "{} {}-{} {}, {}".format(m1, d1, m2, d2, y1)
else:
# Event spanning the turn of the year (REALLY???)
pretty_date = "{} {}, {}-{} {}, {}".format(m1, d1, y1, m2, d2, y2)
suffix = {
1: "st", 2: "nd", 3: "rd",
21: "st", 22: "nd", 23: "rd",
31: "st"}.get(date_begin.day, "th")
# %e is a non-standard extension (it displays the day, but without a
# leading zero). If strftime fails with ValueError, try to fall back
# on %d (which displays the day but with a leading zero when needed).
try:
item["prettydate"] = date_begin.strftime("%B %e{}, %Y").format(suffix)
except ValueError:
item["prettydate"] = date_begin.strftime("%B %d{}, %Y").format(suffix)
item["begin"] = date_begin
item["end"] = date_end
item["prettydate"] = pretty_date
item["flag"] = FLAGS.get(item.get("country"),"")
today = datetime.date.today()

View File

@@ -1,152 +1,3 @@
- date: [2020-10-05, 2020-10-06]
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Docker intensif (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: [2020-10-07, 2020-10-09]
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Fondamentaux Kubernetes (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: 2020-10-12
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Packaging pour Kubernetes (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: [2020-10-13, 2020-10-14]
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Kubernetes avancé (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: [2020-10-19, 2020-10-20]
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Opérer Kubernetes (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: [2020-09-28, 2020-10-01]
country: www
city: streaming
event: Skills Matter
speaker: jpetazzo
title: Advanced Kubernetes Concepts
attend: https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
- date: [2020-08-29, 2020-08-30]
country: www
city: streaming
event: fwdays
speaker: jpetazzo
title: Intensive Docker Online Workshop
attend: https://fwdays.com/en/event/intensive-docker-workshop
slides: https://2020-08-fwdays.container.training/
- date: [2020-09-12, 2020-09-13]
country: www
city: streaming
event: fwdays
speaker: jpetazzo
title: Kubernetes Intensive Online Workshop
attend: https://fwdays.com/en/event/kubernetes-intensive-workshop
slides: https://2020-09-fwdays.container.training/
- date: [2020-07-07, 2020-07-09]
country: www
city: streaming
event: Ardan Live
speaker: jpetazzo
title: Intensive Docker Bootcamp
attend: https://www.eventbrite.com/e/livestream-intensive-docker-bootcamp-tickets-103258886108
- date: [2020-06-15, 2020-06-16]
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Docker intensif (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: [2020-06-17, 2020-06-19]
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Fondamentaux Kubernetes (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: 2020-06-22
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Packaging pour Kubernetes (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: [2020-06-23, 2020-06-24]
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Kubernetes avancé (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: [2020-06-25, 2020-06-26]
country: www
city: streaming
event: ENIX SAS
speaker: jpetazzo
title: Opérer Kubernetes (en français)
lang: fr
attend: https://enix.io/fr/services/formation/online/
- date: [2020-06-09, 2020-06-11]
country: www
city: streaming
event: Ardan Live
speaker: jpetazzo
title: Intensive Kubernetes Bootcamp
attend: https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
- date: [2020-05-04, 2020-05-08]
country: www
city: streaming
event: Ardan Live
speaker: jpetazzo
title: Intensive Kubernetes - Advanced Concepts
attend: https://www.eventbrite.com/e/livestream-intensive-kubernetes-advanced-concepts-tickets-102358725704
- date: [2020-03-30, 2020-04-02]
country: www
city: streaming
event: Ardan Live
speaker: jpetazzo
title: Intensive Docker and Kubernetes
attend: https://www.eventbrite.com/e/ardan-labs-live-worldwide-march-30-april-2-2020-tickets-100331129108#
slides: https://2020-03-ardan.container.training/
- date: 2020-03-06
country: uk
city: London

69
slides/intro-fullday.yml Normal file
View File

@@ -0,0 +1,69 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/toc.md
-
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
#- containers/Start_And_Attach.md
- containers/Naming_And_Inspecting.md
#- containers/Labels.md
- containers/Getting_Inside.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Container_Networking_Basics.md
#- containers/Network_Drivers.md
#- containers/Container_Network_Model.md
- containers/Local_Development_Workflow.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Multi_Stage_Builds.md
#- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
#- containers/Docker_Machine.md
#- containers/Advanced_Dockerfiles.md
#- containers/Init_Systems.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Container_Engines.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md
#- containers/Orchestration_Overview.md
-
- shared/thankyou.md
- containers/links.md

View File

@@ -0,0 +1,69 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- in-person
chapters:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Windows_Containers.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Init_Systems.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Pods_Anatomy.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md

77
slides/intro-twodays.yml Normal file
View File

@@ -0,0 +1,77 @@
title: |
Introduction
to Containers
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/toc.md
- # DAY 1
- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Dockerfile_Tips.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Exercise_Dockerfile_Advanced.md
-
- containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Start_And_Attach.md
- containers/Getting_Inside.md
- containers/Resource_Limits.md
- # DAY 2
- containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
-
- containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Installing_Docker.md
- containers/Container_Engines.md
- containers/Init_Systems.md
- containers/Advanced_Dockerfiles.md
-
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Orchestration_Overview.md
-
- shared/thankyou.md
- containers/links.md
#-
#- containers/Docker_Machine.md
#- containers/Ambassadors.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md

View File

@@ -129,8 +129,3 @@ installed and set up `kubectl` to communicate with your cluster.
```
]
???
:EN:- Securely accessing internal services
:FR:- Accès sécurisé aux services internes

View File

@@ -1,549 +0,0 @@
# Dynamic Admission Control
- This is one of the many ways to extend the Kubernetes API
- High level summary: dynamic admission control relies on webhooks that are ...
- dynamic (can be added/removed on the fly)
- running inside or outside the cluster
- *validating* (yay/nay) or *mutating* (can change objects that are created/updated)
- selective (can be configured to apply only to some kinds, some selectors...)
- mandatory or optional (should it block operations when the webhook is down?)
- Used on their own (e.g. policy enforcement) or as part of operators
---
## Use cases
Some examples ...
- Stand-alone admission controllers
*validating:* policy enforcement (e.g. quotas, naming conventions ...)
*mutating:* inject or provide default values (e.g. pod presets)
- Admission controllers part of a greater system
*validating:* advanced typing for operators
*mutating:* inject sidecars for service meshes
---
## You said *dynamic?*
- Some admission controllers are built into the API server
- They are enabled/disabled through Kubernetes API server configuration
(e.g. `--enable-admission-plugins`/`--disable-admission-plugins` flags)
- Here, we're talking about *dynamic* admission controllers
- They can be added/removed while the API server is running
(without touching the configuration files or even having access to them)
- This is done through two kinds of cluster-scope resources:
ValidatingWebhookConfiguration and MutatingWebhookConfiguration
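For example, we can list the webhook configurations currently registered on
a cluster (both lists may well be empty):

```bash
kubectl get validatingwebhookconfigurations,mutatingwebhookconfigurations
```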
---
## You said *webhooks?*
- A ValidatingWebhookConfiguration or MutatingWebhookConfiguration contains:
- a resource filter
<br/>
(e.g. "all pods", "deployments in namespace xyz", "everything"...)
- an operations filter
<br/>
(e.g. CREATE, UPDATE, DELETE)
- the address of the webhook server
- Each time an operation matches the filters, it is sent to the webhook server
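Putting these pieces together, here is a minimal sketch of such a
configuration (the webhook name and ngrok URL are hypothetical placeholders;
the manifest actually used later in this section is in
`k8s/webhook-configuration.yaml`):

```bash
kubectl apply -f - <<'EOF'
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: admission.example.container.training
webhooks:
- name: admission.example.container.training
  rules:                            # resource + operations filter
  - apiGroups: [""]
    apiVersions: ["v1"]
    operations: ["CREATE", "UPDATE"]
    resources: ["pods"]
  clientConfig:                     # address of the webhook server
    url: https://xxxx.ngrok.io
  failurePolicy: Ignore
  sideEffects: None
  admissionReviewVersions: ["v1"]
EOF
```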
---
## What gets sent exactly?
- The API server will `POST` a JSON object to the webhook
- That object will be a Kubernetes API message with `kind` `AdmissionReview`
- It will contain a `request` field, with, notably:
- `request.uid` (to be used when replying)
- `request.object` (the object created/deleted/changed)
- `request.oldObject` (when an object is modified)
- `request.userInfo` (who was making the request to the API in the first place)
(See [the documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#request) for a detailed example showing more fields.)
---
## How should the webhook respond?
- By replying with another `AdmissionReview` in JSON
- It should have a `response` field, with, notably:
- `response.uid` (matching the `request.uid`)
- `response.allowed` (`true`/`false`)
- `response.status.message` (optional string; useful when denying requests)
- `response.patchType` (when a mutating webhook changes the object; e.g. `json`)
- `response.patch` (the patch, encoded in base64)
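For concreteness, a "deny" reply could look like this (a minimal sketch,
shown as a shell heredoc; the `uid` value is hypothetical and must echo
`request.uid`):

```bash
cat <<'EOF'
{
  "apiVersion": "admission.k8s.io/v1",
  "kind": "AdmissionReview",
  "response": {
    "uid": "705ab4f5-6393-11e8-b7cc-42010a800002",
    "allowed": false,
    "status": { "message": "label 'color' must be blue, green, or red" }
  }
}
EOF
```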
---
## What if the webhook *does not* respond?
- If "something bad" happens, the API server follows the `failurePolicy` option
- this is a per-webhook option (specified in the webhook configuration)
- it can be `Fail` (the default) or `Ignore` ("allow all, unmodified")
- What's "something bad"?
- webhook responds with something invalid
- webhook takes more than 10 seconds to respond
<br/>
(this can be changed with `timeoutSeconds` field in the webhook config)
- webhook is down or has invalid certificates
<br/>
(TLS! It's not just a good idea; for admission control, it's the law!)
---
## What did you say about TLS?
- The webhook configuration can indicate:
- either `url` of the webhook server (has to begin with `https://`)
- or `service.name` and `service.namespace` of a Service on the cluster
- In the latter case, the Service has to accept TLS connections on port 443
- It has to use a certificate with CN `<name>.<namespace>.svc`
(**and** a `subjectAltName` extension with `DNS:<name>.<namespace>.svc`)
- The certificate needs to be valid (signed by a CA trusted by the API server)
... alternatively, we can pass a `caBundle` in the webhook configuration
---
## Webhook server inside or outside
- "Outside" webhook server is defined with `url` option
- convenient for external webhooks (e.g. tamper-resistant audit trail)
- also great for initial development (e.g. with ngrok)
- requires outbound connectivity (duh) and can become a SPOF
- "Inside" webhook server is defined with `service` option
- convenient when the webhook needs to be deployed and managed on the cluster
- also great for air gapped clusters
- development can be harder (but tools like [Tilt](https://tilt.dev) can help)
---
## Developing a simple admission webhook
- We're going to register a custom webhook!
- First, we'll just dump the `AdmissionRequest` object
(using a little Node app)
- Then, we'll implement a strict policy on a specific label
(using a little Flask app)
- Development will happen in local containers, plumbed with ngrok
- Then we will deploy to the cluster 🔥
---
## Running the webhook locally
- We prepared a Docker Compose file to start the whole stack
(the Node "echo" app, the Flask app, and one ngrok tunnel for each of them)
.exercise[
- Go to the webhook directory:
```bash
cd ~/container.training/webhooks/admission
```
- Start the webhook in Docker containers:
```bash
docker-compose up
```
]
*Note the URL in `ngrok-echo_1` looking like `url=https://xxxx.ngrok.io`.*
---
class: extra-details
## What's ngrok?
- Ngrok provides secure tunnels to access local services
- Example: run `ngrok http 1234`
- `ngrok` will display a publicly-available URL (e.g. https://xxxxyyyyzzzz.ngrok.io)
- Connections to https://xxxxyyyyzzzz.ngrok.io will terminate at `localhost:1234`
- Basic product is free; extra features (vanity domains, end-to-end TLS...) for $$$
- Perfect to develop our webhook!
- Probably not for production, though
(webhook requests and responses now pass through the ngrok platform)
---
## Update the webhook configuration
- We have a webhook configuration in `k8s/webhook-configuration.yaml`
- We need to update the configuration with the correct `url`
.exercise[
- Edit the webhook configuration manifest:
```bash
vim k8s/webhook-configuration.yaml
```
- **Uncomment** the `url:` line
- **Update** the `.ngrok.io` URL with the URL shown by Compose
- Save and quit
]
---
## Register the webhook configuration
- Just after we register the webhook, it will be called for each matching request
(CREATE and UPDATE on Pods in all namespaces)
- The `failurePolicy` is `Ignore`
(so if the webhook server is down, we can still create pods)
.exercise[
- Register the webhook:
```bash
kubectl apply -f k8s/webhook-configuration.yaml
```
]
It is strongly recommended to tail the logs of the API server while doing that.
---
## Create a pod
- Let's create a pod and try to set a `color` label
.exercise[
- Create a pod named `chroma`:
```bash
kubectl run --restart=Never chroma --image=nginx
```
- Add a label `color` set to `pink`:
```bash
kubectl label pod chroma color=pink
```
]
We should see the `AdmissionReview` objects in the Compose logs.
Note: the webhook doesn't do anything (other than printing the request payload).
---
## Use the "real" admission webhook
- We have a small Flask app implementing a particular policy on pod labels:
- if a pod sets a label `color`, it must be `blue`, `green`, `red`
- once that `color` label is set, it cannot be removed or changed
- That Flask app was started when we did `docker-compose up` earlier
- It is exposed through its own ngrok tunnel
- We are going to use that webhook instead of the other one
(by changing only the `url` field in the ValidatingWebhookConfiguration)
---
## Update the webhook configuration
.exercise[
- First, check the ngrok URL of the tunnel for the Flask app:
```bash
docker-compose logs ngrok-flask
```
- Then, edit the webhook configuration:
```bash
kubectl edit validatingwebhookconfiguration admission.container.training
```
- Find the `url:` field with the `.ngrok.io` URL and update it
- Save and quit; the new configuration is applied immediately
]
---
## Verify the behavior of the webhook
- Try to create a few pods and/or change labels on existing pods
- What happens if we try to make changes to the earlier pod?
(the one that has the label `color=pink`)
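For instance, this should now be denied by the webhook, since the policy
forbids changing an existing `color` label (a sketch; `chroma` is the pod we
created earlier):

```bash
kubectl label pod chroma color=blue --overwrite
```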
---
## Deploying the webhook on the cluster
- Let's see what's needed to self-host the webhook server!
- The webhook needs to be reachable through a Service on our cluster
- The Service needs to accept TLS connections on port 443
- We need a proper TLS certificate:
- with the right `CN` and `subjectAltName` (`<servicename>.<namespace>.svc`)
- signed by a trusted CA
- We can either use a "real" CA, or use the `caBundle` option to specify the CA cert
(the latter makes it easy to use self-signed certs)
---
## In practice
- We're going to generate a key pair and a self-signed certificate
- We will store them in a Secret
- We will run the webhook in a Deployment, exposed with a Service
- We will update the webhook configuration to use that Service
- The Service will be named `admission`, in Namespace `webhooks`
(keep in mind that the ValidatingWebhookConfiguration itself is at cluster scope)
---
## Let's get to work!
.exercise[
- Make sure we're in the right directory:
```bash
cd ~/container.training/webhooks/admission
```
- Create the namespace:
```bash
kubectl create namespace webhooks
```
- Switch to the namespace:
```bash
kubectl config set-context --current --namespace=webhooks
```
]
---
## Deploying the webhook
- *Normally,* we would author an image for this
- Since our webhook is just *one* Python source file ...
... we'll store it in a ConfigMap, and install dependencies on the fly
.exercise[
- Load the webhook source in a ConfigMap:
```bash
kubectl create configmap admission --from-file=flask/webhook.py
```
- Create the Deployment and Service:
```bash
kubectl apply -f k8s/webhook-server.yaml
```
]
---
## Generating the key pair and certificate
- Let's call OpenSSL to the rescue!
(of course, there are plenty of other options; e.g. `cfssl`)
.exercise[
- Generate a self-signed certificate:
```bash
NAMESPACE=webhooks
SERVICE=admission
CN=$SERVICE.$NAMESPACE.svc
openssl req -x509 -newkey rsa:4096 -nodes -keyout key.pem -out cert.pem \
-days 30 -subj /CN=$CN -addext subjectAltName=DNS:$CN
```
- Load up the key and cert in a Secret:
```bash
kubectl create secret tls admission --cert=cert.pem --key=key.pem
```
]
---
## Update the webhook configuration
- Let's reconfigure the webhook to use our Service instead of ngrok
.exercise[
- Edit the webhook configuration manifest:
```bash
vim k8s/webhook-configuration.yaml
```
- Comment out the `url:` line
- Uncomment the `service:` section
- Save, quit
- Update the webhook configuration:
```bash
kubectl apply -f k8s/webhook-configuration.yaml
```
]
---
## Add our self-signed cert to the `caBundle`
- The API server won't accept our self-signed certificate
- We need to add it to the `caBundle` field in the webhook configuration
- The `caBundle` will be our `cert.pem` file, encoded in base64
---
Shell to the rescue!
.exercise[
- Load up our cert and encode it in base64:
```bash
CA=$(base64 -w0 < cert.pem)
```
- Define a patch operation to update the `caBundle`:
```bash
PATCH='[{
"op": "replace",
"path": "/webhooks/0/clientConfig/caBundle",
"value":"'$CA'"
}]'
```
- Patch the webhook configuration:
```bash
kubectl patch validatingwebhookconfiguration \
admission.webhook.container.training \
--type='json' -p="$PATCH"
```
]
---
## Try it out!
- Keep an eye on the API server logs
- Tail the logs of the pod running the webhook server
- Create a few pods; we should see requests in the webhook server logs
- Check that the label `color` is enforced correctly
(it should only allow values of `red`, `green`, `blue`)
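For example (a sketch; the pod names are hypothetical):

```bash
kubectl run test-red --restart=Never --image=nginx --labels=color=red    # allowed
kubectl run test-pink --restart=Never --image=nginx --labels=color=pink  # denied
```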
???
:EN:- Dynamic admission control with webhooks
:FR:- Contrôle d'admission dynamique (webhooks)

View File

@@ -1,386 +0,0 @@
# The Aggregation Layer
- The aggregation layer is a way to extend the Kubernetes API
- It is similar to CRDs
- it lets us define new resource types
- these resources can then be used with `kubectl` and other clients
- The implementation is very different
- CRDs are handled within the API server
- the aggregation layer offloads requests to another process
- They are designed for very different use-cases
---
## CRDs vs aggregation layer
- The Kubernetes API is a REST-ish API with a hierarchical structure
- It can be extended with Custom Resource Definitions (CRDs)
- Custom resources are managed by the Kubernetes API server
- we don't need to write code
- the API server does all the heavy lifting
- these resources are persisted in Kubernetes' "standard" database
<br/>
(for most installations, that's `etcd`)
- We can also define resources that are *not* managed by the API server
(the API server merely proxies the requests to another server)
---
## Which one is best?
- For things that "map" well to objects stored in a traditional database:
*probably CRDs*
- For things that "exist" only in Kubernetes and don't represent external resources:
*probably CRDs*
- For things that are read-only, at least from Kubernetes' perspective:
*probably aggregation layer*
- For things that can't be stored in etcd because of size or access patterns:
*probably aggregation layer*
---
## How are resources organized?
- Let's have a look at the Kubernetes API hierarchical structure
- Useful: `.metadata.selfLink` contains the URI of a resource
.exercise[
- Check the `apiVersion` and URI of a "core" resource, e.g. a Node:
```bash
kubectl get nodes -o json | jq .items[0].apiVersion
kubectl get nodes -o json | jq .items[0].metadata.selfLink
```
- Get the `apiVersion` and URI for a "non-core" resource, e.g. a ClusterRole:
```bash
kubectl get clusterrole view -o json | jq .apiVersion
kubectl get clusterrole view -o json | jq .metadata.selfLink
```
]
---
## Core vs non-core
- This is the structure of the URIs that we just checked:
```
/api/v1/nodes/node1
↑ ↑ ↑
`version` `kind` `name`
/apis/rbac.authorization.k8s.io/v1/clusterroles/view
↑ ↑ ↑ ↑
`group` `version` `kind` `name`
```
- There is no group for "core" resources
- Or, we could say that the group, `core`, is implied
---
## Group-Version-Kind
- In the API server, the Group-Version-Kind triple maps to a Go type
(look for all the "GVK" occurrences in the source code!)
- In the API server URI router, the GVK is parsed "relatively early"
(so that the server can know which resource we're talking about)
- "Well, actually ..." Things are a bit more complicated, see next slides!
---
class: extra-details
## Namespaced resources
- Here are what namespaced resources URIs look like:
```
/api/v1/namespaces/default/services/kubernetes
↑ ↑ ↑ ↑
`version` `namespace` `kind` `name`
/apis/apps/v1/namespaces/kube-system/daemonsets/kube-proxy
↑ ↑ ↑ ↑ ↑
`group` `version` `namespace` `kind` `name`
```
---
class: extra-details
## Subresources
- Many resources have *subresources*, for instance:
- `/status` (decouples status updates from other updates)
- `/scale` (exposes a consistent interface for autoscalers)
- `/proxy` (allows access to HTTP resources)
- `/portforward` (used by `kubectl port-forward`)
- `/log` (access pod logs)
- These are added at the end of the URI
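For instance, we can hit a subresource directly with a raw API request
(a sketch; the `kube-proxy` pod name is the one shown below, and will be
different on your cluster):

```bash
kubectl get --raw /api/v1/namespaces/kube-system/pods/kube-proxy-c7rlw/log
```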
---
class: extra-details
## Accessing a subresource
.exercise[
- List `kube-proxy` pods:
```bash
kubectl get pods --namespace=kube-system --selector=k8s-app=kube-proxy
PODNAME=$(
kubectl get pods --namespace=kube-system --selector=k8s-app=kube-proxy \
-o json | jq .items[0].metadata.name)
```
- Execute a command in a pod, showing the API requests:
```bash
kubectl -v6 exec --namespace=kube-system $PODNAME -- echo hello world
```
]
--
The full request looks like:
```
POST https://.../api/v1/namespaces/kube-system/pods/kube-proxy-c7rlw/exec?
command=echo&command=hello&command=world&container=kube-proxy&stderr=true&stdout=true
```
---
## Listing what's supported on the server
- There are at least three useful commands to introspect the API server
.exercise[
- List resources types, their group, kind, short names, and scope:
```bash
kubectl api-resources
```
- List API groups + versions:
```bash
kubectl api-versions
```
- List APIServices:
```bash
kubectl get apiservices
```
]
--
🤔 What's the difference between the last two?
---
## API registration
- `kubectl api-versions` shows all API groups, including `apiregistration.k8s.io`
- `kubectl get apiservices` shows the "routing table" for API requests
- The latter doesn't show `apiregistration.k8s.io`
(APIServices belong to `apiregistration.k8s.io`)
- Most API groups are `Local` (handled internally by the API server)
- If we're running the `metrics-server`, it should handle `metrics.k8s.io`
- This is an API group handled *outside* of the API server
- This is the *aggregation layer!*
---
## Finding resources
The following assumes that `metrics-server` is deployed on your cluster.
.exercise[
- Check that the metrics.k8s.io is registered with `metrics-server`:
```bash
kubectl get apiservices | grep metrics.k8s.io
```
- Check the resource kinds registered in the metrics.k8s.io group:
```bash
kubectl api-resources --api-group=metrics.k8s.io
```
]
(If the output of either command is empty, install `metrics-server` first.)
---
## `nodes` vs `nodes`
- We can have multiple resources with the same name
.exercise[
- Look for resources named `nodes`:
```bash
kubectl api-resources | grep -w nodes
```
- Compare the output of both commands:
```bash
kubectl get nodes
kubectl get nodes.metrics.k8s.io
```
]
--
🤔 What is that second kind of nodes? How can we see what's really in them?
---
## Node vs NodeMetrics
- `nodes.metrics.k8s.io` (aka NodeMetrics) don't have fancy *printer columns*
- But we can look at the raw data (with `-o json` or `-o yaml`)
.exercise[
- Look at NodeMetrics objects with one of these commands:
```bash
kubectl get -o yaml nodes.metrics.k8s.io
kubectl get -o yaml NodeMetrics
```
]
--
💡 Alright, these are the live metrics (CPU, RAM) for our nodes.
---
## An easier way to consume metrics
- We might have seen these metrics before ... With an easier command!
--
.exercise[
- Display node metrics:
```bash
kubectl top nodes
```
- Check which API requests happen behind the scenes:
```bash
kubectl top nodes -v6
```
]
---
## Aggregation layer in practice
- We can write an API server to handle a subset of the Kubernetes API
- Then we can register that server by creating an APIService resource
.exercise[
- Check the definition used for the `metrics-server`:
```bash
kubectl describe apiservices v1beta1.metrics.k8s.io
```
]
- Group priority is used when multiple API groups provide similar kinds
(e.g. `nodes` and `nodes.metrics.k8s.io` as seen earlier)
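We can also hit the aggregated API directly (this assumes that `metrics-server` is deployed); the request below is served by `metrics-server`, not by the main API server:
```bash
# This path is routed to metrics-server through the aggregation layer
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes
```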
---
## Authentication flow
- We have two Kubernetes API servers:
- "aggregator" (the main one; clients connect to it)
- "aggregated" (the one providing the extra API; aggregator connects to it)
- Aggregator deals with client authentication
- Aggregator authenticates with aggregated using mutual TLS
- Aggregator passes (/forwards/proxies/...) requests to aggregated
- Aggregated performs authorization by calling back aggregator
("can subject X perform action Y on resource Z?")
[This doc page](https://kubernetes.io/docs/tasks/extend-kubernetes/configure-aggregation-layer/#authentication-flow) has very nice swim lanes showing that flow.
---
## Discussion
- Aggregation layer is great for metrics
(fast-changing, ephemeral data, that would be outrageously bad for etcd)
- It *could* be a good fit to expose other REST APIs as a pass-thru
(but it's more common to see CRDs instead)
???
:EN:- The aggregation layer
:FR:- Étendre l'API avec le *aggregation layer*

View File

@@ -87,8 +87,3 @@
- Tunnels are also fine
(e.g. [k3s](https://k3s.io/) uses a tunnel to allow each node to contact the API server)
???
:EN:- Ensuring API server availability
:FR:- Assurer la disponibilité du serveur API

View File

@@ -1,179 +0,0 @@
# API server internals
- Understanding the internals of the API server is useful.red[¹]:
- when extending the Kubernetes API server (CRDs, webhooks...)
- when running Kubernetes at scale
- Let's dive into a bit of code!
.footnote[.red[¹]And by *useful*, we mean *strongly recommended or else...*]
---
## The main handler
- The API server parses its configuration, and builds a `GenericAPIServer`
- ... which contains an `APIServerHandler` ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/handler.go#L37))
- ... which contains a couple of `http.Handler` fields
- Requests go through:
  - `FullHandlerChain` (a series of HTTP filters, see next slide)
- `Director` (switches the request to `GoRestfulContainer` or `NonGoRestfulMux`)
- `GoRestfulContainer` is for "normal" APIs; integrates nicely with OpenAPI
- `NonGoRestfulMux` is for everything else (e.g. proxy, delegation)
---
## The chain of handlers
- API requests go through a complex chain of filters ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/config.go#L671))
(note when reading that code: requests start at the bottom and go up)
- This is where authentication, authorization, and admission happen
(as well as a few other things!)
- Let's review an arbitrary selection of some of these handlers!
*In the following slides, the handlers are in chronological order.*
*Note: handlers are nested; so they can act at the beginning and end of a request.*
---
## `WithPanicRecovery`
- Reminder about Go: there is no exception handling in Go; instead:
- functions typically return a composite `(SomeType, error)` type
- when things go really bad, the code can call `panic()`
- `panic()` can be caught with `recover()`
<br/>
(but this is almost never used like an exception handler!)
- The API server code is not supposed to `panic()`
- But just in case, we have that handler to prevent (some) crashes
---
## `WithRequestInfo` ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/request/requestinfo.go#L163))
- Parse out essential information:
API group, version, namespace, resource, subresource, verb ...
- Maps HTTP verbs (GET, PUT, ...) to Kubernetes verbs (list, get, watch, ...)
---
class: extra-details
## HTTP verb mapping
- POST → create
- PUT → update
- PATCH → patch
- DELETE
<br/> → delete (if a resource name is specified)
<br/> → deletecollection (otherwise)
- GET, HEAD
<br/> → get (if a resource name is specified)
<br/> → list (otherwise)
<br/> → watch (if the `?watch=true` option is specified)
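We can see that mapping in action by increasing kubectl's verbosity; a small sketch (the exact log format may vary between versions):
```bash
# "kubectl get pods" issues a GET on the collection (Kubernetes verb: list)
kubectl get pods -v6

# Adding --watch issues a GET with ?watch=true (Kubernetes verb: watch)
kubectl get pods --watch --request-timeout=5s -v6
```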
---
## `WithWaitGroup`
- When the server shuts down, this handler tells clients (with in-flight requests) to retry
- only for "short" requests
- for long running requests, the client needs to do more
- Long-running requests include the `watch` verb and the `proxy` subresource
(See also `WithTimeoutForNonLongRunningRequests`)
---
## AuthN and AuthZ
- `WithAuthentication`:
the request goes through a *chain* of authenticators
([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/filters/authentication.go#L38))
- `WithAudit`
- `WithImpersonation`: used for e.g. `kubectl ... --as another.user`
- `WithPriorityAndFairness` or `WithMaxInFlightLimit`
  (`system:masters` can bypass these)
- `WithAuthorization`
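As a quick illustration of impersonation (the user name below is made up), `kubectl auth can-i` with `--as` goes through that `WithImpersonation` filter:
```bash
# Asks the API server: "could user 'alice' list pods?"
# (our own credentials need impersonation rights for this to work)
kubectl auth can-i list pods --as=alice
```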
---
## After all these handlers ...
- We get to the "director" mentioned above
- API groups get installed into the `GoRestfulContainer`
([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/genericapiserver.go#L423))
- REST-ish resources are managed by various handlers
(in [this directory](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/))
- These files show us the code path for each type of request
---
class: extra-details
## Request code path
- [create.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/create.go):
decode to HubGroupVersion; admission; mutating admission; store
- [delete.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/delete.go):
validating admission only; deletion
- [get.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/get.go) (get, list):
directly fetch from rest storage abstraction
- [patch.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/patch.go):
admission; mutating admission; patch
- [update.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/update.go):
decode to HubGroupVersion; admission; mutating admission; store
- [watch.go](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/endpoints/handlers/watch.go):
similar to get.go, but with watch logic
(HubGroupVersion = in-memory, "canonical" version.)
???
:EN:- Kubernetes API server internals
:FR:- Fonctionnement interne du serveur API

View File

@@ -273,26 +273,6 @@ class: extra-details
---
class: extra-details
## Group-Version-Kind, or GVK
- A particular type will be identified by the combination of:
- the API group it belongs to (core, `apps`, `metrics.k8s.io`, ...)
- the version of this API group (`v1`, `v1beta1`, ...)
- the "Kind" itself (Pod, Role, Job, ...)
- "GVK" appears a lot in the API machinery code
- Conversions are possible between different versions and even between API groups
(e.g. when Deployments moved from `extensions` to `apps`)
---
## Update
- Let's update our namespace object
@@ -354,34 +334,6 @@ We demonstrated *update* and *watch* semantics.
---
class: extra-details
## Watch events
- `kubectl get --watch` shows changes
- If we add `--output-watch-events`, we can also see:
- the difference between ADDED and MODIFIED resources
- DELETED resources
.exercise[
- In one terminal, watch pods, displaying full events:
```bash
kubectl get pods --watch --output-watch-events
```
- In another, run a short-lived pod:
```bash
kubectl run pause --image=alpine --rm -ti --restart=Never -- sleep 5
```
]
---
# Other control plane components
- API server ✔️
@@ -429,8 +381,3 @@ class: extra-details
- if the pod has special constraints that can't be met
- if the scheduler is not running (!)
???
:EN:- Kubernetes architecture review
:FR:- Passage en revue de l'architecture de Kubernetes

View File

@@ -1,74 +1,6 @@
# Authentication and authorization
- In this section, we will:
- define authentication and authorization
- explain how they are implemented in Kubernetes
- talk about tokens, certificates, service accounts, RBAC ...
- But first: why do we need all this?
---
## The need for fine-grained security
- The Kubernetes API should only be available for identified users
- we don't want "guest access" (except in very rare scenarios)
- we don't want strangers to use our compute resources, delete our apps ...
- our keys and passwords should not be exposed to the public
- Users will often have different access rights
- cluster admin (similar to UNIX "root") can do everything
- developer might access specific resources, or a specific namespace
  - supervision might have read-only access to *most* resources
---
## Example: custom HTTP load balancer
- Let's imagine that we have a custom HTTP load balancer for multiple apps
- Each app has its own *Deployment* resource
- By default, the apps are "sleeping" and scaled to zero
- When a request comes in, the corresponding app gets woken up
- After some inactivity, the app is scaled down again
- This HTTP load balancer needs API access (to scale up/down)
- What if *a wild vulnerability appears*?
---
## Consequences of vulnerability
- If the HTTP load balancer has the same API access as we do:
*full cluster compromise (easy data leak, cryptojacking...)*
- If the HTTP load balancer has `update` permissions on the Deployments:
*defacement (easy), MITM / impersonation (medium to hard)*
- If the HTTP load balancer only has permission to `scale` the Deployments:
*denial-of-service*
- All these outcomes are bad, but some are worse than others
---
## Definitions
*And first, a little refresher!*
- Authentication = verifying the identity of a person
@@ -118,7 +50,7 @@
- [HTTP basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication)
(carrying user and password in an HTTP header; [deprecated since Kubernetes 1.19](https://github.com/kubernetes/kubernetes/pull/89069))
- Authentication proxy
@@ -215,7 +147,7 @@ class: extra-details
(if their key is compromised, or they leave the organization)
- Option 1: re-create a new CA and re-issue everyone's certificates
<br/>
→ Maybe OK if we only have a few users; no way otherwise
@@ -699,7 +631,7 @@ class: extra-details
- Let's look for these in existing ClusterRoleBindings:
```bash
kubectl get clusterrolebindings -o yaml |
grep -e kubernetes-admin -e system:masters
```
@@ -744,17 +676,3 @@ class: extra-details
- Both are available as standalone programs, or as plugins for `kubectl`
(`kubectl` plugins can be installed and managed with `krew`)
???
:EN:- Authentication and authorization in Kubernetes
:EN:- Authentication with tokens and certificates
:EN:- Authorization with RBAC (Role-Based Access Control)
:EN:- Restricting permissions with Service Accounts
:EN:- Working with Roles, Cluster Roles, Role Bindings, etc.
:FR:- Identification et droits d'accès dans Kubernetes
:FR:- Mécanismes d'identification par jetons et certificats
:FR:- Le modèle RBAC *(Role-Based Access Control)*
:FR:- Restreindre les permissions grâce aux *Service Accounts*
:FR:- Comprendre les *Roles*, *Cluster Roles*, *Role Bindings*, etc.

View File

@@ -1,212 +0,0 @@
# Executing batch jobs
- Deployments are great for stateless web apps
(as well as workers that keep running forever)
- Pods are great for one-off execution that we don't care about
(because they don't get automatically restarted if something goes wrong)
- Jobs are great for "long" background work
("long" being at least minutes our hours)
- CronJobs are great to schedule Jobs at regular intervals
(just like the classic UNIX `cron` daemon with its `crontab` files)
---
## Creating a Job
- A Job will create a Pod
- If the Pod fails, the Job will create another one
- The Job will keep trying until:
- either a Pod succeeds,
- or we hit the *backoff limit* of the Job (default=6)
.exercise[
- Create a Job that has a 50% chance of success:
```bash
kubectl create job flipcoin --image=alpine -- sh -c 'exit $(($RANDOM%2))'
```
]
---
## Our Job in action
- Our Job will create a Pod named `flipcoin-xxxxx`
- If the Pod succeeds, the Job stops
- If the Pod fails, the Job creates another Pod
.exercise[
- Check the status of the Pod(s) created by the Job:
```bash
kubectl get pods --selector=job-name=flipcoin
```
]
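To block until the Job succeeds (handy in scripts), `kubectl wait` can watch for the `complete` condition; note that this will simply time out if the Job keeps failing:
```bash
# Wait (up to 2 minutes) for the Job to reach the "complete" condition
kubectl wait --for=condition=complete job/flipcoin --timeout=2m
```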
---
class: extra-details
## More advanced jobs
- We can specify a number of "completions" (default=1)
- This indicates how many times the Job must be executed
- We can specify the "parallelism" (default=1)
- This indicates how many Pods should be running in parallel
- These options cannot be specified with `kubectl create job`
(we have to write our own YAML manifest to use them)
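Here is a minimal manifest sketch showing these two fields (the name and values are arbitrary); it asks for 5 successful completions, running at most 2 Pods at a time:
```bash
kubectl apply -f- <<"EOF"
apiVersion: batch/v1
kind: Job
metadata:
  name: flipcoin-batch        # hypothetical name
spec:
  completions: 5              # the Job must succeed 5 times
  parallelism: 2              # at most 2 Pods run at the same time
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - name: flipcoin
        image: alpine
        command: ["sh", "-c", "exit $(($RANDOM%2))"]
EOF
```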
---
## Scheduling periodic background work
- A Cron Job is a Job that will be executed at specific intervals
(the name comes from the traditional cronjobs executed by the UNIX crond)
- It requires a *schedule*, represented as five space-separated fields:
- minute [0,59]
- hour [0,23]
- day of the month [1,31]
- month of the year [1,12]
- day of the week ([0,6] with 0=Sunday)
- `*` means "all valid values"; `/N` means "every N"
- Example: `*/3 * * * *` means "every three minutes"
---
## Creating a Cron Job
- Let's create a simple job to be executed every three minutes
- Careful: make sure that the job terminates!
  (by default, the Cron Job will not wait for a previous Job to finish before starting another one)
.exercise[
- Create the Cron Job:
```bash
kubectl create cronjob every3mins --schedule="*/3 * * * *" \
--image=alpine -- sleep 10
```
- Check the resource that was created:
```bash
kubectl get cronjobs
```
]
---
## Cron Jobs in action
- At the specified schedule, the Cron Job will create a Job
- The Job will create a Pod
- The Job will make sure that the Pod completes
(re-creating another one if it fails, for instance if its node fails)
.exercise[
- Check the Jobs that are created:
```bash
kubectl get jobs
```
]
(It will take a few minutes before the first job is scheduled.)
---
class: extra-details
## Setting a time limit
- It is possible to set a time limit (or deadline) for a job
- This is done with the field `spec.activeDeadlineSeconds`
(by default, it is unlimited)
- When the job is older than this time limit, all its pods are terminated
- Note that there can also be a `spec.activeDeadlineSeconds` field in pods!
- They can be set independently, and have different effects:
- the deadline of the job will stop the entire job
- the deadline of the pod will only stop an individual pod
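A minimal sketch showing both fields side by side (the name and durations are arbitrary):
```bash
kubectl apply -f- <<"EOF"
apiVersion: batch/v1
kind: Job
metadata:
  name: sleeper                   # hypothetical name
spec:
  activeDeadlineSeconds: 60       # terminates the whole Job after 60 seconds
  template:
    spec:
      activeDeadlineSeconds: 20   # terminates each individual Pod after 20 seconds
      restartPolicy: Never
      containers:
      - name: sleeper
        image: alpine
        command: ["sleep", "3600"]
EOF
```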
---
class: extra-details
## What about `kubectl run` before v1.18?
- Creating a Deployment:
`kubectl run`
- Creating a Pod:
`kubectl run --restart=Never`
- Creating a Job:
`kubectl run --restart=OnFailure`
- Creating a Cron Job:
`kubectl run --restart=OnFailure --schedule=...`
*Avoid using these forms, as they are deprecated since Kubernetes 1.18!*
---
## Beyond `kubectl create`
- As hinted earlier, `kubectl create` doesn't always expose all options
- can't express parallelism or completions of Jobs
- can't express healthchecks, resource limits
- `kubectl create` and `kubectl run` are *helpers* that generate YAML manifests
- If we write these manifests ourselves, we can use all features and options
- We'll see later how to do that!
???
:EN:- Running batch and cron jobs
:FR:- Tâches périodiques *(cron)* et traitement par lots *(batch)*

View File

@@ -257,8 +257,3 @@ This is the TLS bootstrap mechanism, step by step.
- [kubeadm token](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-token/) command
- [kubeadm join](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/) command (has details about [the join workflow](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/#join-workflow))
???
:EN:- Leveraging TLS bootstrap to join nodes
:FR:- Ajout de nœuds grâce au *TLS bootstrap*

View File

@@ -1,244 +0,0 @@
# cert-manager
- cert-manager¹ facilitates certificate signing through the Kubernetes API:
- we create a Certificate object (that's a CRD)
- cert-manager creates a private key
- it signs that key ...
- ... or interacts with a certificate authority to obtain the signature
- it stores the resulting key+cert in a Secret resource
- These Secret resources can be used in many places (Ingress, mTLS, ...)
.footnote[.red[¹]Always lower case, words separated with a dash; see the [style guide](https://cert-manager.io/docs/faq/style/).]
---
## Getting signatures
- cert-manager can use multiple *Issuers* (another CRD), including:
- self-signed
- cert-manager acting as a CA
  - the [ACME protocol](https://en.wikipedia.org/wiki/Automated_Certificate_Management_Environment) (notably used by Let's Encrypt)
- [HashiCorp Vault](https://www.vaultproject.io/)
- Multiple issuers can be configured simultaneously
- Issuers can be available in a single namespace, or in the whole cluster
(then we use the *ClusterIssuer* CRD)
---
## cert-manager in action
- We will install cert-manager
- We will create a ClusterIssuer to obtain certificates with Let's Encrypt
(this will involve setting up an Ingress Controller)
- We will create a Certificate request
- cert-manager will honor that request and create a TLS Secret
---
## Installing cert-manager
- It can be installed with a YAML manifest, or with Helm
.exercise[
- Create the namespace for cert-manager:
```bash
kubectl create ns cert-manager
```
- Add the Jetstack repository:
```bash
helm repo add jetstack https://charts.jetstack.io
```
- Install cert-manager:
```bash
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager \
--set installCRDs=true
```
]
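Before moving on, it's worth checking that the cert-manager pods came up (pod names will vary):
```bash
# The cert-manager, cainjector, and webhook pods should all become Ready
kubectl get pods --namespace=cert-manager
```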
---
## ClusterIssuer manifest
```yaml
@@INCLUDE[k8s/cm-clusterissuer.yaml]
```
---
## Creating the ClusterIssuer
- The manifest shown on the previous slide is in @@LINK[k8s/cm-clusterissuer.yaml]
.exercise[
- Create the ClusterIssuer:
```bash
kubectl apply -f ~/container.training/k8s/cm-clusterissuer.yaml
```
]
---
## Certificate manifest
```yaml
@@INCLUDE[k8s/cm-certificate.yaml]
```
- The `name`, `secretName`, and `dnsNames` don't have to match
- There can be multiple `dnsNames`
- The `issuerRef` must match the ClusterIssuer that we created earlier
---
## Creating the Certificate
- The manifest shown on the previous slide is in @@LINK[k8s/cm-certificate.yaml]
.exercise[
- Edit the Certificate to update the domain name
(make sure to replace A.B.C.D with the IP address of one of your nodes!)
- Create the Certificate:
```bash
kubectl apply -f ~/container.training/k8s/cm-certificate.yaml
```
]
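We can follow the progress of the request through the Certificate object itself; a couple of (hopefully) useful commands:
```bash
# The READY column turns to True once the certificate has been issued
kubectl get certificates

# The Events section shows the ACME challenge progress (or errors)
kubectl describe certificate xyz.A.B.C.D.nip.io
```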
---
## What's happening?
- cert-manager will create:
- the secret key
- a Pod, a Service, and an Ingress to complete the HTTP challenge
- then it waits for the challenge to complete
.exercise[
- View the resources created by cert-manager:
```bash
kubectl get pods,services,ingresses \
--selector=acme.cert-manager.io/http01-solver=true
```
]
---
## HTTP challenge
- The CA (in this case, Let's Encrypt) will fetch a particular URL:
`http://<our-domain>/.well-known/acme-challenge/<token>`
.exercise[
- Check the *path* of the Ingress in particular:
```bash
  kubectl describe ingress \
--selector=acme.cert-manager.io/http01-solver=true
```
]
---
## What's missing ?
--
An Ingress Controller! 😅
.exercise[
- Install an Ingress Controller:
```bash
kubectl apply -f ~/container.training/k8s/traefik-v2.yaml
```
- Wait a little bit, and check that we now have a `kubernetes.io/tls` Secret:
```bash
kubectl get secrets
```
]
---
class: extra-details
## Using the secret
- For bonus points, try to use the secret in an Ingress!
- This is what the manifest would look like:
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: xyz
spec:
tls:
- secretName: xyz.A.B.C.D.nip.io
hosts:
- xyz.A.B.C.D.nip.io
rules:
...
```
---
class: extra-details
## Let's Encrypt and nip.io
- Let's Encrypt has [rate limits](https://letsencrypt.org/docs/rate-limits/) per domain
(the limits only apply to the production environment, not staging)
- There is a limit of 50 certificates per registered domain
- If we try to use the production environment, we will probably hit the limit
- It's fine to use the staging environment for these experiments
(our certs won't validate in a browser, but we can always check
the details of the cert to verify that it was issued by Let's Encrypt!)
???
:EN:- Obtaining certificates with cert-manager
:FR:- Obtenir des certificats avec cert-manager

View File

@@ -142,8 +142,3 @@ The list includes the following providers:
- [configuration](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) (mainly for OpenStack)
- [deployment](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/)
???
:EN:- The Cloud Controller Manager
:FR:- Le *Cloud Controller Manager*

View File

@@ -217,16 +217,15 @@ docker run --rm --net host -v $PWD:/vol \
## How can we remember all these flags?
- Look at the static pod manifest for etcd
  (in `/etc/kubernetes/manifests`)
- The healthcheck probe is calling `etcdctl` with all the right flags
  😉👍✌️
- Exercise: write the YAML for a batch job to perform the backup
(how will you access the key and certificate required to connect?)
---
## Restoring an etcd snapshot
@@ -365,8 +364,3 @@ docker run --rm --net host -v $PWD:/vol \
- [bivac](https://github.com/camptocamp/bivac)
Backup Interface for Volumes Attached to Containers
???
:EN:- Backing up clusters
:FR:- Politiques de sauvegarde

View File

@@ -165,12 +165,3 @@ class: extra-details
- Security advantage (stronger isolation between pods)
Check [this blog post](http://jpetazzo.github.io/2019/02/13/running-kubernetes-without-nodes-with-kiyot/) for more details.
???
:EN:- What happens when the cluster is at, or over, capacity
:EN:- Cluster sizing and scaling
:FR:- Ce qui se passe quand il n'y a plus assez de ressources
:FR:- Dimensionner et redimensionner ses clusters
