Compare commits


2 Commits

Author             SHA1        Message                        Date
Jérôme Petazzoni   fc0e6e762f  ZOS week 2                     2021-11-05 14:19:36 +01:00
Jérôme Petazzoni   27a5fafdbc  Prepare ZOS specific content   2021-11-05 14:19:36 +01:00
248 changed files with 4847 additions and 9844 deletions

View File

@@ -1,3 +1,2 @@
hostname frr
ip nht resolve-via-default
log stdout

View File

@@ -2,36 +2,30 @@ version: "3"
services:
bgpd:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel --no_zebra
entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
restart: always
zebra:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
cap_add:
- NET_ADMIN
- SYS_ADMIN
entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
restart: always
vtysh:
image: frrouting/frr:v8.2.2
image: ajones17/frr:662
volumes:
- ./conf:/etc/frr
- ./run:/var/run/frr
network_mode: host
entrypoint: vtysh
entrypoint: vtysh -c "show ip bgp"
chmod:
image: alpine

View File

@@ -1,72 +1,14 @@
# (1) Setting up a registry, and telling Tilt to use it.
# Tilt needs a registry to store images.
# The following manifest defines a Deployment to run a basic Docker registry,
# and a NodePort Service to access it. Using a NodePort means that we don't
# need to obtain a TLS certificate, because we will be accessing the registry
# through localhost.
k8s_yaml('../k8s/tilt-registry.yaml')
# Tell Tilt to use the registry that we just deployed instead of whatever
# is defined in our Kubernetes resources. Tilt will patch image names to
# use our registry.
default_registry('localhost:30555')
# Create a port forward so that we can access the registry from our local
# environment, too. Note that if you run Tilt directly from a Kubernetes node
# (which is not typical, but might happen in some lab/training environments)
# the following might cause an error because port 30555 is already taken.
k8s_resource(workload='tilt-registry', port_forwards='30555:5000')
# (2) Telling Tilt how to build and run our app.
# The following two lines will use the kubectl-build plugin
# to leverage buildkit and build the images in our Kubernetes
# cluster. This is not enabled by default, because it requires
# the plugin to be installed.
# See https://github.com/vmware-tanzu/buildkit-cli-for-kubectl
# for more information about this plugin.
#load('ext://kubectl_build', 'kubectl_build')
#docker_build = kubectl_build
# Our Kubernetes manifests use images 'dockercoins/...' so we tell Tilt
# how each of these images should be built. The first argument is the name
# of the image, the second argument is the directory containing the build
# context (i.e., the directory that contains the Dockerfile used to build the image).
docker_build('dockercoins/hasher', 'hasher')
docker_build('dockercoins/rng', 'rng')
docker_build('dockercoins/webui', 'webui')
docker_build('dockercoins/worker', 'worker')
# The following manifest defines five Deployments and four Services for
# our application.
k8s_yaml('../k8s/dockercoins.yaml')
# (3) Finishing touches.
# Uncomment the following line to let Tilt run with the default kubeadm cluster-admin context.
#allow_k8s_contexts('kubernetes-admin@kubernetes')
# The following line lets Tilt run with the default kubeadm cluster-admin context.
allow_k8s_contexts('kubernetes-admin@kubernetes')
# Note: the whole section below (to set up ngrok tunnels) is disabled,
# because ngrok now requires you to set up an account to serve HTML
# content. So we can still use ngrok for e.g. webhooks and "raw" APIs,
# but not to serve web pages like the Tilt UI.
# # This will run an ngrok tunnel to expose Tilt to the outside world.
# # This is intended to be used when Tilt runs on a remote machine.
# local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
# # This will wait until the ngrok tunnel is up, and show its URL to the user.
# # We send the output to /dev/tty so that it doesn't get intercepted by
# # Tilt, and gets displayed to the user's terminal instead.
# # Note: this assumes that the ngrok instance will be running on port 4040.
# # If you have other ngrok instances running on the machine, this might not work.
# local_resource(name='ngrok:showurl', cmd='''
# while sleep 1; do
# TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
# [ "$TUNNELS" ] && break
# done
# printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
# '''
# )
# While we're here: if you're controlling a remote cluster, uncomment the following line.
# It will create a port forward so that you can access the remote registry.
#k8s_resource(workload='registry', port_forwards='30555:5000')
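
Note: the Tiltfile above references ../k8s/tilt-registry.yaml without showing it. As a rough idea of what such a manifest could look like, here is a minimal sketch based only on the comments above (a Deployment running a stock registry, exposed on NodePort 30555); the names and labels are assumptions, not the actual file:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: tilt-registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tilt-registry
  template:
    metadata:
      labels:
        app: tilt-registry
    spec:
      containers:
      - name: registry
        image: registry:2            # stock Docker registry image, listens on 5000
        ports:
        - containerPort: 5000
---
apiVersion: v1
kind: Service
metadata:
  name: tilt-registry
spec:
  type: NodePort                     # NodePort avoids needing a TLS certificate
  selector:
    app: tilt-registry
  ports:
  - port: 5000
    nodePort: 30555                  # matches default_registry('localhost:30555')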

View File

@@ -1,6 +1,6 @@
FROM node:4-slim
RUN npm install express
RUN npm install redis@3
RUN npm install redis
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]

View File

@@ -1,16 +0,0 @@
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
configuration:
apiVersion: pod-security.admission.config.k8s.io/v1alpha1
kind: PodSecurityConfiguration
defaults:
enforce: baseline
audit: baseline
warn: baseline
exemptions:
usernames:
- cluster-admin
namespaces:
- kube-system
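
For context, an AdmissionConfiguration like the one above (removed by this diff) only takes effect when the API server is started with --admission-control-config-file pointing at it. A minimal sketch of the kubeadm static-pod wiring follows; the flag is real, but the file paths are assumptions:

# Fragment of /etc/kubernetes/manifests/kube-apiserver.yaml (kubeadm layout);
# the path /etc/kubernetes/podsecurity.yaml is an assumed location.
spec:
  containers:
  - command:
    - kube-apiserver
    - --admission-control-config-file=/etc/kubernetes/podsecurity.yaml
    # ...other flags unchanged...
    volumeMounts:
    - name: podsecurity-config
      mountPath: /etc/kubernetes/podsecurity.yaml
      readOnly: true
  volumes:
  - name: podsecurity-config
    hostPath:
      path: /etc/kubernetes/podsecurity.yaml
      type: File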

View File

@@ -3,12 +3,6 @@
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@@ -34,6 +28,11 @@ subjects:
name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
@@ -62,7 +61,7 @@ spec:
serviceAccountName: consul
containers:
- name: consul
image: "consul:1.11"
image: "consul:1.8"
env:
- name: NAMESPACE
valueFrom:

View File

@@ -2,12 +2,6 @@
# There is still no actual persistence, but:
# - podAntiAffinity prevents pod colocation
# - the cluster works when scaling down to 1 (thanks to a lifecycle hook)
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@@ -33,6 +27,11 @@ subjects:
name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
@@ -69,7 +68,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
image: "consul:1.8"
env:
- name: NAMESPACE
valueFrom:
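
The comment above credits podAntiAffinity for spreading pods and a lifecycle hook for clean scale-down, though neither appears in the hunks shown. A typical shape for both in a Consul StatefulSet pod template looks roughly like this (a sketch; the exact label keys and hook command used by this manifest are assumptions):

spec:
  template:
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: consul          # assumed label key/value
            topologyKey: kubernetes.io/hostname
      containers:
      - name: consul
        lifecycle:
          preStop:
            exec:
              # Leave the cluster gracefully so the remaining members
              # keep a consistent view when scaling down.
              command: ["consul", "leave"]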

View File

@@ -1,11 +1,5 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@@ -31,6 +25,11 @@ subjects:
name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
---
apiVersion: v1
kind: Service
metadata:
name: consul
@@ -76,7 +75,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.11"
image: "consul:1.8"
volumeMounts:
- name: data
mountPath: /consul/data
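
The "true persistence" mentioned in the header comes from a volumeClaimTemplate, which gives each StatefulSet replica its own PersistentVolumeClaim backing the /consul/data mount shown above. Roughly, under assumed storage settings:

spec:
  volumeClaimTemplates:
  - metadata:
      name: data                     # matches the 'data' volumeMount above
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi               # assumed size; pick per environment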

View File

@@ -9,273 +9,377 @@ metadata:
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
data:
---
apiVersion: rbac.authorization.k8s.io/v1
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: http
port: 443
targetPort: http
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: NodePort
ports:
- port: 443
targetPort: http
name: http
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- args:
- --namespace=kubernetes-dashboard
- --sidecar-host=http://127.0.0.1:8000
- --enable-skip-login
- --enable-insecure-login
image: kubernetesui/dashboard:v2.5.0
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --metrics-provider=none
- --enable-skip-login
- --enable-insecure-login
ports:
- name: http
containerPort: 9090
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 9090
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 9090
name: http
protocol: TCP
resources:
limits:
cpu: 2
@@ -288,42 +392,102 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding

View File

@@ -9,272 +9,376 @@ metadata:
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
data:
---
apiVersion: rbac.authorization.k8s.io/v1
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: ClusterIP
ports:
- port: 443
targetPort: https
name: https
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.5.0
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --metrics-provider=none
ports:
- name: https
containerPort: 8443
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
@@ -287,39 +391,99 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -9,272 +9,376 @@ metadata:
spec: {}
status: {}
---
---
# Source: kubernetes-dashboard/templates/serviceaccount.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/secret.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-dashboard-certs
apiVersion: v1
kind: Secret
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-csrf
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/secret.yaml
# kubernetes-dashboard-key-holder
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
# Source: kubernetes-dashboard/templates/configmap.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
data: null
kind: ConfigMap
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
data:
---
apiVersion: rbac.authorization.k8s.io/v1
# Source: kubernetes-dashboard/templates/clusterrole-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations: null
name: "kubernetes-dashboard-metrics"
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/role.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-key-holder
- kubernetes-dashboard-certs
- kubernetes-dashboard-csrf
resources:
- secrets
verbs:
- get
- update
- delete
- apiGroups:
- ""
resourceNames:
- kubernetes-dashboard-settings
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resourceNames:
- heapster
- dashboard-metrics-scraper
resources:
- services
verbs:
- proxy
- apiGroups:
- ""
resourceNames:
- heapster
- 'http:heapster:'
- 'https:heapster:'
- dashboard-metrics-scraper
- http:dashboard-metrics-scraper
resources:
- services/proxy
verbs:
- get
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# Source: kubernetes-dashboard/templates/rolebinding.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations: null
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/service.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- name: https
port: 443
targetPort: https
selector:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
type: NodePort
ports:
- port: 443
targetPort: https
name: https
selector:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
---
# Source: kubernetes-dashboard/templates/deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
labels:
app.kubernetes.io/name: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/name: kubernetes-dashboard
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/component: kubernetes-dashboard
template:
metadata:
annotations: null
labels:
app.kubernetes.io/component: kubernetes-dashboard
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
helm.sh/chart: kubernetes-dashboard-5.0.2
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/version: "2.3.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.5.0
- name: kubernetes-dashboard
image: "kubernetesui/dashboard:v2.3.1"
imagePullPolicy: IfNotPresent
args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --metrics-provider=none
ports:
- name: https
containerPort: 8443
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
scheme: HTTPS
initialDelaySeconds: 30
timeoutSeconds: 30
name: kubernetes-dashboard
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 2
@@ -287,42 +391,102 @@ spec:
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /certs
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 30
name: dashboard-metrics-scraper
ports:
- containerPort: 8000
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 2001
runAsUser: 1001
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubernetes-dashboard
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- emptyDir: {}
name: tmp-volume
- name: tmp-volume
emptyDir: {}
---
# Source: kubernetes-dashboard/templates/clusterrole-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/ingress.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/networkpolicy.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/pdb.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
# Source: kubernetes-dashboard/templates/psp.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding

View File

@@ -1,16 +1,18 @@
global
daemon
maxconn 256
defaults
mode tcp
timeout connect 5s
timeout client 50s
timeout server 50s
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
listen very-basic-load-balancer
frontend the-frontend
bind *:80
server blue color.blue.svc:80
server green color.green.svc:80
default_backend the-backend
backend the-backend
server google.com-80 google.com:80 maxconn 32 check
server ibm.fr-80 ibm.fr:80 maxconn 32 check
# Note: the services above must exist,
# otherwise HAProxy won't start.
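A quick way to catch that failure before deploying (a sketch: this assumes the `haproxy` binary is available locally and the file is saved as `haproxy.cfg`):

```bash
# -c checks configuration validity and exits; -f points at the config file.
haproxy -c -f haproxy.cfg
```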

View File

@@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
number: 80
path: /
pathType: Prefix
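To illustrate what this policy generates (a sketch: the Service name `web` and namespace `default` are made up), creating such a Service would produce an Ingress like this one:

```yaml
# Illustrative output only: the Ingress generated for a Service
# named "web" in namespace "default" (A.B.C.D as in the policy).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web
  namespace: default
spec:
  rules:
  - host: web.default.A.B.C.D.nip.io
    http:
      paths:
      - backend:
          service:
            name: web
            port:
              number: 80
        path: /
        pathType: Prefix
```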

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: http
operator: In
value: "{{request.object.spec.ports[*].name}}"
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,34 +0,0 @@
# Note: this policy uses the operator "AnyIn", which was introduced in Kyverno 1.6.
# (This policy won't work with Kyverno 1.5!)
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[*].port}}"
operator: AnyIn
value: [ 80 ]
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix

View File

@@ -1,37 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: ingress-domain-name
spec:
rules:
- name: create-ingress
context:
- name: configmap
configMap:
name: ingress-domain-name
namespace: "{{request.object.metadata.namespace}}"
match:
resources:
kinds:
- Service
preconditions:
- key: "{{request.object.spec.ports[0].name}}"
operator: Equals
value: http
generate:
kind: Ingress
name: "{{request.object.metadata.name}}"
namespace: "{{request.object.metadata.namespace}}"
data:
spec:
rules:
- host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.{{configmap.data.domain}}"
http:
paths:
- backend:
service:
name: "{{request.object.metadata.name}}"
port:
name: http
path: /
pathType: Prefix
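For reference, a sketch of the ConfigMap that this policy looks up in the Service's namespace (the namespace and domain value below are illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-domain-name
  namespace: default
data:
  domain: 10.0.0.1.nip.io
```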

View File

@@ -1,20 +0,0 @@
kind: Pod
apiVersion: v1
metadata:
generateName: mounter-
labels:
container.training/mounter: ""
spec:
volumes:
- name: pvc
persistentVolumeClaim:
claimName: my-pvc-XYZ45
containers:
- name: mounter
image: alpine
stdin: true
tty: true
volumeMounts:
- name: pvc
mountPath: /pvc
workingDir: /pvc
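Since this manifest uses `generateName`, it must be submitted with `kubectl create` rather than `kubectl apply`; a possible workflow (file name and pod name suffix are illustrative):

```bash
kubectl create -f mounter.yaml     # prints the generated pod name
kubectl attach -ti mounter-abcde   # attach to the interactive shell (working dir is /pvc)
```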

View File

@@ -3,7 +3,8 @@ apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
podSelector:
matchLabels:
ingress:
- from:
- podSelector: {}

View File

@@ -1,20 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
generateName: my-pv-
labels:
container.training/pv: ""
spec:
accessModes:
- ReadWriteOnce
- ReadWriteMany
capacity:
storage: 1G
hostPath:
path: /tmp/my-pv
#storageClassName: my-sc
#claimRef:
# kind: PersistentVolumeClaim
# apiVersion: v1
# namespace: default
# name: my-pvc-XYZ45

View File

@@ -1,13 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
generateName: my-pvc-
labels:
container.training/pvc: ""
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1G
#storageClassName: my-sc

View File

@@ -1,147 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: blue
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: blue
name: color
namespace: blue
spec:
selector:
matchLabels:
app: rainbow
color: blue
template:
metadata:
labels:
app: rainbow
color: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: blue
name: color
namespace: blue
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: blue
type: ClusterIP
---
apiVersion: v1
kind: Namespace
metadata:
name: green
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: green
name: color
namespace: green
spec:
selector:
matchLabels:
app: rainbow
color: green
template:
metadata:
labels:
app: rainbow
color: green
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: green
name: color
namespace: green
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: green
type: ClusterIP
---
apiVersion: v1
kind: Namespace
metadata:
name: red
labels:
app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rainbow
color: red
name: color
namespace: red
spec:
selector:
matchLabels:
app: rainbow
color: red
template:
metadata:
labels:
app: rainbow
color: red
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rainbow
color: red
name: color
namespace: red
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: rainbow
color: red
type: ClusterIP

View File

@@ -35,9 +35,6 @@ spec:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
hostPort: 8080
@@ -103,12 +100,3 @@ subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller

View File

@@ -5,34 +5,25 @@ banner() {
echo "#"
}
create_namespace() {
namespace() {
# 'helm template --namespace ... --create-namespace'
# doesn't create the namespace, so we need to create it.
# https://github.com/helm/helm/issues/9813
echo ---
kubectl create namespace kubernetes-dashboard \
-o yaml --dry-run=client
echo ---
}
add_namespace() {
# 'helm template --namespace ...' doesn't add namespace information,
# so we do it with this convenient filter instead.
# https://github.com/helm/helm/issues/10737
kubectl create -f- -o yaml --dry-run=client --namespace kubernetes-dashboard
}
(
banner
create_namespace
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set "extraArgs={--enable-skip-login,--enable-insecure-login}" \
--set metricsScraper.enabled=true \
--set protocolHttp=true \
--set service.type=NodePort \
| add_namespace
#
echo ---
kubectl create clusterrolebinding kubernetes-dashboard:insecure \
--clusterrole=cluster-admin \
@@ -43,23 +34,21 @@ add_namespace() {
(
banner
create_namespace
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set metricsScraper.enabled=true \
| add_namespace
#
) > dashboard-recommended.yaml
(
banner
create_namespace
namespace
helm template kubernetes-dashboard kubernetes-dashboard \
--repo https://kubernetes.github.io/dashboard/ \
--create-namespace --namespace kubernetes-dashboard \
--set metricsScraper.enabled=true \
--set service.type=NodePort \
| add_namespace
#
echo ---
kubectl create clusterrolebinding kubernetes-dashboard:cluster-admin \
--clusterrole=cluster-admin \

View File

@@ -1,107 +1,17 @@
⚠️ This is work in progress. The UX needs to be improved,
and the docs could be better.
This directory contains a Terraform configuration to deploy
a bunch of Kubernetes clusters on various cloud providers,
using their respective managed Kubernetes products.
a bunch of Kubernetes clusters on various cloud providers, using their respective managed Kubernetes products.
## With shell wrapper
This is the recommended use. It makes it easy to start N clusters
on any provider. It will create a directory with a name like
`tag-YYYY-MM-DD-HH-MM-SS-SEED-PROVIDER`, copy the Terraform configuration
to that directory, then create the clusters using that configuration.
1. One-time setup: configure provider authentication for the provider(s) that you wish to use.
- Digital Ocean:
```bash
doctl auth init
```
- Google Cloud Platform: create a project named `prepare-tf`
  and enable the relevant APIs for that project (see the sketch
  after this list; to use a different project name, edit the
  Terraform configuration)
- Linode:
```bash
linode-cli configure
```
- Oracle Cloud: FIXME
(set up `oci` through the `oci-cli` Python package)
- Scaleway: run `scw init`
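For Google Cloud, the project setup mentioned above could look like the following sketch (the exact set of APIs to enable may vary):

```bash
gcloud projects create prepare-tf
gcloud config set project prepare-tf
gcloud services enable container.googleapis.com compute.googleapis.com
```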
2. Optional: set number of clusters, cluster size, and region.
By default, 1 cluster will be configured, with 2 nodes, and auto-scaling up to 5 nodes.
If you want, you can override these parameters with the following variables.
```bash
export TF_VAR_how_many_clusters=5
export TF_VAR_min_nodes_per_pool=2
export TF_VAR_max_nodes_per_pool=4
export TF_VAR_location=xxx
```
The `location` variable is optional. Each provider should have a default value.
The value of the `location` variable is provider-specific. Examples:
| Provider | Example value | How to see possible values
|---------------|-------------------|---------------------------
| Digital Ocean | `ams3` | `doctl compute region list`
| Google Cloud | `europe-north1-a` | `gcloud compute zones list`
| Linode | `eu-central` | `linode-cli regions list`
| Oracle Cloud | `eu-stockholm-1` | `oci iam region list`
You can also specify multiple locations, and then they will be
used in round-robin fashion.
For example, with Google Cloud, since the default quotas are very
low (my account is limited to 8 public IP addresses per zone, and
my requests to increase that quota were denied), you can do the
following:
```bash
export TF_VAR_location=$(gcloud compute zones list --format=json | jq -r .[].name | grep ^europe)
```
Then when you apply, clusters will be created across all available
zones in Europe. (When I write this, there are 20+ zones in Europe,
so even with my quota, I can create 40 clusters.)
3. Run!
```bash
./run.sh <providername>
```
(If you don't specify a provider name, it will list available providers.)
4. Shutting down
Go to the directory that was created by the previous step (`tag-YYYY-MM...`)
and run `terraform destroy`.
You can also run `./clean.sh`, which will destroy ALL the clusters deployed by the run script.
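For example (the directory name below is illustrative):

```bash
cd tag-2022-01-01-10-00-00-ab3cd-linode
terraform destroy
```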
## Without shell wrapper
Expert mode.
Useful for running steps separately, and/or when working on the Terraform configurations.
To use it:
1. Select the provider you wish to use.
Go to the `source` directory and edit `main.tf`.
Change the `source` attribute of the `module "clusters"` section.
Check the content of the `modules` directory to see available choices.
```bash
vim main.tf
```
2. Initialize the provider.
```bash
@@ -110,20 +20,24 @@ terraform init
3. Configure provider authentication.
See steps above, and add the following extra steps:
- Digital Ocean:
```bash
export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
```
- Linode:
```bash
export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
```
- Digital Ocean: `export DIGITALOCEAN_ACCESS_TOKEN=...`
(check `~/.config/doctl/config.yaml` for the token)
- Linode: `export LINODE_TOKEN=...`
(check `~/.config/linode-cli` for the token)
- Oracle Cloud: it should use `~/.oci/config`
- Scaleway: run `scw init`
4. Decide how many clusters and how many nodes per cluster you want.
```bash
export TF_VAR_how_many_clusters=5
export TF_VAR_min_nodes_per_pool=2
# Optional (will enable autoscaler when available)
export TF_VAR_max_nodes_per_pool=4
# Optional (will only work on some providers)
export TF_VAR_enable_arm_pool=true
```
5. Provision clusters.
```bash
@@ -132,7 +46,7 @@ terraform apply
6. Perform second stage provisioning.
This will install an SSH server on the clusters.
This will install a SSH server on the clusters.
```bash
cd stage2
@@ -158,5 +72,5 @@ terraform destroy
9. Clean up stage2.
```bash
rm stage2/terraform.tfstate*
rm stage/terraform.tfstate*
```

View File

@@ -1,9 +0,0 @@
#!/bin/sh
export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
for T in tag-*; do
(
cd $T
terraform apply -destroy -auto-approve && mv ../$T ../deleted$T
)
done

prepare-tf/locals.tf Normal file
View File

@@ -0,0 +1,16 @@
resource "random_string" "_" {
length = 5
special = false
upper = false
}
resource "time_static" "_" {}
locals {
tag = format("tf-%s-%s", formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339), random_string._.result)
# Common tags to be assigned to all resources
common_tags = [
"created-by=terraform",
"tag=${local.tag}"
]
}

View File

@@ -1,5 +1,5 @@
module "clusters" {
source = "./modules/PROVIDER"
source = "./modules/linode"
for_each = local.clusters
cluster_name = each.value.cluster_name
min_nodes_per_pool = var.min_nodes_per_pool
@@ -7,24 +7,22 @@ module "clusters" {
enable_arm_pool = var.enable_arm_pool
node_size = var.node_size
common_tags = local.common_tags
location = each.value.location
}
locals {
clusters = {
for i in range(101, 101 + var.how_many_clusters) :
i => {
cluster_name = format("%s-%03d", local.tag, i)
kubeconfig_path = format("./stage2/kubeconfig.%03d", i)
cluster_name = format("%s-%03d", local.tag, i)
kubeconfig_path = format("./stage2/kubeconfig.%03d", i)
#dashdash_kubeconfig = format("--kubeconfig=./stage2/kubeconfig.%03d", i)
externalips_path = format("./stage2/externalips.%03d", i)
flags_path = format("./stage2/flags.%03d", i)
location = local.locations[i % length(local.locations)]
}
}
}
resource "local_file" "stage2" {
filename = "./stage2/main.tf"
filename = "./stage2/main.tf"
file_permission = "0644"
content = templatefile(
"./stage2.tmpl",
@@ -32,15 +30,6 @@ resource "local_file" "stage2" {
)
}
resource "local_file" "flags" {
for_each = local.clusters
filename = each.value.flags_path
file_permission = "0600"
content = <<-EOT
has_metrics_server: ${module.clusters[each.key].has_metrics_server}
EOT
}
resource "local_file" "kubeconfig" {
for_each = local.clusters
filename = each.value.kubeconfig_path
@@ -70,8 +59,8 @@ resource "null_resource" "wait_for_nodes" {
}
data "external" "externalips" {
for_each = local.clusters
depends_on = [null_resource.wait_for_nodes]
for_each = local.clusters
depends_on = [ null_resource.wait_for_nodes ]
program = [
"sh",
"-c",

View File

@@ -1,13 +1,12 @@
resource "digitalocean_kubernetes_cluster" "_" {
name = var.cluster_name
tags = var.common_tags
# Region is mandatory, so let's provide a default value.
region = var.location != null ? var.location : "nyc1"
name = var.cluster_name
tags = local.common_tags
region = var.region
version = var.k8s_version
node_pool {
name = "x86"
tags = var.common_tags
name = "dok-x86"
tags = local.common_tags
size = local.node_type
auto_scale = true
min_nodes = var.min_nodes_per_pool

View File

@@ -5,7 +5,3 @@ output "kubeconfig" {
output "cluster_id" {
value = digitalocean_kubernetes_cluster._.id
}
output "has_metrics_server" {
value = false
}

View File

@@ -8,6 +8,10 @@ variable "common_tags" {
default = []
}
locals {
common_tags = [for tag in var.common_tags : replace(tag, "=", "-")]
}
variable "node_size" {
type = string
default = "M"
@@ -42,16 +46,14 @@ locals {
node_type = var.node_types[var.node_size]
}
# To view supported regions, run:
# doctl compute region list
variable "location" {
variable "region" {
type = string
default = null
default = "ams3"
}
# To view supported versions, run:
# doctl kubernetes options versions -o json | jq -r .[].slug
variable "k8s_version" {
type = string
default = "1.22.8-do.1"
default = "1.21.3-do.0"
}

View File

@@ -1,8 +1,7 @@
resource "linode_lke_cluster" "_" {
label = var.cluster_name
tags = var.common_tags
# "region" is mandatory, so let's provide a default value if none was given.
region = var.location != null ? var.location : "eu-central"
label = var.cluster_name
tags = var.common_tags
region = var.region
k8s_version = var.k8s_version
pool {

View File

@@ -5,7 +5,3 @@ output "kubeconfig" {
output "cluster_id" {
value = linode_lke_cluster._.id
}
output "has_metrics_server" {
value = false
}

View File

@@ -42,16 +42,16 @@ locals {
node_type = var.node_types[var.node_size]
}
# To view supported regions, run:
# To view supported versions, run:
# linode-cli regions list
variable "location" {
variable "region" {
type = string
default = null
default = "us-east"
}
# To view supported versions, run:
# linode-cli lke versions-list --json | jq -r .[].id
variable "k8s_version" {
type = string
default = "1.22"
default = "1.21"
}

View File

@@ -1,7 +1,6 @@
resource "oci_identity_compartment" "_" {
name = var.cluster_name
description = var.cluster_name
enable_delete = true
name = var.cluster_name
description = var.cluster_name
}
locals {

View File

@@ -9,7 +9,3 @@ output "kubeconfig" {
output "cluster_id" {
value = oci_containerengine_cluster._.id
}
output "has_metrics_server" {
value = false
}

View File

@@ -70,13 +70,6 @@ locals {
node_type = var.node_types[var.node_size]
}
# To view supported regions, run:
# oci iam region list | jq .data[].name
variable "location" {
type = string
default = null
}
# To view supported versions, run:
# oci ce cluster-options get --cluster-option-id all | jq -r '.data["kubernetes-versions"][]'
variable "k8s_version" {

View File

@@ -1,15 +1,13 @@
resource "scaleway_k8s_cluster" "_" {
name = var.cluster_name
region = var.location
tags = var.common_tags
version = var.k8s_version
cni = var.cni
delete_additional_resources = true
name = var.cluster_name
tags = var.common_tags
version = var.k8s_version
cni = var.cni
}
resource "scaleway_k8s_pool" "_" {
cluster_id = scaleway_k8s_cluster._.id
name = "x86"
name = "scw-x86"
tags = var.common_tags
node_type = local.node_type
size = var.min_nodes_per_pool

View File

@@ -5,7 +5,3 @@ output "kubeconfig" {
output "cluster_id" {
value = scaleway_k8s_cluster._.id
}
output "has_metrics_server" {
value = sort([var.k8s_version, "1.22"])[0] == "1.22"
}

View File

@@ -47,12 +47,7 @@ variable "cni" {
default = "cilium"
}
variable "location" {
type = string
default = null
}
# To view supported versions, run:
# See supported versions with:
# scw k8s version list -o json | jq -r .[].name
variable "k8s_version" {
type = string

View File

@@ -1,49 +0,0 @@
#!/bin/sh
set -e
TIME=$(which time)
PROVIDER=$1
[ "$PROVIDER" ] || {
echo "Please specify a provider as first argument, or 'ALL' for parallel mode."
echo "Available providers:"
ls -1 source/modules
exit 1
}
[ "$TAG" ] || {
TIMESTAMP=$(date +%Y-%m-%d-%H-%M-%S)
RANDOMTAG=$(base64 /dev/urandom | tr A-Z a-z | tr -d /+ | head -c5)
export TAG=tag-$TIMESTAMP-$RANDOMTAG
}
[ "$PROVIDER" = "ALL" ] && {
for PROVIDER in $(ls -1 source/modules); do
$TERMINAL -T $TAG-$PROVIDER -e sh -c "
export TAG=$TAG-$PROVIDER
$0 $PROVIDER
cd $TAG-$PROVIDER
bash
" &
done
exit 0
}
[ -d "source/modules/$PROVIDER" ] || {
echo "Provider '$PROVIDER' not found."
echo "Available providers:"
ls -1 source/modules
exit 1
}
export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
cp -a source $TAG
cd $TAG
cp -r modules/$PROVIDER modules/PROVIDER
$TIME -o time.1.init terraform init
$TIME -o time.2.stage1 terraform apply -auto-approve
cd stage2
$TIME -o ../time.3.init terraform init
$TIME -o ../time.4.stage2 terraform apply -auto-approve

View File

@@ -1,19 +0,0 @@
resource "random_string" "_" {
length = 4
number = false
special = false
upper = false
}
resource "time_static" "_" {}
locals {
timestamp = formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339)
tag = random_string._.result
# Common tags to be assigned to all resources
common_tags = [
"created-by-terraform",
format("created-at-%s", local.timestamp),
format("created-for-%s", local.tag)
]
}

View File

@@ -1,65 +0,0 @@
resource "google_container_cluster" "_" {
name = var.cluster_name
project = local.project
location = local.location
min_master_version = var.k8s_version
# To deploy private clusters, uncomment the section below,
# and uncomment the block in network.tf.
# Private clusters require extra resources (Cloud NAT,
# router, network, subnet) and the quota for some of these
# resources is fairly low on GCP; so if you want to deploy
# a lot of private clusters (more than 10), you can use these
# blocks as a base but you will probably have to refactor
# things quite a bit (you will at least need to define a single
# shared router and use it across all the clusters).
/*
network = google_compute_network._.name
subnetwork = google_compute_subnetwork._.name
private_cluster_config {
enable_private_nodes = true
# This must be set to "false".
# (Otherwise, access to the public endpoint is disabled.)
enable_private_endpoint = false
# This must be set to a /28.
# I think it shouldn't collide with the pod network subnet.
master_ipv4_cidr_block = "10.255.255.0/28"
}
# Private clusters require "VPC_NATIVE" networking mode
# (as opposed to the legacy "ROUTES").
networking_mode = "VPC_NATIVE"
# ip_allocation_policy is required for VPC_NATIVE clusters.
ip_allocation_policy {
# This is the block that will be used for pods.
cluster_ipv4_cidr_block = "10.0.0.0/12"
# The services block is optional
# (GKE will pick one automatically).
#services_ipv4_cidr_block = ""
}
*/
node_pool {
name = "x86"
node_config {
tags = var.common_tags
machine_type = local.node_type
}
initial_node_count = var.min_nodes_per_pool
autoscaling {
min_node_count = var.min_nodes_per_pool
max_node_count = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
}
}
# This is not strictly necessary.
# We'll see if we end up using it.
# (If it is removed, make sure to also remove the corresponding
# key+cert variables from outputs.tf!)
master_auth {
client_certificate_config {
issue_client_certificate = true
}
}
}

View File

@@ -1,38 +0,0 @@
/*
resource "google_compute_network" "_" {
name = var.cluster_name
project = local.project
# The default is to create subnets automatically.
# However, this creates one subnet per zone in all regions,
# which causes a quick exhaustion of the subnet quota.
auto_create_subnetworks = false
}
resource "google_compute_subnetwork" "_" {
name = var.cluster_name
ip_cidr_range = "10.254.0.0/16"
region = local.region
network = google_compute_network._.id
project = local.project
}
resource "google_compute_router" "_" {
name = var.cluster_name
region = local.region
network = google_compute_network._.name
project = local.project
}
resource "google_compute_router_nat" "_" {
name = var.cluster_name
router = google_compute_router._.name
region = local.region
project = local.project
# Everyone in the network is allowed to NAT out.
# (We would change this if we only wanted to allow specific subnets to NAT out.)
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
# Pick NAT addresses automatically.
# (We would change this if we wanted to use specific addresses to NAT out.)
nat_ip_allocate_option = "AUTO_ONLY"
}
*/

View File

@@ -1,35 +0,0 @@
data "google_client_config" "_" {}
output "kubeconfig" {
value = <<-EOT
apiVersion: v1
kind: Config
current-context: ${google_container_cluster._.name}
clusters:
- name: ${google_container_cluster._.name}
cluster:
server: https://${google_container_cluster._.endpoint}
certificate-authority-data: ${google_container_cluster._.master_auth[0].cluster_ca_certificate}
contexts:
- name: ${google_container_cluster._.name}
context:
cluster: ${google_container_cluster._.name}
user: client-token
users:
- name: client-cert
user:
client-key-data: ${google_container_cluster._.master_auth[0].client_key}
client-certificate-data: ${google_container_cluster._.master_auth[0].client_certificate}
- name: client-token
user:
token: ${data.google_client_config._.access_token}
EOT
}
output "cluster_id" {
value = google_container_cluster._.id
}
output "has_metrics_server" {
value = true
}

View File

@@ -1,8 +0,0 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "4.5.0"
}
}
}

View File

@@ -1,68 +0,0 @@
variable "cluster_name" {
type = string
default = "deployed-with-terraform"
}
variable "common_tags" {
type = list(string)
default = []
}
variable "node_size" {
type = string
default = "M"
}
variable "min_nodes_per_pool" {
type = number
default = 2
}
variable "max_nodes_per_pool" {
type = number
default = 5
}
# FIXME
variable "enable_arm_pool" {
type = bool
default = false
}
variable "node_types" {
type = map(string)
default = {
"S" = "e2-small"
"M" = "e2-medium"
"L" = "e2-standard-2"
}
}
locals {
node_type = var.node_types[var.node_size]
}
# To view supported locations, run:
# gcloud compute zones list
variable "location" {
type = string
default = null
}
# To view supported versions, run:
# gcloud container get-server-config --region=europe-north1 '--format=flattened(channels)'
# But it's also possible to just specify e.g. "1.20" and it figures it out.
variable "k8s_version" {
type = string
default = "1.21"
}
locals {
location = var.location != null ? var.location : "europe-north1-a"
region = replace(local.location, "/-[a-z]$/", "")
# Unfortunately, the following line doesn't work
# (that attribute just returns an empty string)
# so we have to hard-code the project name.
#project = data.google_client_config._.project
project = "prepare-tf"
}

View File

@@ -1,40 +0,0 @@
variable "how_many_clusters" {
type = number
default = 1
}
variable "node_size" {
type = string
default = "M"
# Can be S, M, L.
# We map these values to different specific instance types for each provider,
# but the idea is that they should correspond to the following sizes:
# S = 2 GB RAM
# M = 4 GB RAM
# L = 8 GB RAM
}
variable "min_nodes_per_pool" {
type = number
default = 1
}
variable "max_nodes_per_pool" {
type = number
default = 0
}
variable "enable_arm_pool" {
type = bool
default = false
}
variable "location" {
type = string
default = null
}
# TODO: perhaps handle if it's space-separated instead of newline?
locals {
locations = var.location == null ? [null] : split("\n", var.location)
}
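One way to address the TODO above (a sketch, not part of this configuration) would be to normalize whitespace before splitting:

```hcl
locals {
  # Accept newline- or space-separated locations; drop empty entries.
  locations = var.location == null ? [null] : compact(split(" ", replace(var.location, "/\\s+/", " ")))
}
```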

View File

@@ -2,7 +2,7 @@ terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.7.1"
version = "2.0.3"
}
}
}
@@ -119,11 +119,6 @@ resource "kubernetes_cluster_role_binding" "shpod_${index}" {
name = "shpod"
namespace = "shpod"
}
subject {
api_group = "rbac.authorization.k8s.io"
kind = "Group"
name = "shpod-cluster-admins"
}
}
resource "random_string" "shpod_${index}" {
@@ -140,20 +135,23 @@ provider "helm" {
}
resource "helm_release" "metrics_server_${index}" {
# Some providers pre-install metrics-server.
# Some don't. Let's install metrics-server,
# but only if it's not already installed.
count = yamldecode(file("./flags.${index}"))["has_metrics_server"] ? 0 : 1
provider = helm.cluster_${index}
repository = "https://kubernetes-sigs.github.io/metrics-server/"
repository = "https://charts.bitnami.com/bitnami"
chart = "metrics-server"
version = "3.8.2"
name = "metrics-server"
namespace = "metrics-server"
create_namespace = true
set {
name = "args"
value = "{--kubelet-insecure-tls}"
name = "apiService.create"
value = "true"
}
set {
name = "extraArgs.kubelet-insecure-tls"
value = "true"
}
set {
name = "extraArgs.kubelet-preferred-address-types"
value = "InternalIP"
}
}
@@ -183,7 +181,7 @@ resource "kubernetes_config_map" "kubeconfig_${index}" {
- name: cluster-admin
user:
client-key-data: $${base64encode(tls_private_key.cluster_admin_${index}.private_key_pem)}
client-certificate-data: $${base64encode(kubernetes_certificate_signing_request_v1.cluster_admin_${index}.certificate)}
client-certificate-data: $${base64encode(kubernetes_certificate_signing_request.cluster_admin_${index}.certificate)}
EOT
}
}
@@ -197,14 +195,11 @@ resource "tls_cert_request" "cluster_admin_${index}" {
private_key_pem = tls_private_key.cluster_admin_${index}.private_key_pem
subject {
common_name = "cluster-admin"
# Note: CSR API v1 doesn't allow issuing certs with "system:masters" anymore.
#organization = "system:masters"
# We'll use this custom group name instead.
organization = "shpod-cluster-admins"
organization = "system:masters"
}
}
resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {
resource "kubernetes_certificate_signing_request" "cluster_admin_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "cluster-admin"
@@ -212,7 +207,6 @@ resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {
spec {
usages = ["client auth"]
request = tls_cert_request.cluster_admin_${index}.cert_request_pem
signer_name = "kubernetes.io/kube-apiserver-client"
}
auto_approve = true
}

prepare-tf/variables.tf Normal file
View File

@@ -0,0 +1,28 @@
variable "how_many_clusters" {
type = number
default = 2
}
variable "node_size" {
type = string
default = "M"
# Can be S, M, L.
# S = 2 GB RAM
# M = 4 GB RAM
# L = 8 GB RAM
}
variable "min_nodes_per_pool" {
type = number
default = 1
}
variable "max_nodes_per_pool" {
type = number
default = 0
}
variable "enable_arm_pool" {
type = bool
default = true
}

View File

@@ -14,9 +14,7 @@ These tools can help you to create VMs on:
- [Docker](https://docs.docker.com/engine/installation/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Parallel SSH](https://github.com/lilydjwg/pssh)
(should be installable with `pip install git+https://github.com/lilydjwg/pssh`;
on a Mac, try `brew install pssh`)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
Depending on the infrastructure that you want to use, you also need to install
the CLI that is specific to that cloud. For OpenStack deployments, you will

View File

@@ -1,5 +1,4 @@
INFRACLASS=terraform
TERRAFORM=openstack
INFRACLASS=openstack-tf
# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
# and customize the variables below.

View File

@@ -75,11 +75,9 @@ _cmd_createuser() {
echo '$USER_LOGIN ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/$USER_LOGIN
"
# The MaxAuthTries setting is here to help folks who have many SSH keys.
pssh "
set -e
sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
sudo sed -i 's/#MaxAuthTries 6/MaxAuthTries 42/' /etc/ssh/sshd_config
sudo service ssh restart
"
@@ -178,13 +176,6 @@ _cmd_clusterize() {
# install --owner=ubuntu --mode=600 /root/.ssh/authorized_keys --target-directory /home/ubuntu/.ssh"
#fi
# Special case for Oracle, since their iptables rules block everything but SSH
pssh "
if [ -f /etc/iptables/rules.v4 ]; then
sudo sed -i 's/-A INPUT -j REJECT --reject-with icmp-host-prohibited//' /etc/iptables/rules.v4
sudo netfilter-persistent start
fi"
# Copy settings and install Python YAML parser
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
pssh "
@@ -192,10 +183,10 @@ _cmd_clusterize() {
sudo apt-get install -y python-yaml"
# If there is no "python" binary, symlink to python3
pssh "
if ! which python; then
sudo ln -s $(which python3) /usr/local/bin/python
fi"
#pssh "
#if ! which python; then
# ln -s $(which python3) /usr/local/bin/python
#fi"
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
pssh -I tee /tmp/clusterize.py <lib/clusterize.py
@@ -239,37 +230,23 @@ _cmd_docker() {
sudo ln -sfn /mnt/docker /var/lib/docker
fi
# containerd 1.6 breaks Weave.
# See https://github.com/containerd/containerd/issues/6921
sudo tee /etc/apt/preferences.d/containerd <<EOF
Package: containerd.io
Pin: version 1.5.*
Pin-Priority: 1000
EOF
# This will install the latest Docker.
sudo apt-get -qy install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository 'deb https://download.docker.com/linux/ubuntu bionic stable'
sudo apt-get -q update
sudo apt-get -qy install docker-ce
# Add registry mirror configuration.
if ! [ -f /etc/docker/daemon.json ]; then
echo '{\"registry-mirrors\": [\"https://mirror.gcr.io\"]}' | sudo tee /etc/docker/daemon.json
sudo systemctl restart docker
fi
"
##VERSION## https://github.com/docker/compose/releases
if [ "$ARCHITECTURE" ]; then
COMPOSE_VERSION=v2.2.3
COMPOSE_VERSION=v2.0.1
COMPOSE_PLATFORM='linux-$(uname -m)'
else
COMPOSE_VERSION=1.29.2
COMPOSE_PLATFORM='Linux-$(uname -m)'
fi
pssh "
pssh -i "
set -e
### Install docker-compose.
sudo curl -fsSL -o /usr/local/bin/docker-compose \
@@ -326,15 +303,13 @@ _cmd_kube() {
need_login_password
# Optional version, e.g. 1.13.5
SETTINGS=tags/$TAG/settings.yaml
KUBEVERSION=$(awk '/^kubernetes_version:/ {print $2}' $SETTINGS)
KUBEVERSION=$2
if [ "$KUBEVERSION" ]; then
pssh "
sudo tee /etc/apt/preferences.d/kubernetes <<EOF
Package: kubectl kubeadm kubelet
Pin: version $KUBEVERSION*
Pin-Priority: 1000
EOF"
EXTRA_APTGET="=$KUBEVERSION-00"
EXTRA_KUBEADM="kubernetesVersion: v$KUBEVERSION"
else
EXTRA_APTGET=""
EXTRA_KUBEADM=""
fi
# Install packages
@@ -345,8 +320,7 @@ EOF"
sudo tee /etc/apt/sources.list.d/kubernetes.list"
pssh --timeout 200 "
sudo apt-get update -q &&
sudo apt-get install -qy kubelet kubeadm kubectl &&
sudo apt-mark hold kubelet kubeadm kubectl
sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
@@ -358,11 +332,6 @@ EOF"
sudo swapoff -a"
fi
# Re-enable CRI interface in containerd
pssh "
echo '# Use default parameters for containerd.' | sudo tee /etc/containerd/config.toml
sudo systemctl restart containerd"
# Initialize kube control plane
pssh --timeout 200 "
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
@@ -372,38 +341,19 @@ kind: InitConfiguration
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- token: \$(cat /tmp/token)
nodeRegistration:
# Comment out the next line to switch back to Docker.
criSocket: /run/containerd/containerd.sock
ignorePreflightErrors:
- NumCPU
---
kind: JoinConfiguration
apiVersion: kubeadm.k8s.io/v1beta2
discovery:
bootstrapToken:
apiServerEndpoint: \$(cat /etc/name_of_first_node):6443
token: \$(cat /tmp/token)
unsafeSkipCAVerification: true
nodeRegistration:
# Comment out the next line to switch back to Docker.
criSocket: /run/containerd/containerd.sock
ignorePreflightErrors:
- NumCPU
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
# The following line is necessary when using Docker.
# It doesn't seem necessary when using containerd.
#cgroupDriver: cgroupfs
cgroupDriver: cgroupfs
---
kind: ClusterConfiguration
apiVersion: kubeadm.k8s.io/v1beta2
apiServer:
certSANs:
- \$(cat /tmp/ipv4)
$EXTRA_KUBEADM
EOF
sudo kubeadm init --config=/tmp/kubeadm-config.yaml
sudo kubeadm init --config=/tmp/kubeadm-config.yaml --ignore-preflight-errors=NumCPU
fi"
# Put kubeconfig in ubuntu's and $USER_LOGIN's accounts
@@ -427,17 +377,14 @@ EOF
pssh --timeout 200 "
if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
FIRSTNODE=\$(cat /etc/name_of_first_node) &&
ssh $SSHOPTS \$FIRSTNODE cat /tmp/kubeadm-config.yaml > /tmp/kubeadm-config.yaml &&
sudo kubeadm join --config /tmp/kubeadm-config.yaml
TOKEN=\$(ssh $SSHOPTS \$FIRSTNODE cat /tmp/token) &&
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
fi"
# Install metrics server
pssh "
if i_am_first_node; then
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
#helm upgrade --install metrics-server \
# --repo https://kubernetes-sigs.github.io/metrics-server/ metrics-server \
# --namespace kube-system --set args={--kubelet-insecure-tls}
fi"
}
@@ -495,7 +442,7 @@ EOF
# Install stern
##VERSION## https://github.com/stern/stern/releases
STERN_VERSION=1.20.1
FILENAME=stern_${STERN_VERSION}_linux_${ARCH}
FILENAME=stern_${STERN_VERSION}_linux_${HERP_DERP_ARCH}
URL=https://github.com/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
pssh "
if [ ! -x /usr/local/bin/stern ]; then
@@ -517,12 +464,12 @@ EOF
# Install kustomize
##VERSION## https://github.com/kubernetes-sigs/kustomize/releases
KUSTOMIZE_VERSION=v4.4.0
URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${HERP_DERP_ARCH}.tar.gz
pssh "
if [ ! -x /usr/local/bin/kustomize ]; then
curl -fsSL $URL |
sudo tar -C /usr/local/bin -zx kustomize
kustomize completion bash | sudo tee /etc/bash_completion.d/kustomize
echo complete -C /usr/local/bin/kustomize kustomize | sudo tee /etc/bash_completion.d/kustomize
kustomize version
fi"
@@ -582,9 +529,8 @@ EOF
# But the install script is not arch-aware (see https://github.com/tilt-dev/tilt/pull/5050).
pssh "
if [ ! -x /usr/local/bin/tilt ]; then
TILT_VERSION=0.22.15
FILENAME=tilt.\$TILT_VERSION.linux.$TILT_ARCH.tar.gz
curl -fsSL https://github.com/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
FILENAME=tilt.0.22.13.linux.$TILT_ARCH.tar.gz
curl -fsSL https://github.com/tilt-dev/tilt/releases/latest/download/\$FILENAME |
sudo tar -zxvf- -C /usr/local/bin tilt
tilt version
fi"
@@ -606,16 +552,16 @@ EOF
fi"
##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
KUBESEAL_VERSION=0.17.4
#case $ARCH in
#amd64) FILENAME=kubeseal-linux-amd64;;
#arm64) FILENAME=kubeseal-arm64;;
#*) FILENAME=nope;;
#esac
pssh "
KUBESEAL_VERSION=v0.16.0
case $ARCH in
amd64) FILENAME=kubeseal-linux-amd64;;
arm64) FILENAME=kubeseal-arm64;;
*) FILENAME=nope;;
esac
[ "$FILENAME" = "nope" ] || pssh "
if [ ! -x /usr/local/bin/kubeseal ]; then
curl -fsSL https://github.com/bitnami-labs/sealed-secrets/releases/download/v$KUBESEAL_VERSION/kubeseal-$KUBESEAL_VERSION-linux-$ARCH.tar.gz |
sudo tar -zxvf- -C /usr/local/bin kubeseal
curl -fsSLo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/$KUBESEAL_VERSION/$FILENAME &&
sudo install kubeseal /usr/local/bin
kubeseal --version
fi"
}
@@ -731,7 +677,7 @@ _cmd_tailhist () {
ARCH=${ARCHITECTURE-amd64}
[ "$ARCH" = "aarch64" ] && ARCH=arm64
pssh "
pssh -i "
set -e
wget https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
unzip websocketd-0.3.0-linux_$ARCH.zip websocketd
@@ -1069,8 +1015,7 @@ _cmd_webssh() {
need_tag
pssh "
sudo apt-get update &&
sudo apt-get install python-tornado python-paramiko -y ||
sudo apt-get install python3-tornado python3-paramiko -y"
sudo apt-get install python-tornado python-paramiko -y"
pssh "
cd /opt
[ -d webssh ] || sudo git clone https://github.com/jpetazzo/webssh"

View File

@@ -26,24 +26,12 @@ infra_start() {
info " Name: $NAME"
info " Instance type: $LINODE_TYPE"
ROOT_PASS="$(base64 /dev/urandom | cut -c1-20 | head -n 1)"
MAX_TRY=5
TRY=1
WAIT=1
while ! linode-cli linodes create \
linode-cli linodes create \
--type=${LINODE_TYPE} --region=${LINODE_REGION} \
--image=linode/ubuntu18.04 \
--authorized_keys="${LINODE_SSHKEY}" \
--root_pass="${ROOT_PASS}" \
--tags=${TAG} --label=${NAME}; do
warning "Failed to create VM (attempt $TRY/$MAX_TRY)."
if [ $TRY -ge $MAX_TRY ]; then
die "Giving up."
fi
info "Waiting $WAIT seconds and retrying."
sleep $WAIT
TRY=$(($TRY+1))
WAIT=$(($WAIT*2))
done
--tags=${TAG} --label=${NAME}
done
sep

View File

@@ -0,0 +1,28 @@
infra_start() {
COUNT=$1
cp terraform/*.tf tags/$TAG
(
cd tags/$TAG
if ! terraform init; then
error "'terraform init' failed."
error "If it mentions the following error message:"
error "openpgp: signature made by unknown entity."
error "Then you need to upgrade Terraform to 0.11.15"
error "to upgrade its signing keys following the"
error "codecov breach."
die "Aborting."
fi
echo prefix = \"$TAG\" >> terraform.tfvars
echo count = \"$COUNT\" >> terraform.tfvars
terraform apply -auto-approve
terraform output ip_addresses > ips.txt
)
}
infra_stop() {
(
cd tags/$TAG
terraform destroy -auto-approve
)
}

View File

@@ -1,47 +0,0 @@
error_terraform_configuration() {
error "When using the terraform infraclass, the TERRAFORM"
error "environment variable must be set to one of the available"
error "terraform configurations. These configurations are in"
error "the prepare-vm/terraform subdirectory. You should probably"
error "update your infra file and set the variable."
error "(e.g. with TERRAFORM=openstack)"
}
if [ "$TERRAFORM" = "" ]; then
error_terraform_configuration
die "Aborting because TERRAFORM variable is not set."
fi
if [ ! -d terraform/$TERRAFORM ]; then
error_terraform_configuration
die "Aborting because no terraform configuration was found in 'terraform/$TERRAFORM'."
fi
infra_start() {
COUNT=$1
cp terraform/$TERRAFORM/*.tf tags/$TAG
(
cd tags/$TAG
if ! terraform init; then
error "'terraform init' failed."
error "If it mentions the following error message:"
error "openpgp: signature made by unknown entity."
error "Then you need to upgrade Terraform to 0.11.15"
error "to upgrade its signing keys following the"
error "codecov breach."
die "Aborting."
fi
echo prefix = \"$TAG\" >> terraform.tfvars
echo how_many_nodes = \"$COUNT\" >> terraform.tfvars
terraform apply -auto-approve
terraform output -raw ip_addresses > ips.txt
)
}
infra_stop() {
(
cd tags/$TAG
terraform destroy -auto-approve
)
}

View File

@@ -26,7 +26,6 @@ pssh() {
$PSSH -h $HOSTFILE -l $LOGIN \
--par 100 \
--timeout 300 \
-O LogLevel=ERROR \
-O UserKnownHostsFile=/dev/null \
-O StrictHostKeyChecking=no \

View File

@@ -60,10 +60,7 @@ while domains and clusters:
zone += f"node{node} 300 IN A {ip}\n"
r = requests.put(
f"{apiurl}/{domain}/records",
headers={
"x-api-key": apikey,
"content-type": "text/plain",
},
headers={"x-api-key": apikey},
data=zone)
print(r.text)

View File

@@ -1,82 +0,0 @@
#!/bin/sh
# https://open-api.netlify.com/#tag/dnsZone
[ "$1" ] || {
echo ""
echo "Add a record in Netlify DNS."
echo "This script is hardcoded to add a record to container.training".
echo ""
echo "Syntax:"
echo "$0 list"
echo "$0 add <name> <ipaddr>"
echo "$0 del <recordid>"
echo ""
echo "Example to create a A record for eu.container.training:"
echo "$0 add eu 185.145.250.0"
echo ""
exit 1
}
NETLIFY_USERID=$(jq .userId < ~/.config/netlify/config.json)
NETLIFY_TOKEN=$(jq -r .users[$NETLIFY_USERID].auth.token < ~/.config/netlify/config.json)
netlify() {
URI=$1
shift
http https://api.netlify.com/api/v1/$URI "$@" "Authorization:Bearer $NETLIFY_TOKEN"
}
ZONE_ID=$(netlify dns_zones |
jq -r '.[] | select ( .name == "container.training" ) | .id')
_list() {
netlify dns_zones/$ZONE_ID/dns_records |
jq -r '.[] | select(.type=="A") | [.hostname, .type, .value, .id] | @tsv'
}
_add() {
NAME=$1.container.training
ADDR=$2
# It looks like if we create two identical records, then delete one of them,
# Netlify DNS ends up in a weird state (the name doesn't resolve anymore even
# though it's still visible through the API and the website?)
if netlify dns_zones/$ZONE_ID/dns_records |
jq '.[] | select(.hostname=="'$NAME'" and .type=="A" and .value=="'$ADDR'")' |
grep .
then
echo "It looks like that record already exists. Refusing to create it."
exit 1
fi
netlify dns_zones/$ZONE_ID/dns_records type=A hostname=$NAME value=$ADDR ttl=300
netlify dns_zones/$ZONE_ID/dns_records |
jq '.[] | select(.hostname=="'$NAME'")'
}
_del() {
RECORD_ID=$1
# OK, since that one is dangerous, I'm putting the whole request explicitly here
http DELETE \
https://api.netlify.com/api/v1/dns_zones/$ZONE_ID/dns_records/$RECORD_ID \
"Authorization:Bearer $NETLIFY_TOKEN"
}
case "$1" in
list)
_list
;;
add)
_add $2 $3
;;
del)
_del $2
;;
*)
echo "Unknown command '$1'."
exit 1
;;
esac

View File

@@ -1,33 +0,0 @@
# Number of VMs per cluster
clustersize: 3
# The hostname of each node will be clusterprefix + a number
clusterprefix: oldversion
# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html
# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4
# Login and password that students will use
user_login: k8s
user_password: training
# For a list of old versions, check:
# https://kubernetes.io/releases/patch-releases/#non-active-branch-history
kubernetes_version: 1.18.20
image:
steps:
- wait
- clusterize
- tools
- docker
- createuser
- webssh
- tailhist
- kube
- kubetools
- kubetest

View File

@@ -3,7 +3,7 @@ set -e
export AWS_INSTANCE_TYPE=t3a.small
INFRA=infra/aws-eu-north-1
INFRA=infra/aws-us-east-2
STUDENTS=2
@@ -33,15 +33,9 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--students $STUDENTS
INFRA=infra/enix
#INFRA=infra/aws-us-west-1
SETTINGS=admin-oldversion
TAG=$PREFIX-$SETTINGS
./workshopctl start \
--tag $TAG \
--infra $INFRA \
--settings settings/$SETTINGS.yaml \
--students $STUDENTS
export AWS_INSTANCE_TYPE=t3a.medium
SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS

View File

@@ -0,0 +1,5 @@
resource "openstack_compute_keypair_v2" "ssh_deploy_key" {
name = "${var.prefix}"
public_key = "${file("~/.ssh/id_rsa.pub")}"
}

View File

@@ -0,0 +1,34 @@
resource "openstack_compute_instance_v2" "machine" {
count = "${var.count}"
name = "${format("%s-%04d", "${var.prefix}", count.index+1)}"
image_name = "${var.image}"
flavor_name = "${var.flavor}"
security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"]
key_pair = "${openstack_compute_keypair_v2.ssh_deploy_key.name}"
network {
name = "${openstack_networking_network_v2.internal.name}"
fixed_ip_v4 = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
}
}
resource "openstack_compute_floatingip_v2" "machine" {
count = "${var.count}"
# This is something provided to us by Enix when our tenant was provisioned.
pool = "Public Floating"
}
resource "openstack_compute_floatingip_associate_v2" "machine" {
count = "${var.count}"
floating_ip = "${openstack_compute_floatingip_v2.machine.*.address[count.index]}"
instance_id = "${openstack_compute_instance_v2.machine.*.id[count.index]}"
fixed_ip = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
}
output "ip_addresses" {
value = "${join("\n", openstack_compute_floatingip_v2.machine.*.address)}"
}
variable "flavor" {}
variable "image" {}

View File

@@ -1,23 +1,23 @@
resource "openstack_networking_network_v2" "internal" {
name = var.prefix
name = "${var.prefix}"
}
resource "openstack_networking_subnet_v2" "internal" {
name = var.prefix
network_id = openstack_networking_network_v2.internal.id
name = "${var.prefix}"
network_id = "${openstack_networking_network_v2.internal.id}"
cidr = "10.10.0.0/16"
ip_version = 4
dns_nameservers = ["1.1.1.1"]
}
resource "openstack_networking_router_v2" "router" {
name = var.prefix
name = "${var.prefix}"
external_network_id = "15f0c299-1f50-42a6-9aff-63ea5b75f3fc"
}
resource "openstack_networking_router_interface_v2" "router_internal" {
router_id = openstack_networking_router_v2.router.id
subnet_id = openstack_networking_subnet_v2.internal.id
router_id = "${openstack_networking_router_v2.router.id}"
subnet_id = "${openstack_networking_subnet_v2.internal.id}"
}

View File

@@ -1,48 +0,0 @@
resource "oci_identity_compartment" "_" {
name = var.prefix
description = var.prefix
enable_delete = true
}
locals {
compartment_id = oci_identity_compartment._.id
}
data "oci_identity_availability_domains" "_" {
compartment_id = local.compartment_id
}
data "oci_core_images" "_" {
compartment_id = local.compartment_id
shape = var.shape
operating_system = "Canonical Ubuntu"
operating_system_version = "20.04"
#operating_system = "Oracle Linux"
#operating_system_version = "7.9"
}
resource "oci_core_instance" "_" {
count = var.how_many_nodes
display_name = format("%s-%04d", var.prefix, count.index + 1)
availability_domain = data.oci_identity_availability_domains._.availability_domains[var.availability_domain].name
compartment_id = local.compartment_id
shape = var.shape
shape_config {
memory_in_gbs = var.memory_in_gbs_per_node
ocpus = var.ocpus_per_node
}
source_details {
source_id = data.oci_core_images._.images[0].id
source_type = "image"
}
create_vnic_details {
subnet_id = oci_core_subnet._.id
}
metadata = {
ssh_authorized_keys = local.authorized_keys
}
}
output "ip_addresses" {
value = join("", formatlist("%s\n", oci_core_instance._.*.public_ip))
}

View File

@@ -1,63 +0,0 @@
resource "oci_core_vcn" "_" {
compartment_id = local.compartment_id
cidr_block = "10.0.0.0/16"
display_name = "tf-vcn"
}
#
# On OCI, you can have either "public" or "private" subnets.
# In both cases, instances get addresses in the VCN CIDR block;
# but instances in "public" subnets also get a public address.
#
# Then, to enable communication to the outside world, you need:
# - for public subnets, an "internet gateway"
# (will allow inbound and outbound traffic)
# - for private subnets, a "NAT gateway"
# (will only allow outbound traffic)
# - optionally, for private subnets, a "service gateway"
# (to access other OCI services, e.g. object store)
#
# In this configuration, we use public subnets, and since we
# need outside access, we add an internet gateway.
#
# Note that the default routing table in a VCN is empty, so we
# add the internet gateway to the default routing table.
# Similarly, the default security group in a VCN blocks almost
# everything, so we add a blanket rule in that security group.
#
resource "oci_core_internet_gateway" "_" {
compartment_id = local.compartment_id
display_name = "tf-igw"
vcn_id = oci_core_vcn._.id
}
resource "oci_core_default_route_table" "_" {
manage_default_resource_id = oci_core_vcn._.default_route_table_id
route_rules {
destination = "0.0.0.0/0"
destination_type = "CIDR_BLOCK"
network_entity_id = oci_core_internet_gateway._.id
}
}
resource "oci_core_default_security_list" "_" {
manage_default_resource_id = oci_core_vcn._.default_security_list_id
ingress_security_rules {
protocol = "all"
source = "0.0.0.0/0"
}
egress_security_rules {
protocol = "all"
destination = "0.0.0.0/0"
}
}
resource "oci_core_subnet" "_" {
compartment_id = local.compartment_id
cidr_block = "10.0.0.0/20"
vcn_id = oci_core_vcn._.id
display_name = "tf-subnet"
route_table_id = oci_core_default_route_table._.id
security_list_ids = [oci_core_default_security_list._.id]
}

View File

@@ -1,8 +0,0 @@
terraform {
required_version = ">= 1"
required_providers {
oci = {
source = "hashicorp/oci"
version = "4.48.0" }
}
}

View File

@@ -1,42 +0,0 @@
variable "prefix" {
type = string
default = "provisioned-with-terraform"
}
variable "how_many_nodes" {
type = number
default = 2
}
locals {
authorized_keys = file("~/.ssh/id_rsa.pub")
}
/*
Available flex shapes:
"VM.Optimized3.Flex" # Intel Ice Lake
"VM.Standard3.Flex" # Intel Ice Lake
"VM.Standard.A1.Flex" # Ampere Altra
"VM.Standard.E3.Flex" # AMD Rome
"VM.Standard.E4.Flex" # AMD Milan
*/
variable "shape" {
type = string
default = "VM.Standard.A1.Flex"
}
variable "availability_domain" {
type = number
default = 0
}
variable "ocpus_per_node" {
type = number
default = 1
}
variable "memory_in_gbs_per_node" {
type = number
default = 4
}

View File

@@ -1,5 +0,0 @@
resource "openstack_compute_keypair_v2" "ssh_deploy_key" {
name = var.prefix
public_key = file("~/.ssh/id_rsa.pub")
}

View File

@@ -1,34 +0,0 @@
resource "openstack_compute_instance_v2" "machine" {
count = var.how_many_nodes
name = format("%s-%04d", var.prefix, count.index+1)
image_name = var.image
flavor_name = var.flavor
security_groups = [openstack_networking_secgroup_v2.full_access.name]
key_pair = openstack_compute_keypair_v2.ssh_deploy_key.name
network {
name = openstack_networking_network_v2.internal.name
fixed_ip_v4 = cidrhost(openstack_networking_subnet_v2.internal.cidr, count.index+10)
}
}
resource "openstack_compute_floatingip_v2" "machine" {
count = var.how_many_nodes
# This is something provided to us by Enix when our tenant was provisioned.
pool = "Public Floating"
}
resource "openstack_compute_floatingip_associate_v2" "machine" {
count = var.how_many_nodes
floating_ip = openstack_compute_floatingip_v2.machine.*.address[count.index]
instance_id = openstack_compute_instance_v2.machine.*.id[count.index]
fixed_ip = cidrhost(openstack_networking_subnet_v2.internal.cidr, count.index+10)
}
output "ip_addresses" {
value = join("", formatlist("%s\n", openstack_compute_floatingip_v2.machine.*.address))
}
variable "flavor" {}
variable "image" {}

View File

@@ -1,23 +0,0 @@
terraform {
required_version = ">= 1"
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "~> 1.45.0"
}
}
}
provider "openstack" {
user_name = var.user
tenant_name = var.tenant
domain_name = var.domain
password = var.password
auth_url = var.auth_url
}
variable "user" {}
variable "tenant" {}
variable "domain" {}
variable "password" {}
variable "auth_url" {}

View File

@@ -1,7 +0,0 @@
variable "prefix" {
type = string
}
variable "how_many_nodes" {
type = number
}

View File

@@ -0,0 +1,13 @@
provider "openstack" {
user_name = "${var.user}"
tenant_name = "${var.tenant}"
domain_name = "${var.domain}"
password = "${var.password}"
auth_url = "${var.auth_url}"
}
variable "user" {}
variable "tenant" {}
variable "domain" {}
variable "password" {}
variable "auth_url" {}

View File

@@ -7,6 +7,6 @@ resource "openstack_networking_secgroup_rule_v2" "full_access" {
ethertype = "IPv4"
protocol = ""
remote_ip_prefix = "0.0.0.0/0"
security_group_id = openstack_networking_secgroup_v2.full_access.id
security_group_id = "${openstack_networking_secgroup_v2.full_access.id}"
}

View File

@@ -0,0 +1,7 @@
variable "prefix" {
type = "string"
}
variable "count" {
type = "string"
}

View File

@@ -2,7 +2,7 @@
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /craft.yml.html 200!
/ /week2.yml.html 200!
# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
@@ -19,8 +19,6 @@
#/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
/hi5 https://enix.io/fr/services/formation/online/
/us https://www.ardanlabs.com/live-training-events/deploying-microservices-and-traditional-applications-with-kubernetes-march-28-2022.html
/uk https://skillsmatter.com/workshops/827-deploying-microservices-and-traditional-applications-with-kubernetes-with-jerome-petazzoni
# Survey form
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform

View File

@@ -1,362 +0,0 @@
# Buildkit
- "New" backend for Docker builds
- announced in 2017
- ships with Docker Engine 18.09
- enabled by default on Docker Desktop in 2021
- Huge improvements in build efficiency
- 100% compatible with existing Dockerfiles
- New features for multi-arch
- Not just for building container images
---
## Old vs New
- Classic `docker build`:
- copy whole build context
- linear execution
- `docker run` + `docker commit` + `docker run` + `docker commit`...
- Buildkit:
- copy files only when they are needed; cache them
- compute dependency graph (dependencies are expressed by `COPY`)
- parallel execution
- doesn't rely on Docker, but on internal runner/snapshotter
- can run in "normal" containers (including in Kubernetes pods)
---
## Parallel execution
- In multi-stage builds, all stages can be built in parallel
(example: https://github.com/jpetazzo/shpod; [before] and [after])
- Stages are built only when they are necessary
(i.e. if their output is tagged or used in another necessary stage)
- Files are copied from context only when needed
- Files are cached in the builder
[before]: https://github.com/jpetazzo/shpod/blob/c6efedad6d6c3dc3120dbc0ae0a6915f85862474/Dockerfile
[after]: https://github.com/jpetazzo/shpod/blob/d20887bbd56b5fcae2d5d9b0ce06cae8887caabf/Dockerfile
---
## Turning it on and off
- On recent versions of Docker Desktop (since 2021):
*enabled by default*
- On older versions, or on Docker CE (Linux):
`export DOCKER_BUILDKIT=1`
- Turning it off:
`export DOCKER_BUILDKIT=0`
---
## Multi-arch support
- Historically, Docker only ran on x86_64 / amd64
(Intel/AMD 64 bits architecture)
- Folks have been running it on 32-bit ARM for ages
(e.g. Raspberry Pi)
- This required a Go compiler and appropriate base images
(which means changing/adapting Dockerfiles to use these base images)
- Docker [image manifest v2 schema 2][manifest] introduces multi-arch images
(`FROM alpine` automatically gets the right image for your architecture)
[manifest]: https://docs.docker.com/registry/spec/manifest-v2-2/
---
## Why?
- Raspberry Pi (32-bit and 64-bit ARM)
- Other ARM-based embedded systems (ODROID, NVIDIA Jetson...)
- Apple M1
- AWS Graviton
- Ampere Altra (e.g. on Oracle Cloud)
- ...
---
## Multi-arch builds in a nutshell
Use the `docker buildx build` command:
```bash
docker buildx build … \
--platform linux/amd64,linux/arm64,linux/arm/v7,linux/386 \
[--tag jpetazzo/hello --push]
```
- Requires all base images to be available for these platforms
- Must not use binary downloads with hard-coded architectures!
(streamlining a Dockerfile for multi-arch: [before], [after])
[before]: https://github.com/jpetazzo/shpod/blob/d20887bbd56b5fcae2d5d9b0ce06cae8887caabf/Dockerfile
[after]: https://github.com/jpetazzo/shpod/blob/c50789e662417b34fea6f5e1d893721d66d265b7/Dockerfile
---
## Native vs emulated vs cross
- Native builds:
*aarch64 machine running aarch64 programs building aarch64 images/binaries*
- Emulated builds:
*x86_64 machine running aarch64 programs building aarch64 images/binaries*
- Cross builds:
*x86_64 machine running x86_64 programs building aarch64 images/binaries*
---
## Native
- Dockerfiles are (relatively) simple to write
(nothing special to do to handle multi-arch; just avoid hard-coded archs)
- Best performance
- Requires "exotic" machines
- Requires setting up a build farm
---
## Emulated
- Dockerfiles are (relatively) simple to write
- Emulation performance can vary
(from "OK" to "ouch this is slow")
- Emulation isn't always perfect
(weird bugs/crashes are rare but can happen)
- Doesn't require special machines
- Supports arbitrary architectures thanks to QEMU
---
## Cross
- Dockerfiles are more complicated to write
- Requires cross-compilation toolchains
- Performance is good
- Doesn't require special machines
---
## Native builds
- Requires base images to be available
- To view available architectures for an image:
```bash
regctl manifest get --list <imagename>
docker manifest inspect <imagename>
```
- Nothing special to do, *except* when downloading binaries!
```
https://releases.hashicorp.com/terraform/1.1.5/terraform_1.1.5_linux_`amd64`.zip
```
---
## Finding the right architecture
`uname -m` → armv7l, aarch64, i686, x86_64
`GOARCH` (from `go env`) → arm, arm64, 386, amd64
In Dockerfile, add `ARG TARGETARCH` (or `ARG TARGETPLATFORM`)
- `TARGETARCH` matches `GOARCH`
- `TARGETPLATFORM` → linux/arm/v7, linux/arm64, linux/386, linux/amd64
---
class: extra-details
## Welp
Sometimes, binary releases be like:
```
Linux_arm64.tar.gz
Linux_ppc64le.tar.gz
Linux_s390x.tar.gz
Linux_x86_64.tar.gz
```
This needs a bit of custom mapping.
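A minimal sketch of such a mapping in shell (the filenames come from the listing above; the variable wiring is an assumption):

```bash
# Map Docker's TARGETARCH to this release's naming scheme.
case "$TARGETARCH" in
  amd64) FILENAME=Linux_x86_64.tar.gz;;
  arm64) FILENAME=Linux_arm64.tar.gz;;
  *) echo "unsupported architecture: $TARGETARCH"; exit 1;;
esac
```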
---
## Emulation
- Leverages `binfmt_misc` and QEMU on Linux
- Enabling:
```bash
docker run --rm --privileged aptman/qus -s -- -p
```
- Disabling:
```bash
docker run --rm --privileged aptman/qus -- -r
```
- Checking status:
```bash
ls -l /proc/sys/fs/binfmt_misc
```
---
class: extra-details
## How it works
- `binfmt_misc` lets us register _interpreters_ for binaries, e.g.:
- [DOSBox][dosbox] for DOS programs
- [Wine][wine] for Windows programs
- [QEMU][qemu] for Linux programs for other architectures
- When we try to execute e.g. a SPARC binary on our x86_64 machine:
- `binfmt_misc` detects the binary format and invokes `qemu-<arch> the-binary ...`
- QEMU translates SPARC instructions to x86_64 instructions
- system calls go straight to the kernel
[dosbox]: https://www.dosbox.com/
[QEMU]: https://www.qemu.org/
[wine]: https://www.winehq.org/
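To peek at one registration (a sketch; entry names depend on how the interpreters were registered):

```bash
# Each registered interpreter shows up as a pseudo-file;
# "qemu-aarch64" assumes the qus registration shown earlier.
cat /proc/sys/fs/binfmt_misc/qemu-aarch64
```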
---
class: extra-details
## QEMU registration
- The `aptman/qus` image mentioned earlier contains static QEMU builds
- It registers all these interpreters with the kernel
- For more details, check:
- https://github.com/dbhi/qus
- https://dbhi.github.io/qus/
---
## Cross-compilation
- Cross-compilation is about 10x faster than emulation
(non-scientific benchmarks!)
- In Dockerfile, add:
`ARG BUILDARCH BUILDPLATFORM TARGETARCH TARGETPLATFORM`
- Can use `FROM --platform=$BUILDPLATFORM <image>`
- Then use `$TARGETARCH` or `$TARGETPLATFORM`
(e.g. for Go, `export GOARCH=$TARGETARCH`)
- Check [tonistiigi/xx][xx] and [Toni's blog][toni] for some amazing cross tools!
[xx]: https://github.com/tonistiigi/xx
[toni]: https://medium.com/@tonistiigi/faster-multi-platform-builds-dockerfile-cross-compilation-guide-part-1-ec087c719eaf
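For example, with Go (a sketch; the output name is arbitrary):

```bash
# The Go toolchain runs natively; only the produced binary targets arm64.
GOOS=linux GOARCH=arm64 go build -o myapp-arm64 .
file myapp-arm64    # should report an aarch64 ELF executable
```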
---
## Checking runtime capabilities
Build and run the following Dockerfile:
```dockerfile
FROM --platform=linux/amd64 busybox AS amd64
FROM --platform=linux/arm64 busybox AS arm64
FROM --platform=linux/arm/v7 busybox AS arm32
FROM --platform=linux/386 busybox AS ia32
FROM alpine
RUN apk add file
WORKDIR /root
COPY --from=amd64 /bin/busybox /root/amd64/busybox
COPY --from=arm64 /bin/busybox /root/arm64/busybox
COPY --from=arm32 /bin/busybox /root/arm32/busybox
COPY --from=ia32 /bin/busybox /root/ia32/busybox
CMD for A in *; do echo "$A => $($A/busybox uname -a)"; done
```
It will indicate which executables can be run on your engine.
---
## More than builds
- Buildkit is also used in other systems:
- [Earthly] - generic repeatable build pipelines
- [Dagger] - CICD pipelines that run anywhere
- and more!
[Earthly]: https://earthly.dev/
[Dagger]: https://dagger.io/

View File

@@ -58,7 +58,7 @@ class: pic
- it uses different concepts (Compose services ≠ Kubernetes services)
- it needs a Docker Engine (although containerd support might be coming)
---
@@ -96,7 +96,7 @@ Compose will be smart, and only recreate the containers that have changed.
When working with interpreted languages:
- don't rebuild each time
- leverage a `volumes` section instead
@@ -250,24 +250,6 @@ For the full list, check: https://docs.docker.com/compose/compose-file/
---
## Configuring a Compose stack
- Follow [12-factor app configuration principles][12factorconfig]
(configure the app through environment variables)
- Provide (in the repo) a default environment file suitable for development
(no secret or sensitive value)
- Copy the default environment file to `.env` and tweak it
(or: provide a script to generate `.env` from a template)
[12factorconfig]: https://12factor.net/config
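A possible workflow (the file names here are assumptions):

```bash
cp dev.env .env                # start from the defaults committed to the repo
echo "DEBUG=1" >> .env         # local tweaks stay out of version control
docker-compose config          # view the resulting interpolated configuration
```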
---
## Running multiple copies of a stack
- Copy the stack in two different directories, e.g. `front` and `frontcopy`
@@ -349,7 +331,7 @@ Use `docker-compose down -v` to remove everything including volumes.
- The data in the old container is lost...
- ...Except if the container is using a *volume*
- Compose will then re-attach that volume to the new container
@@ -361,102 +343,6 @@ Use `docker-compose down -v` to remove everything including volumes.
---
## Gotchas with volumes
- Unfortunately, Docker volumes don't have labels or metadata
- Compose tracks volumes thanks to their associated container
- If the container is deleted, the volume gets orphaned
- Example: `docker-compose down && docker-compose up`
- the old volume still exists, detached from its container
- a new volume gets created
- `docker-compose down -v`/`--volumes` deletes volumes
(but **not** `docker-compose down && docker-compose down -v`!)
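One way to observe this (a sketch, assuming the stack uses an anonymous volume):

```bash
docker-compose up -d
docker volume ls        # note the volume used by the stack
docker-compose down     # containers removed, volume orphaned
docker-compose up -d    # a brand new volume gets created
docker volume ls        # the orphaned volume is still around
```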
---
## Managing volumes explicitly
Option 1: *named volumes*
```yaml
services:
app:
volumes:
- data:/some/path
volumes:
data:
```
- Volume will be named `<project>_data`
- It won't be orphaned with `docker-compose down`
- It will correctly be removed with `docker-compose down -v`
---
## Managing volumes explicitly
Option 2: *relative paths*
```yaml
services:
app:
volumes:
- ./data:/some/path
```
- Makes it easy to colocate the app and its data
(for migration, backups, disk usage accounting...)
- Won't be removed by `docker-compose down -v`
---
## Managing complex stacks
- Compose provides multiple features to manage complex stacks
(with many containers)
- `-f`/`--file`/`$COMPOSE_FILE` can be a list of Compose files
(separated by `:` and merged together)
- Services can be assigned to one or more *profiles*
- `--profile`/`$COMPOSE_PROFILE` can be a list of comma-separated profiles
(see [Using service profiles][profiles] in the Compose documentation)
- These variables can be set in `.env`
[profiles]: https://docs.docker.com/compose/profiles/
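For example (the file and profile names are hypothetical):

```bash
export COMPOSE_FILE=docker-compose.yml:docker-compose.debug.yml
docker-compose --profile debug up -d
```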
---
## Dependencies
- A service can have a `depends_on` section
(listing one or more other services)
- This is used when bringing up individual services
(e.g. `docker-compose up blah` or `docker-compose run foo`)
⚠️ It doesn't make a service "wait" for another one to be up!
---
class: extra-details
## A bit of history and trivia

View File

@@ -111,7 +111,7 @@ CMD ["python", "app.py"]
RUN wget http://.../foo.tar.gz \
&& tar -zxf foo.tar.gz \
&& mv foo/fooctl /usr/local/bin \
&& rm -rf foo foo.tar.gz
&& rm -rf foo
...
```

View File

@@ -317,11 +317,9 @@ class: extra-details
## Trash your servers and burn your code
*(This is the title of a
[2013 blog post][immutable-deployments]
[2013 blog post](http://chadfowler.com/2013/06/23/immutable-deployments.html)
by Chad Fowler, where he explains the concept of immutable infrastructure.)*
[immutable-deployments]: https://web.archive.org/web/20160305073617/http://chadfowler.com/blog/2013/06/23/immutable-deployments/
--
* Let's majorly mess up our container.

View File

@@ -32,432 +32,6 @@ The last item should be done for educational purposes only!
---
# Control groups
- Control groups provide resource *metering* and *limiting*.
- This covers a number of "usual suspects" like:
- memory
- CPU
- block I/O
- network (with cooperation from iptables/tc)
- And a few exotic ones:
- huge pages (a special way to allocate memory)
- RDMA (resources specific to InfiniBand / remote memory transfer)
---
## Crowd control
- Control groups also allow grouping processes for special operations:
- freezer (conceptually similar to a "mass-SIGSTOP/SIGCONT")
- perf_event (gather performance events on multiple processes)
- cpuset (limit or pin processes to specific CPUs)
- There is a "pids" cgroup to limit the number of processes in a given group.
- There is also a "devices" cgroup to control access to device nodes.
(i.e. everything in `/dev`.)
---
## Generalities
- Cgroups form a hierarchy (a tree).
- We can create nodes in that hierarchy.
- We can associate limits to a node.
- We can move a process (or multiple processes) to a node.
- The process (or processes) will then respect these limits.
- We can check the current usage of each node.
- In other words: limits are optional (if we only want accounting).
- When a process is created, it is placed in its parent's groups.
---
## Example
The numbers are PIDs.
The names are the names of our nodes (arbitrarily chosen).
.small[
```bash
cpu memory
├── batch ├── stateless
│ ├── cryptoscam │ ├── 25
│ │ └── 52 │ ├── 26
│ └── ffmpeg │ ├── 27
│ ├── 109 │ ├── 52
│ └── 88 │ ├── 109
└── realtime │ └── 88
├── nginx └── databases
│ ├── 25 ├── 1008
│ ├── 26 └── 524
│ └── 27
├── postgres
│ └── 524
└── redis
└── 1008
```
]
---
class: extra-details, deep-dive
## Cgroups v1 vs v2
- Cgroups v1 are available on all systems (and widely used).
- Cgroups v2 are a huge refactor.
(Development started in Linux 3.10, released in 4.5.)
- Cgroups v2 have a number of differences:
- single hierarchy (instead of one tree per controller),
- processes can only be on leaf nodes (not inner nodes),
- and of course many improvements / refactorings.
- Cgroups v2 enabled by default on Fedora 31 (2019), Ubuntu 21.10...
---
## Memory cgroup: accounting
- Keeps track of pages used by each group:
- file (read/write/mmap from block devices),
- anonymous (stack, heap, anonymous mmap),
- active (recently accessed),
- inactive (candidate for eviction).
- Each page is "charged" to a group.
- Pages can be shared across multiple groups.
(Example: multiple processes reading from the same files.)
- To view all the counters kept by this cgroup:
```bash
$ cat /sys/fs/cgroup/memory/memory.stat
```
---
## Memory cgroup v1: limits
- Each group can have (optional) hard and soft limits.
- Limits can be set for different kinds of memory:
- physical memory,
- kernel memory,
- total memory (including swap).
---
## Soft limits and hard limits
- Soft limits are not enforced.
(But they influence reclaim under memory pressure.)
- Hard limits *cannot* be exceeded:
- if a group of processes exceeds a hard limit,
- and if the kernel cannot reclaim any memory,
- then the OOM (out-of-memory) killer is triggered,
- and processes are killed until memory gets below the limit again.
---
class: extra-details, deep-dive
## Avoiding the OOM killer
- For some workloads (databases and stateful systems), killing
processes because we run out of memory is not acceptable.
- The "oom-notifier" mechanism helps with that.
- When "oom-notifier" is enabled and a hard limit is exceeded:
- all processes in the cgroup are frozen,
- a notification is sent to user space (instead of killing processes),
- user space can then raise limits, migrate containers, etc.,
- once the memory usage is below the hard limit, unfreeze the cgroup.
---
class: extra-details, deep-dive
## Overhead of the memory cgroup
- Each time a process grabs or releases a page, the kernel updates counters.
- This adds some overhead.
- Unfortunately, this cannot be enabled/disabled per process.
- It has to be done system-wide, at boot time.
- Also, when multiple groups use the same page:
- only the first group gets "charged",
- but if it stops using it, the "charge" is moved to another group.
---
class: extra-details, deep-dive
## Setting up a limit with the memory cgroup
Create a new memory cgroup:
```bash
$ CG=/sys/fs/cgroup/memory/onehundredmegs
$ sudo mkdir $CG
```
Limit it to approximately 100MB of memory usage:
```bash
$ sudo tee $CG/memory.memsw.limit_in_bytes <<< 100000000
```
Move the current process to that cgroup:
```bash
$ sudo tee $CG/tasks <<< $$
```
The current process *and all its future children* are now limited.
(Confused about `<<<`? Look at the next slide!)
---
class: extra-details, deep-dive
## What's `<<<`?
- This is a "here string". (It is a non-POSIX shell extension.)
- The following commands are equivalent:
```bash
foo <<< hello
```
```bash
echo hello | foo
```
```bash
foo <<EOF
hello
EOF
```
- Why did we use that?
---
class: extra-details, deep-dive
## Writing to cgroups pseudo-files requires root
Instead of:
```bash
sudo tee $CG/tasks <<< $$
```
We could have done:
```bash
sudo sh -c "echo $$ > $CG/tasks"
```
The following commands, however, would be invalid:
```bash
sudo echo $$ > $CG/tasks
```
```bash
sudo -i # (or su)
echo $$ > $CG/tasks
```
---
class: extra-details, deep-dive
## Testing the memory limit
Start the Python interpreter:
```bash
$ python
Python 3.6.4 (default, Jan 5 2018, 02:35:40)
[GCC 7.2.1 20171224] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>>
```
Allocate 80 megabytes:
```python
>>> s = "!" * 1000000 * 80
```
Add 20 megabytes more:
```python
>>> t = "!" * 1000000 * 20
Killed
```
---
## Memory cgroup v2: limits
- `memory.min` = hard reservation (guaranteed memory for this cgroup)
- `memory.low` = soft reservation ("*try* not to reclaim memory if we're below this")
- `memory.high` = soft limit (aggressively reclaim memory; don't trigger OOMK)
- `memory.max` = hard limit (triggers OOMK)
- `memory.swap.high` = aggressively reclaim memory when using that much swap
- `memory.swap.max` = prevent using more swap than this
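A sketch with the unified (v2) hierarchy; it assumes `/sys/fs/cgroup` is a cgroup2 mount and that the memory controller is enabled in the parent's `cgroup.subtree_control`:

```bash
CG=/sys/fs/cgroup/demo
sudo mkdir $CG
sudo tee $CG/memory.max <<< 100000000    # hard limit (OOMK above this)
sudo tee $CG/memory.high <<< 80000000    # soft limit (aggressive reclaim)
sudo tee $CG/cgroup.procs <<< $$         # move the current shell there
```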
---
## CPU cgroup
- Keeps track of CPU time used by a group of processes.
(This is easier and more accurate than `getrusage` and `/proc`.)
- Keeps track of usage per CPU as well.
(i.e., "this group of process used X seconds of CPU0 and Y seconds of CPU1".)
- Allows setting relative weights used by the scheduler.
---
## Cpuset cgroup
- Pin groups to specific CPU(s).
- Use-case: reserve CPUs for specific apps.
- Warning: make sure that "default" processes aren't using all CPUs!
- CPU pinning can also avoid performance loss due to cache flushes.
- This is also relevant for NUMA systems.
- Provides extra dials and knobs.
(Per zone memory pressure, process migration costs...)
---
## Blkio cgroup
- Keeps track of I/Os for each group:
- per block device
- read vs write
- sync vs async
- Set throttle (limits) for each group:
- per block device
- read vs write
- ops vs bytes
- Set relative weights for each group.
- Note: most writes go through the page cache.
<br/>(So classic writes will appear to be unthrottled at first.)
---
## Net_cls and net_prio cgroup
- Only works for egress (outgoing) traffic.
- Automatically set traffic class or priority
for traffic generated by processes in the group.
- Net_cls will assign traffic to a class.
- Classes have to be matched with tc or iptables, otherwise traffic just flows normally.
- Net_prio will assign traffic to a priority.
- Priorities are used by queuing disciplines.
---
## Devices cgroup
- Controls what the group can do on device nodes
- Permissions include read/write/mknod
- Typical use:
- allow `/dev/{tty,zero,random,null}` ...
- deny everything else
- A few interesting nodes:
- `/dev/net/tun` (network interface manipulation)
- `/dev/fuse` (filesystems in user space)
- `/dev/kvm` (VMs in containers, yay inception!)
- `/dev/dri` (GPU)
---
# Namespaces
- Provide processes with their own view of the system.
@@ -472,8 +46,6 @@ Killed
- uts
- ipc
- user
- time
- cgroup
(We are going to detail them individually.)
@@ -1047,25 +619,411 @@ class: extra-details, deep-dive
---
## Time namespace
- Virtualize time
- Expose a slower/faster clock to some processes
(for e.g. simulation purposes)
- Expose a clock offset to some processes
(simulation, suspend/restore...)
---
## Cgroup namespace
- Virtualize access to `/proc/<PID>/cgroup`
- Lets containerized processes view their relative cgroup tree
---
# Control groups
- Control groups provide resource *metering* and *limiting*.
- This covers a number of "usual suspects" like:
- memory
- CPU
- block I/O
- network (with cooperation from iptables/tc)
- And a few exotic ones:
- huge pages (a special way to allocate memory)
- RDMA (resources specific to InfiniBand / remote memory transfer)
---
## Crowd control
- Control groups also allow grouping processes for special operations:
- freezer (conceptually similar to a "mass-SIGSTOP/SIGCONT")
- perf_event (gather performance events on multiple processes)
- cpuset (limit or pin processes to specific CPUs)
- There is a "pids" cgroup to limit the number of processes in a given group.
- There is also a "devices" cgroup to control access to device nodes.
(i.e. everything in `/dev`.)
---
## Generalities
- Cgroups form a hierarchy (a tree).
- We can create nodes in that hierarchy.
- We can associate limits to a node.
- We can move a process (or multiple processes) to a node.
- The process (or processes) will then respect these limits.
- We can check the current usage of each node.
- In other words: limits are optional (if we only want accounting).
- When a process is created, it is placed in its parent's groups.
---
## Example
The numbers are PIDs.
The names are the names of our nodes (arbitrarily chosen).
.small[
```bash
cpu memory
├── batch ├── stateless
│ ├── cryptoscam │ ├── 25
│ │ └── 52 │ ├── 26
│ └── ffmpeg │ ├── 27
│ ├── 109 │ ├── 52
│ └── 88 │ ├── 109
└── realtime │ └── 88
├── nginx └── databases
│ ├── 25 ├── 1008
│ ├── 26 └── 524
│ └── 27
├── postgres
│ └── 524
└── redis
└── 1008
```
]
---
class: extra-details, deep-dive
## Cgroups v1 vs v2
- Cgroups v1 are available on all systems (and widely used).
- Cgroups v2 are a huge refactor.
(Development started in Linux 3.10, released in 4.5.)
- Cgroups v2 have a number of differences:
- single hierarchy (instead of one tree per controller),
- processes can only be on leaf nodes (not inner nodes),
- and of course many improvements / refactorings.
---
## Memory cgroup: accounting
- Keeps track of pages used by each group:
- file (read/write/mmap from block devices),
- anonymous (stack, heap, anonymous mmap),
- active (recently accessed),
- inactive (candidate for eviction).
- Each page is "charged" to a group.
- Pages can be shared across multiple groups.
(Example: multiple processes reading from the same files.)
- To view all the counters kept by this cgroup:
```bash
$ cat /sys/fs/cgroup/memory/memory.stat
```
---
## Memory cgroup: limits
- Each group can have (optional) hard and soft limits.
- Limits can be set for different kinds of memory:
- physical memory,
- kernel memory,
- total memory (including swap).
---
## Soft limits and hard limits
- Soft limits are not enforced.
(But they influence reclaim under memory pressure.)
- Hard limits *cannot* be exceeded:
- if a group of processes exceeds a hard limit,
- and if the kernel cannot reclaim any memory,
- then the OOM (out-of-memory) killer is triggered,
- and processes are killed until memory gets below the limit again.
---
class: extra-details, deep-dive
## Avoiding the OOM killer
- For some workloads (databases and stateful systems), killing
processes because we run out of memory is not acceptable.
- The "oom-notifier" mechanism helps with that.
- When "oom-notifier" is enabled and a hard limit is exceeded:
- all processes in the cgroup are frozen,
- a notification is sent to user space (instead of killing processes),
- user space can then raise limits, migrate containers, etc.,
- once the memory usage is below the hard limit, unfreeze the cgroup.
---
class: extra-details, deep-dive
## Overhead of the memory cgroup
- Each time a process grabs or releases a page, the kernel updates counters.
- This adds some overhead.
- Unfortunately, this cannot be enabled/disabled per process.
- It has to be done system-wide, at boot time.
- Also, when multiple groups use the same page:
- only the first group gets "charged",
- but if it stops using it, the "charge" is moved to another group.
---
class: extra-details, deep-dive
## Setting up a limit with the memory cgroup
Create a new memory cgroup:
```bash
$ CG=/sys/fs/cgroup/memory/onehundredmegs
$ sudo mkdir $CG
```
Limit it to approximately 100MB of memory usage:
```bash
$ sudo tee $CG/memory.memsw.limit_in_bytes <<< 100000000
```
Move the current process to that cgroup:
```bash
$ sudo tee $CG/tasks <<< $$
```
The current process *and all its future children* are now limited.
(Confused about `<<<`? Look at the next slide!)
---
class: extra-details, deep-dive
## What's `<<<`?
- This is a "here string". (It is a non-POSIX shell extension.)
- The following commands are equivalent:
```bash
foo <<< hello
```
```bash
echo hello | foo
```
```bash
foo <<EOF
hello
EOF
```
- Why did we use that?
---
class: extra-details, deep-dive
## Writing to cgroups pseudo-files requires root
Instead of:
```bash
sudo tee $CG/tasks <<< $$
```
We could have done:
```bash
sudo sh -c "echo $$ > $CG/tasks"
```
The following commands, however, would be invalid:
```bash
sudo echo $$ > $CG/tasks
```
```bash
sudo -i # (or su)
echo $$ > $CG/tasks
```
---
class: extra-details, deep-dive
## Testing the memory limit
Start the Python interpreter:
```bash
$ python
Python 3.6.4 (default, Jan 5 2018, 02:35:40)
[GCC 7.2.1 20171224] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>>
```
Allocate 80 megabytes:
```python
>>> s = "!" * 1000000 * 80
```
Add 20 megabytes more:
```python
>>> t = "!" * 1000000 * 20
Killed
```
---
## CPU cgroup
- Keeps track of CPU time used by a group of processes.
(This is easier and more accurate than `getrusage` and `/proc`.)
- Keeps track of usage per CPU as well.
(i.e., "this group of process used X seconds of CPU0 and Y seconds of CPU1".)
- Allows setting relative weights used by the scheduler.
---
## Cpuset cgroup
- Pin groups to specific CPU(s).
- Use-case: reserve CPUs for specific apps.
- Warning: make sure that "default" processes aren't using all CPUs!
- CPU pinning can also avoid performance loss due to cache flushes.
- This is also relevant for NUMA systems.
- Provides extra dials and knobs.
(Per zone memory pressure, process migration costs...)
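A sketch with cgroups v1 paths:

```bash
CG=/sys/fs/cgroup/cpuset/pinned
sudo mkdir $CG
sudo tee $CG/cpuset.cpus <<< 0-1   # allow CPUs 0 and 1 only
sudo tee $CG/cpuset.mems <<< 0     # memory node; required before adding tasks
sudo tee $CG/tasks <<< $$
```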
---
## Blkio cgroup
- Keeps track of I/Os for each group:
- per block device
- read vs write
- sync vs async
- Set throttle (limits) for each group:
- per block device
- read vs write
- ops vs bytes
- Set relative weights for each group.
- Note: most writes go through the page cache.
<br/>(So classic writes will appear to be unthrottled at first.)
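A sketch with cgroups v1 paths (8:0 is typically `/dev/sda`):

```bash
CG=/sys/fs/cgroup/blkio/slowreads
sudo mkdir $CG
sudo tee $CG/blkio.throttle.read_bps_device <<< "8:0 1048576"  # 1 MB/s reads
sudo tee $CG/tasks <<< $$
```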
---
## Net_cls and net_prio cgroup
- Only works for egress (outgoing) traffic.
- Automatically set traffic class or priority
for traffic generated by processes in the group.
- Net_cls will assign traffic to a class.
- Classes have to be matched with tc or iptables, otherwise traffic just flows normally.
- Net_prio will assign traffic to a priority.
- Priorities are used by queuing disciplines.
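A sketch with cgroups v1 paths (the classid and the iptables rule are illustrative):

```bash
CG=/sys/fs/cgroup/net_cls/tagged
sudo mkdir $CG
sudo tee $CG/net_cls.classid <<< 0x00100001   # class 10:1
sudo tee $CG/tasks <<< $$
# Match that class in netfilter (xt_cgroup legacy classid match):
sudo iptables -A OUTPUT -m cgroup --cgroup 0x00100001 -j ACCEPT
```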
---
## Devices cgroup
- Controls what the group can do on device nodes
- Permissions include read/write/mknod
- Typical use:
- allow `/dev/{tty,zero,random,null}` ...
- deny everything else
- A few interesting nodes:
- `/dev/net/tun` (network interface manipulation)
- `/dev/fuse` (filesystems in user space)
- `/dev/kvm` (VMs in containers, yay inception!)
- `/dev/dri` (GPU)
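A sketch with cgroups v1 paths (`c 1:3` is the char device `/dev/null`):

```bash
CG=/sys/fs/cgroup/devices/restricted
sudo mkdir $CG
sudo tee $CG/devices.deny <<< a               # deny all devices
sudo tee $CG/devices.allow <<< "c 1:3 rwm"    # allow /dev/null
```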
---
@@ -1168,8 +1126,8 @@ See `man capabilities` for the full list and details.
???
:EN:Containers internals
:EN:- Control groups (cgroups)
:EN:- Linux kernel namespaces
:EN:- Control groups (cgroups)
:FR:Fonctionnement interne des conteneurs
:FR:- Les "control groups" (cgroups)
:FR:- Les namespaces du noyau Linux
:FR:- Les "control groups" (cgroups)

View File

@@ -109,7 +109,7 @@ class: extra-details
- Example: [ctr.run](https://ctr.run/)
.lab[
.exercise[
- Use ctr.run to automatically build a container image and run it:
```bash

View File

@@ -13,7 +13,7 @@
- ... Or be comfortable spending some time reading the Docker
[documentation](https://docs.docker.com/) ...
- ... And looking for answers in the [Docker forums](https://forums.docker.com),
[StackOverflow](http://stackoverflow.com/questions/tagged/docker),
and other outlets
@@ -28,7 +28,7 @@ class: self-paced
- Likewise, it will take more than merely *reading* these slides
to make you an expert
- These slides include *tons* of demos, exercises, and examples
- These slides include *tons* of exercises and examples
- They assume that you have access to a machine running Docker

View File

@@ -1,68 +0,0 @@
title: |
Advanced Kubernetes
CRAFT 2022
chat: "[Slack](https://craftconf.slack.com/archives/C03J4BP4SKA)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2022-05-craft.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/prereqs.md
- shared/webssh.md
- shared/connecting.md
- shared/toc.md
- exercises/sealed-secrets-brief.md
- exercises/kyverno-ingress-domain-name-brief.md
- #1
- k8s/demo-apps.md
- k8s/netpol.md
- k8s/authn-authz.md
- k8s/sealed-secrets.md
#- k8s/cert-manager.md
#- k8s/ingress-tls.md
- exercises/sealed-secrets-details.md
- #2
- k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
- k8s/apiserver-deepdive.md
- k8s/aggregation-layer.md
- k8s/hpa-v2.md
- #3
- k8s/extending-api.md
- k8s/crd.md
- k8s/operators.md
- k8s/admission.md
- k8s/kyverno.md
- exercises/kyverno-ingress-domain-name-details.md
- #4
- k8s/statefulsets.md
- k8s/consul.md
- k8s/pv-pvc-sc.md
- k8s/volume-claim-templates.md
#- k8s/eck.md
#- k8s/portworx.md
- k8s/openebs.md
- k8s/stateful-failover.md
- #Extra
- |
# (Extra materials)
- k8s/operators-design.md
- k8s/operators-example.md
- k8s/owners-and-dependents.md
- k8s/events.md
- k8s/finalizers.md

View File

@@ -1,5 +0,0 @@
## Exercise — Application Configuration
- Configure an application with a ConfigMap
- Generate configuration file from the downward API

View File

@@ -1,87 +0,0 @@
# Exercise — Application Configuration
- We want to configure an application with a ConfigMap
- We will use the "rainbow" example shown previously
(HAProxy load balancing traffic to services in multiple namespaces)
- We won't provide the HAProxy configuration file
- Instead, we will provide a list of namespaces
(e.g. as a space-delimited list in a ConfigMap)
- Our Pod should generate the HAProxy configuration using the ConfigMap
---
## Setup
- Let's say that we have the "rainbow" app deployed:
```bash
kubectl apply -f ~/container.training/k8s/rainbow.yaml
```
- And a ConfigMap like the following one:
```bash
kubectl create configmap rainbow --from-literal=namespaces="blue green"
```
---
## Goal 1
- We want a Deployment and a Service called `rainbow`
- The `rainbow` Service should load balance across Namespaces `blue` and `green`
(i.e. to the Services called `color` in both these Namespaces)
- We want to be able to update the configuration:
- update the ConfigMap to put `blue green red`
- what should we do so that HAProxy picks up the change?
---
## Goal 2
- Check what happens if we specify a backend that doesn't exist
(e.g. add `purple` to the list of namespaces)
- If we specify invalid backends to HAProxy, it won't start!
- Implement one of these two workarounds:
- remove invalid backends from the list before starting HAProxy
- wait until all backends are valid before starting HAProxy
---
## Goal 3
- We'd like HAProxy to pick up ConfigMap updates automatically
- How can we do that?
---
## Hints
- Check the following slides if you need help!
--
- We want to generate the HAProxy configuration in an `initContainer`
--
- The `namespaces` entry of the `rainbow` ConfigMap should be exposed to the `initContainer`
--
- The HAProxy configuration should be in a volume shared with HAProxy

View File

@@ -1,7 +0,0 @@
## Exercise — Build a Cluster
- Deploy a cluster by configuring and running each component manually
- Add CNI networking
- Generate and validate ServiceAccount tokens

View File

@@ -1,33 +0,0 @@
# Exercise — Build a Cluster
- Step 1: deploy a cluster
- follow the steps in the "Dessine-moi un cluster" section
- Step 2: add CNI networking
- use kube-router
- interconnect with the route-reflector
- check that you receive the routes of other clusters
- Step 3: generate and validate ServiceAccount tokens
- see next slide for help!
---
## ServiceAccount tokens
- We need to generate a TLS key pair and certificate
- A self-signed key will work
- We don't need anything particular in the certificate
(no particular CN, key use flags, etc.)
- The key needs to be passed to both API server and controller manager
- Check that ServiceAccount tokens are generated correctly
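A sketch of the key generation part (the control plane flags shown are the standard upstream ones; double-check them against your Kubernetes version):

```bash
openssl genrsa -out sa.key 2048
openssl req -new -x509 -key sa.key -out sa.crt -days 365 -subj /CN=sa
# kube-apiserver:          --service-account-key-file=sa.key
# kube-controller-manager: --service-account-private-key-file=sa.key
```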

View File

@@ -4,6 +4,8 @@
(we will use the `rng` service in the dockercoins app)
- See what happens when the load increases
- Observe the correct behavior of the readiness probe
(spoiler alert: it involves timeouts!)
(when deploying e.g. an invalid image)
- Observe the behavior of the liveness probe

View File

@@ -2,85 +2,36 @@
- We want to add healthchecks to the `rng` service in dockercoins
- The `rng` service exhibits an interesting behavior under load:
*its latency increases (which will cause probes to time out!)*
- We want to see:
- what happens when the readiness probe fails
- what happens when the liveness probe fails
- how to set "appropriate" probes and probe parameters
---
## Setup
- First, deploy a new copy of dockercoins
(for instance, in a brand new namespace)
- Then, add a readiness probe on the `rng` service
- Pro tip #1: ping (e.g. with `httping`) the `rng` service at all times
- it should initially show a few milliseconds latency
- that will increase when we scale up
- it will also let us detect when the service goes "boom"
- Pro tip #2: also keep an eye on the web UI
---
## Readiness
- Add a readiness probe to `rng`
- this requires editing the pod template in the Deployment manifest
- use a simple HTTP check on the `/` route of the service
- keep all other parameters (timeouts, thresholds...) at their default values
(using a simple HTTP check on the `/` route of the service)
- Check what happens when deploying an invalid image for `rng` (e.g. `alpine`)
*(If the probe was set up correctly, the app will continue to work,
because Kubernetes won't switch traffic over to the `alpine` containers,
since they don't pass the readiness probe.)*
- Then roll back `rng` to the original image and add a liveness probe
(with the same parameters)
- Scale up the `worker` service (to 15+ workers) and observe
- What happens, and how can we improve the situation?
---
## Goal
- *Before* adding the readiness probe:
updating the image of the `rng` service with `alpine` should break it
- *After* adding the readiness probe:
updating the image of the `rng` service with `alpine` shouldn't break it
- When adding the liveness probe, nothing special should happen
- Scaling the `worker` service will then cause disruptions
- The final goal is to understand why, and how to fix it
---
## Readiness under load
- Then roll back `rng` to the original image
- Check what happens when we scale up the `worker` Deployment to 15+ workers
(get the latency above 1 second)
*(We should now observe intermittent unavailability of the service, i.e. every
30 seconds it will be unreachable for a bit, then come back, then go away again, etc.)*
---
## Liveness
- Now replace the readiness probe with a liveness probe
- What happens now?
*(At first the behavior looks the same as with the readiness probe:
service becomes unreachable, then reachable again, etc.; but there is
a significant difference behind the scenes. What is it?)*
---
## Readiness and liveness
- Bonus questions!
- What happens if we enable both probes at the same time?
- What strategies can we use so that both probes are useful?

View File

@@ -6,7 +6,7 @@
- the web app itself (dockercoins, NGINX, whatever we want)
- an ingress controller
- an ingress controller (we suggest Traefik)
- a domain name (use `\*.nip.io` or `\*.localdev.me`)
@@ -16,7 +16,7 @@
## Goal
- We want to be able to access the web app using a URL like:
http://webapp.localdev.me
@@ -30,13 +30,11 @@
## Hints
- For the ingress controller, we can use:
- Traefik can be installed with Helm
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/index.md)
(it can be found on the Artifact Hub)
- the [Traefik Helm chart](https://doc.traefik.io/traefik/getting-started/install-traefik/#use-the-helm-chart)
- the container.training [Traefik DaemonSet](https://raw.githubusercontent.com/jpetazzo/container.training/main/k8s/traefik-v2.yaml)
- If using Kubernetes 1.22+, make sure to use Traefik 2.5+
- If our cluster supports LoadBalancer Services: easy

View File

@@ -1,5 +1,3 @@
⚠️ BROKEN EXERCISE - DO NOT USE
## Exercise — Ingress Secret Policy
*Implement policy to limit impact of ingress controller vulnerabilities.*

View File

@@ -1,5 +1,3 @@
⚠️ BROKEN EXERCISE - DO NOT USE
# Exercise — Ingress Secret Policy
- Most ingress controllers have access to all Secrets
@@ -10,86 +8,56 @@
(by allowing attacker to access all secrets, including API tokens)
- See for instance [CVE-2021-25742](https://github.com/kubernetes/ingress-nginx/issues/7837)
- How can we prevent that?
---
## Step 1: Ingress Controller
## Preparation
- Deploy an Ingress Controller
(e.g. Traefik or NGINX; you can use @@LINK[k8s/traefik-v2.yaml])
- Create a trivial web app (e.g. NGINX, `jpetazzo/color`...)
- Expose it with an Ingress
(e.g. use `app.<ip-address>.nip.io`)
- Check that you can access it through `http://app.<ip-address>.nip.io`
---
## Step 2: cert-manager
- Deploy an ingress controller
- Deploy cert-manager
- Create a ClusterIssuer using Let's Encrypt staging environment
- Create a ClusterIssuer using Let's Encrypt
(e.g. with @@LINK[k8s/cm-clusterissuer.yaml])
(suggestion: also create a ClusterIssuer using LE's staging env)
- Create a trivial web app (e.g. NGINX, `jpetazzo/color`...)
- Create an Ingress for the app, with TLS enabled
(e.g. use `appsecure.<ip-address>.nip.io`)
- Tell cert-manager to obtain a certificate for that Ingress
- option 1: manually create a Certificate (e.g. with @@LINK[k8s/cm-certificate.yaml])
- option 2: use the `cert-manager.io/cluster-issuer` annotation
- Check that the Let's Encrypt certificate was issued
(suggestion: use the `cert-manager.io/cluster-issuer` annotation)
---
## Step 3: RBAC
## Strategy
- Remove the Ingress Controller's permission to read all Secrets
- Remove the ingress controller's permission to read all Secrets
- Restart the Ingress Controller
- Grant selective access to Secrets
- Check that https://appsecure doesn't serve the Let's Encrypt cert
(only give access to secrets that hold ingress TLS keys and certs)
- Grant permission to read the certificate's Secret
- Automatically grant access by using Kyverno's "generate" mechanism
- Check that https://appsecure serves the Let's Encrypt cert again
(automatically create Role + RoleBinding when Certificate is created)
- Bonus: think about threat model for an insider attacker
(and how to mitigate it)
---
## Step 4: Kyverno
## Goal
- Install Kyverno
- When a Certificate (cert-manager CRD) is created, automatically create:
- Write a Kyverno policy to automatically grant permission to read Secrets
- A Role granting read access to the Certificate's Secret
(e.g. when a cert-manager Certificate is created)
- A RoleBinding granting that Role to our Ingress controller
- Check @@LINK[k8s/kyverno-namespace-setup.yaml] for inspiration
- Check that the Ingress controller TLS still works
- Hint: you need to automatically create a Role and RoleBinding
- Create another app + another Ingress with TLS
- Check that the Certificate, Secret, Role, RoleBinding are created
- Check that the new app correctly serves the Let's Encrypt cert
---
## Step 5: double-check
- Check that the Ingress Controller can't access other secrets
(e.g. by manually creating a Secret and checking with `kubectl exec`?)
- ...But that the Ingress controller can't read other secrets

View File

@@ -8,37 +8,25 @@
- We'll use one Deployment for each component
(created with `kubectl create deployment`)
(see next slide for the images to use)
- We'll connect them with Services
(created with `kubectl expose`)
- We'll check that we can access the web UI in a browser
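For example, for one of the components (a sketch):

```bash
# One Deployment plus one internal Service for the redis component.
kubectl create deployment redis --image=redis
kubectl expose deployment redis --port 6379
```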
---
## Images
- We'll use the following images:
- hasher → `dockercoins/hasher:v0.1`
- redis → `redis`
- rng → `dockercoins/rng:v0.1`
- webui → `dockercoins/webui:v0.1`
- worker → `dockercoins/worker:v0.1`
- All services should be internal services, except the web UI
(since we want to be able to connect to the web UI from outside)
---
class: pic
![Dockercoins architecture diagram](images/dockercoins-diagram.png)
---
@@ -46,7 +34,7 @@ class: pic
- We should be able to see the web UI in our browser
(with the graph showing approximately 3-4 hashes/second)
---
@@ -56,4 +44,4 @@ class: pic
(check the logs of the worker; they indicate the port numbers)
- The web UI can be exposed with a NodePort or LoadBalancer Service
- The web UI can be exposed with a NodePort Service

Some files were not shown because too many files have changed in this diff.