Compare commits

..

19 Commits

Author SHA1 Message Date
Gerry S
2d54606bc8 Added Ingress to Extras 2022-07-29 10:08:16 -04:00
Gerry S
663e56bc1d Reorg TOC to match class; added alias+refs; added exploring-images 2022-07-29 09:53:55 -04:00
Gerry S
0863210811 Added Survey to Logistics 2022-07-25 09:25:57 -04:00
Gerry S
990b669e3a Added Survey to Logistics 2022-07-25 09:19:17 -04:00
Gerry S
8fca9536e0 Added Survey to Logistics 2022-07-25 09:15:19 -04:00
Gerry S
9404603cf6 Gerry Logistics Template 2022-07-25 09:07:40 -04:00
Gerry S
047bed4acd Added Macro View and Consolidated Training Environment 2022-07-25 08:54:34 -04:00
Jérôme Petazzoni
7d6871ad82 Fix heading space in YAML 2022-07-22 12:17:44 +02:00
Gerry S
b3717dafe9 Still poking to figure out why generated website doesn't work 2022-07-22 11:48:40 +02:00
Gerry S
b983cec512 Giving Up 2022-07-22 11:48:40 +02:00
Gerry S
eca21c017d Fake2 2022-07-22 11:48:40 +02:00
Gerry S
ce00e97d72 Fake 2022-07-22 11:48:40 +02:00
Gerry S
0f0a1482b1 Re-Reverted pp.yml 2022-07-22 11:48:40 +02:00
Gerry S
56babaead2 Reverting pp.yml 2022-07-22 11:48:40 +02:00
Gerry S
8d36d5edd8 Whiteboard 2022-07-22 11:48:40 +02:00
Gerry S
042404019c fix 2022-07-22 11:48:40 +02:00
Gerry S
70b566b0fe Whiteboard Discussion 2022-07-22 11:48:40 +02:00
Gerry S
c5e30e01df Removed Reference to undefined contiainer ticktock 2022-07-22 11:48:40 +02:00
Jérôme Petazzoni
b31ed37aee Proofpoint July session (20 hours over 5 days) 2022-07-22 11:48:33 +02:00
559 changed files with 36200 additions and 29704 deletions

View File

@@ -1,26 +0,0 @@
{
"name": "container.training environment to get started with Docker and/or Kubernetes",
"image": "ghcr.io/jpetazzo/shpod",
"features": {
//"ghcr.io/devcontainers/features/common-utils:2": {}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [],
//"postCreateCommand": "... install extra packages...",
"postStartCommand": "dind.sh ; kind.sh",
// This lets us use "docker-outside-docker".
// Unfortunately, minikube, kind, etc. don't work very well that way;
// so for now, we'll likely use "docker-in-docker" instead (with a
privileged container). But we're still exposing that socket in case
// someone wants to do something interesting with it.
"mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],
// This is for docker-in-docker.
"privileged": true,
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
"remoteUser": "k8s"
}

23
.gitignore vendored
View File

@@ -2,23 +2,23 @@
*.swp
*~
**/terraform.tfstate
**/terraform.tfstate.backup
prepare-labs/terraform/lab-environments
prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
prepare-tf/.terraform*
prepare-tf/terraform.*
prepare-tf/stage2/*.tf
prepare-tf/stage2/kubeconfig.*
prepare-tf/stage2/.terraform*
prepare-tf/stage2/terraform.*
prepare-tf/stage2/externalips.*
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
slides/_academy_*
slides/fragments
node_modules
### macOS ###
@@ -32,4 +32,3 @@ node_modules
Thumbs.db
ehthumbs.db
ehthumbs_vista.db

View File

@@ -1,24 +1,26 @@
services:
version: "2"
services:
rng:
build: rng
ports:
- "8001:80"
- "8001:80"
hasher:
build: hasher
ports:
- "8002:80"
- "8002:80"
webui:
build: webui
ports:
- "8000:80"
- "8000:80"
volumes:
- "./webui/files/:/files/"
- "./webui/files/:/files/"
redis:
image: redis
worker:
build: worker

View File

@@ -1,8 +1,7 @@
FROM ruby:alpine
WORKDIR /app
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN gem install sinatra
RUN gem install thin
COPY hasher.rb .
CMD ["ruby", "hasher.rb", "-o", "::"]
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80

View File

@@ -2,6 +2,7 @@ require 'digest'
require 'sinatra'
require 'socket'
set :bind, '0.0.0.0'
set :port, 80
post '/' do

View File

@@ -1,7 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run", "--without-threads"]
COPY rng.py /
CMD ["python", "rng.py"]
EXPOSE 80

View File

@@ -28,5 +28,5 @@ def rng(how_many_bytes):
if __name__ == "__main__":
app.run(port=80)
app.run(host="0.0.0.0", port=80, threaded=False)

View File

@@ -1,8 +1,7 @@
FROM node:23-alpine
WORKDIR /app
FROM node:4-slim
RUN npm install express
RUN npm install morgan
RUN npm install redis@5
COPY . .
RUN npm install redis@3
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]
EXPOSE 80

View File

@@ -1,34 +1,26 @@
import express from 'express';
import morgan from 'morgan';
import { createClient } from 'redis';
var client = await createClient({
url: "redis://redis",
socket: {
family: 0
}
})
.on("error", function (err) {
console.error("Redis error", err);
})
.connect();
var express = require('express');
var app = express();
var redis = require('redis');
app.use(morgan('common'));
var client = redis.createClient(6379, 'redis');
client.on("error", function (err) {
console.error("Redis error", err);
});
app.get('/', function (req, res) {
res.redirect('/index.html');
});
app.get('/json', async(req, res) => {
var coins = await client.hLen('wallet');
var hashes = await client.get('hashes');
var now = Date.now() / 1000;
res.json({
coins: coins,
hashes: hashes,
now: now
app.get('/json', function (req, res) {
client.hlen('wallet', function (err, coins) {
client.get('hashes', function (err, hashes) {
var now = Date.now() / 1000;
res.json( {
coins: coins,
hashes: hashes,
now: now
});
});
});
});

View File

@@ -1,6 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install redis
RUN pip install requests
COPY worker.py .
COPY worker.py /
CMD ["python", "worker.py"]

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
data:
use-forwarded-headers: true
compute-full-forwarded-for: true
use-proxy-protocol: true

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: ingress-nginx

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
target:
kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
target:
kind: Service

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
annotations:
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: true
service.beta.kubernetes.io/scw-loadbalancer-use-hostname: true

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: kyverno

View File

@@ -1,72 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: flux-multi-tenancy
spec:
validationFailureAction: enforce
rules:
- name: serviceAccountName
exclude:
resources:
namespaces:
- flux-system
match:
resources:
kinds:
- Kustomization
- HelmRelease
validate:
message: ".spec.serviceAccountName is required"
pattern:
spec:
serviceAccountName: "?*"
- name: kustomizationSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- Kustomization
preconditions:
any:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"
- name: helmReleaseSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- HelmRelease
preconditions:
any:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"

View File

@@ -1,29 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana
namespace: monitoring
spec:
ingressClassName: nginx
rules:
- host: grafana.test.metal.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kube-prometheus-stack-grafana
port:
number: 80

View File

@@ -1,35 +0,0 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-webui
spec:
podSelector:
matchLabels:
app: web
ingress:
- from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-db
spec:
podSelector:
matchLabels:
app: db
ingress:
- from:
- podSelector:
matchLabels:
app: web

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: openebs

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
files:
- values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml

View File

@@ -1,6 +0,0 @@
nameReference:
- kind: ConfigMap
version: v1
fieldSpecs:
- path: spec/valuesFrom/name
kind: HelmRelease

View File

@@ -1,15 +0,0 @@
# helm install openebs --namespace openebs openebs/openebs
# --set engines.replicated.mayastor.enabled=false
# --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
# --create-namespace
engines:
replicated:
mayastor:
enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
lvmNode:
kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
hostpathClass:
isDefaultClass: true

View File

@@ -1,38 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
namespace: rocky-test
name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rocky-pv-access
rules:
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
toolkit.fluxcd.io/tenant: rocky
name: rocky-reconciler2
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: gotk:rocky-test:reconciler
- kind: ServiceAccount
name: rocky
namespace: rocky-test

View File

@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rocky
namespace: rocky-test
spec:
ingressClassName: nginx
rules:
- host: rocky.test.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: web
port:
number: 80

View File

@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
target:
kind: Kustomization

View File

@@ -1,7 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: rocky
namespace: rocky-test
spec:
path: ./k8s/plain

View File

@@ -1,33 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 1
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- name: "80"
port: 80
selector:
app: blue

View File

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,8 +253,8 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
@@ -262,7 +262,7 @@ spec:
- --sidecar-host=http://127.0.0.1:8000
- --enable-skip-login
- --enable-insecure-login
image: kubernetesui/dashboard:v2.7.0
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -293,7 +293,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,15 +253,15 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.7.0
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -292,7 +292,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@@ -17,8 +17,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
@@ -30,8 +30,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
@@ -43,8 +43,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
@@ -56,8 +56,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
@@ -71,8 +71,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
@@ -84,8 +84,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
rules:
- apiGroups:
@@ -106,8 +106,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
@@ -182,8 +182,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
@@ -204,8 +204,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
@@ -253,15 +253,15 @@ spec:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-dashboard
app.kubernetes.io/version: 2.7.0
helm.sh/chart: kubernetes-dashboard-6.0.0
app.kubernetes.io/version: 2.5.0
helm.sh/chart: kubernetes-dashboard-5.2.0
spec:
containers:
- args:
- --namespace=kubernetes-dashboard
- --auto-generate-certificates
- --sidecar-host=http://127.0.0.1:8000
image: kubernetesui/dashboard:v2.7.0
image: kubernetesui/dashboard:v2.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -292,7 +292,7 @@ spec:
name: kubernetes-dashboard-certs
- mountPath: /tmp
name: tmp-volume
- image: kubernetesui/metrics-scraper:v1.0.8
- image: kubernetesui/metrics-scraper:v1.0.7
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -344,12 +344,3 @@ metadata:
creationTimestamp: null
name: cluster-admin
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: cluster-admin-token
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: cluster-admin

View File

@@ -16,7 +16,8 @@ spec:
hostPath:
path: /root
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
@@ -26,7 +27,7 @@ spec:
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx

View File

@@ -12,5 +12,5 @@ listen very-basic-load-balancer
server blue color.blue.svc:80
server green color.green.svc:80
### Note: the services above must exist,
### otherwise HAproxy won't start.
# Note: the services above must exist,
# otherwise HAproxy won't start.

View File

@@ -1,5 +1,5 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
apiVersion: autoscaling/v2beta2
metadata:
name: rng
spec:

View File

@@ -1,12 +0,0 @@
# This removes the haproxy Deployment.
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
$patch: delete
kind: Deployment
apiVersion: apps/v1
metadata:
name: haproxy

View File

@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
# Within a Kustomization, it is not possible to specify in which
# order transformations (patches, replacements, etc) should be
# executed. If we want to execute transformations in a specific
# order, one possibility is to put them in individual components,
# and then invoke these components in the order we want.
# It works, but it creates an extra level of indirection, which
# reduces readability and complicates maintenance.
components:
- setup
- cleanup

View File

@@ -1,20 +0,0 @@
global
#log stdout format raw local0
#daemon
maxconn 32
defaults
#log global
timeout client 1h
timeout connect 1h
timeout server 1h
mode http
option abortonclose
frontend metrics
bind :9000
http-request use-service prometheus-exporter
frontend ollama_frontend
bind :8000
default_backend ollama_backend
maxconn 16
backend ollama_backend
server ollama_server localhost:11434 check

View File

@@ -1,39 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: haproxy
name: haproxy
spec:
selector:
matchLabels:
app: haproxy
template:
metadata:
labels:
app: haproxy
spec:
volumes:
- name: haproxy
configMap:
name: haproxy
containers:
- image: haproxy:3.0
name: haproxy
volumeMounts:
- name: haproxy
mountPath: /usr/local/etc/haproxy
readinessProbe:
httpGet:
port: 9000
ports:
- name: haproxy
containerPort: 8000
- name: metrics
containerPort: 9000
resources:
requests:
cpu: 0.05
limits:
cpu: 1

View File

@@ -1,75 +0,0 @@
# This adds a sidecar to the ollama Deployment, by taking
# the pod template and volumes from the haproxy Deployment.
# The idea is to allow running ollama+haproxy in two modes:
# - separately (each with their own Deployment),
# - together in the same Pod, sidecar-style.
# The YAML files define how to run them separately, and this
# "replacements" directive fetches a specific volume and
# a specific container from the haproxy Deployment, to add
# them to the ollama Deployment.
#
# This would be simpler if kustomize allowed to append or
# merge lists in "replacements"; but it doesn't seem to be
# possible at the moment.
#
# It would be even better if kustomize allowed to perform
# a strategic merge using a fieldPath as the source, because
# we could merge both the containers and the volumes in a
# single operation.
#
# Note that technically, it might be possible to layer
# multiple kustomizations so that one generates the patch
# to be used in another; but it wouldn't be very readable
# or maintainable so we decided to not do that right now.
#
# However, the current approach (fetching fields one by one)
# has an advantage: it could let us transform the haproxy
# container into a real sidecar (i.e. an initContainer with
# a restartPolicy=Always).
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
resources:
- haproxy.yaml
configMapGenerator:
- name: haproxy
files:
- haproxy.cfg
replacements:
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.volumes.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.volumes.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.containers.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy].ports.[name=haproxy].containerPort
targets:
- select:
kind: Service
name: ollama
fieldPaths:
- spec.ports.[name=11434].targetPort

View File

@@ -1,34 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 2
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- port: 80
selector:
app: blue

View File

@@ -1,94 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Each of these YAML files contains a Deployment and a Service.
# The blue.yaml file is here just to demonstrate that the rest
# of this Kustomization can be precisely scoped to the ollama
# Deployment (and Service): the blue Deployment and Service
# shouldn't be affected by our kustomize transformers.
resources:
- ollama.yaml
- blue.yaml
buildMetadata:
# Add a label app.kubernetes.io/managed-by=kustomize-vX.Y.Z
- managedByLabel
# Add an annotation config.kubernetes.io/origin, indicating:
# - which file defined that resource;
# - if it comes from a git repository, which one, and which
# ref (tag, branch...) it was.
- originAnnotations
# Add an annotation alpha.config.kubernetes.io/transformations
# indicating which patches and other transformers have changed
# each resource.
- transformerAnnotations
# Let's generate a ConfigMap with literal values.
# Note that this will actually add a suffix to the name of the
# ConfigMaps (e.g.: ollama-8bk8bd8m76) and it will update all
# references to the ConfigMap (e.g. in Deployment manifests)
# accordingly. The suffix is a hash of the ConfigMap contents,
# so that basically, if the ConfigMap is edited, any workload
# using that ConfigMap will automatically do a rolling update.
configMapGenerator:
- name: ollama
literals:
- "model=gemma3:270m"
- "prompt=If you visit Paris, I suggest that you"
- "queue=4"
name: ollama
patches:
# The Deployment manifest in ollama.yaml doesn't specify
# resource requests and limits, so that it can run on any
# cluster (including resource-constrained local clusters
# like KiND or minikube). The example below adds CPU
# requests and limits using a strategic merge patch.
# The patch is inlined here, but it could also be put
# in a file and referenced with "path: xxxxxx.yaml".
- patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
spec:
template:
spec:
containers:
- name: ollama
resources:
requests:
cpu: 1
limits:
cpu: 2
# This will have the same effect, with one little detail:
# JSON patches cannot specify containers by name, so this
# assumes that the ollama container is the first one in
# the pod template (whereas the strategic merge patch can
# use "merge keys" and identify containers by their name).
#- target:
# kind: Deployment
# name: ollama
# patch: |
# - op: add
# path: /spec/template/spec/containers/0/resources
# value:
# requests:
# cpu: 1
# limits:
# cpu: 2
# A "component" is a bit like a "base", in the sense that
# it lets us define some reusable resources and behaviors.
# There is a key difference, though:
# - a "base" will be evaluated in isolation: it will
# generate+transform some resources, then these resources
# will be included in the main Kustomization;
# - a "component" has access to all the resources that
# have been generated by the main Kustomization, which
# means that it can transform them (with patches etc).
components:
- add-haproxy-sidecar

View File

@@ -1,73 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ollama
name: ollama
spec:
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
volumes:
- name: ollama
hostPath:
path: /opt/ollama
type: DirectoryOrCreate
containers:
- image: ollama/ollama
name: ollama
env:
- name: OLLAMA_MAX_QUEUE
valueFrom:
configMapKeyRef:
name: ollama
key: queue
- name: MODEL
valueFrom:
configMapKeyRef:
name: ollama
key: model
volumeMounts:
- name: ollama
mountPath: /root/.ollama
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- ollama pull $MODEL
livenessProbe:
httpGet:
port: 11434
readinessProbe:
exec:
command:
- /bin/sh
- -c
- ollama show $MODEL
ports:
- name: ollama
containerPort: 11434
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ollama
name: ollama
spec:
ports:
- name: "11434"
port: 11434
protocol: TCP
targetPort: 11434
selector:
app: ollama
type: ClusterIP

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices
- redis

View File

@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices.yaml
transformers:
- |
apiVersion: builtin
kind: PrefixSuffixTransformer
metadata:
name: use-ghcr-io
prefix: ghcr.io/
fieldSpecs:
- path: spec/template/spec/containers/image

View File

@@ -1,125 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- redis.yaml

View File

@@ -1,35 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP

View File

@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,30 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
targets:
- select:
kind: Deployment
labelSelector: "app in (hasher,rng,webui,worker)"
# It will soon be possible to use regexes in replacement selectors,
# meaning that the "labelSelector:" above can be replaced with the
# following "name:" selector which is a tiny bit simpler:
#name: hasher|rng|webui|worker
# Regex support in replacement selectors was added by this PR:
# https://github.com/kubernetes-sigs/kustomize/pull/5863
# This PR was merged in August 2025, but as of October 2025, the
# latest release of Kustomize is 5.7.1, which was released in July.
# Hopefully the feature will be available in the next release :)
# Another possibility would be to select all Deployments, and then
# reject the one(s) for which we don't want to update the registry;
# for instance:
#reject:
# kind: Deployment
# name: redis
fieldPaths:
- spec.template.spec.containers.*.image
options:
delimiter: "/"
index: 0

View File

@@ -1,56 +0,0 @@
# This is a Kyverno policy to automatically generate an Ingress resource,
# similar to the other ones in this directory; but instead of using the
# "old-style" policies (ClusterPolicy with spec.rules.generate), it is
# using the new CR GeneratingPolicy and CEL.
apiVersion: policies.kyverno.io/v1
kind: GeneratingPolicy
metadata:
name: generate-ingress-for-service
spec:
matchConstraints:
resourceRules:
- apiGroups: ['']
apiVersions: ['v1']
operations: ['CREATE']
resources: ['services']
variables:
- name: host
expression: |
object.metadata.name + "." + object.metadata.namespace + ".example.com"
- name: ingress
expression: >-
[
{
"kind": dyn("Ingress"),
"apiVersion": dyn("networking.k8s.io/v1"),
"metadata": dyn({
"name": object.metadata.name,
"namespace": object.metadata.namespace,
}),
"spec": dyn({
"rules": [
{
"host": dyn(variables.host),
"http": dyn({
"paths": [
{
"path": dyn("/"),
"pathType": dyn("Prefix"),
"backend": dyn({
"service": {
"name": dyn(object.metadata.name),
"port": dyn({
"number": 80
})
}
})
}
]
})
}
]
})
}
]
generate:
- expression: generator.Apply(object.metadata.namespace, variables.ingress)

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
validationFailureAction: enforce
rules:
- name: ensure-pod-color-is-valid
match:
@@ -17,6 +18,5 @@ spec:
operator: NotIn
values: [ red, green, blue ]
validate:
failureAction: Enforce
message: "If it exists, the label color must be red, green, or blue."
deny: {}

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
@@ -14,14 +15,13 @@ spec:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color || '' }}"
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color || '' }}"
- key: "{{ request.object.metadata.labels.color }}"
operator: NotEquals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be changed."
deny:
conditions:

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
@@ -14,13 +15,14 @@ spec:
- key: "{{ request.operation }}"
operator: Equals
value: UPDATE
- key: "{{ request.oldObject.metadata.labels.color || '' }}"
- key: "{{ request.oldObject.metadata.labels.color }}"
operator: NotEquals
value: ""
- key: "{{ request.object.metadata.labels.color || '' }}"
- key: "{{ request.object.metadata.labels.color }}"
operator: Equals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be removed."
deny: {}
deny:
conditions:

View File

@@ -1,37 +0,0 @@
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicy
metadata:
name: ensure-security-label
spec:
# What to do if an error happens when evaluating that policy? (Fail or Ignore)
failurePolicy: Fail
matchConstraints:
resourceRules:
- apiGroups: [""]
apiVersions: ["v1"]
operations: ["CREATE", "UPDATE"]
resources: ["pods"]
scope: Namespaced # "Cluster", "Namespaced", or "*"
validations:
- expression: |
'security' in object.metadata.labels
&&
object.metadata.labels.security in [ "public", "private", "namespace" ]
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicyBinding
metadata:
name: ensure-security-label
spec:
policyName: ensure-security-label
# What to do when a policy doesn't validate: Deny, Warn, Audit.
# (Note: it doesn't make sense to put Deny and Warn together.)
validationActions: [ Deny ]
matchResources:
namespaceSelector:
matchExpressions:
- key: kubernetes.io/metadata.name
operator: NotIn
values: [ kube-system, local-path-storage, kyverno ]

View File

@@ -1,13 +0,0 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: my-pdb
spec:
#minAvailable: 2
#minAvailable: 90%
maxUnavailable: 1
#maxUnavailable: 10%
selector:
matchLabels:
app: my-app

View File

@@ -1,27 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: sysctl
spec:
selector:
matchLabels:
app: sysctl
template:
metadata:
labels:
app: sysctl
spec:
tolerations:
- operator: Exists
initContainers:
- name: sysctl
image: alpine
securityContext:
privileged: true
command:
- sysctl
- fs.inotify.max_user_instances=99999
containers:
- name: pause
image: registry.k8s.io/pause:3.8

87
k8s/traefik-v1.yaml Normal file
View File

@@ -0,0 +1,87 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system

114
k8s/traefik-v2.yaml Normal file
View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
hostNetwork: true
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:v2.5
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --accesslog
- --api
- --api.insecure
- --log.level=INFO
- --metrics.prometheus
- --providers.kubernetesingress
- --entrypoints.http.Address=:80
- --entrypoints.https.Address=:443
- --entrypoints.https.http.tls.certResolver=default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller

View File

@@ -1,123 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: traefik
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik
namespace: traefik
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik
namespace: traefik
labels:
app: traefik
spec:
selector:
matchLabels:
app: traefik
template:
metadata:
labels:
app: traefik
name: traefik
spec:
tolerations:
- effect: NoSchedule
operator: Exists
# If, for some reason, our CNI plugin doesn't support hostPort,
# we can enable hostNetwork instead. That should work everywhere
# but it doesn't provide the same isolation.
#hostNetwork: true
serviceAccountName: traefik
terminationGracePeriodSeconds: 60
containers:
- image: traefik:v3.5
name: traefik
ports:
- name: http
containerPort: 80
hostPort: 80
- name: https
containerPort: 443
hostPort: 443
- name: admin
containerPort: 8080
hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --accesslog
- --api
- --api.insecure
- --entrypoints.http.Address=:80
- --entrypoints.https.Address=:443
- --global.sendAnonymousUsage=true
- --log.level=INFO
- --metrics.prometheus
- --providers.kubernetesingress
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik
subjects:
- kind: ServiceAccount
name: traefik
namespace: traefik
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller

1
k8s/traefik.yaml Symbolic link
View File

@@ -0,0 +1 @@
traefik-v2.yaml

View File

@@ -70,15 +70,4 @@ add_namespace() {
kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
-o yaml --dry-run=client \
#
echo ---
cat <<EOF
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: cluster-admin-token
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: cluster-admin
EOF
) > dashboard-with-token.yaml

View File

@@ -2,3 +2,4 @@
base = "slides"
publish = "slides"
command = "./build.sh once"

View File

@@ -1,222 +0,0 @@
# Tools to create lab environments
This directory contains tools to create lab environments for Docker and Kubernetes courses and workshops.
It also contains Terraform configurations that can be used stand-alone to create simple Kubernetes clusters.
Assuming that you have installed all the necessary dependencies, and placed cloud provider access tokens in the right locations, you could do, for instance:
```bash
# For a Docker course with 50 students,
# create 50 VMs on Digital Ocean.
./labctl create --students 50 --settings settings/docker.env --provider digitalocean
# For a Kubernetes training with 20 students,
# create 20 clusters of 4 VMs each using kubeadm,
# on a private Openstack cluster.
./labctl create --students 20 --settings settings/kubernetes.env --provider openstack/enix
# For a Kubernetes workshop with 80 students,
# create 80 clusters with 2 VMs each,
# using Scaleway Kapsule (managed Kubernetes).
./labctl create --students 80 --settings settings/mk8s.env --provider scaleway --mode mk8s
```
Interested? Read on!
## Software requirements
For Docker labs and Kubernetes labs based on kubeadm:
- [Parallel SSH](https://github.com/lilydjwg/pssh)
(should be installable with `pip install git+https://github.com/lilydjwg/pssh`;
on a Mac, try `brew install pssh`)
For all labs:
- Terraform
If you want to generate printable cards:
- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)
These require Python 3. If you are on a Mac, see below for specific instructions on making
Python 3 your default Python. In particular, if you installed `mosh`, Homebrew
may have changed your default Python to Python 2.
You will also need an account with the cloud provider(s) that you want to use to deploy the lab environments.
## Cloud provider account(s) and credentials
These scripts create VMs or Kubernetes cluster on cloud providers, so you will need cloud provider account(s) and credentials.
Generally, we try to use the credentials stored in the configuration files used by the cloud providers' CLI tools.
This means, for instance, that for Linode, if you install `linode-cli` and configure it properly, it will place your credentials in `~/.config/linode-cli`, and our Terraform configurations will try to read that file and use the credentials in it.
You don't **have to** install the CLI tools of the cloud provider(s) that you want to use; but we recommend that you do.
If you want to provide your cloud credentials through other means, you will have to adjust the Terraform configuration files in `terraform/provider-config` accordingly.
Here is where we look for credentials for each provider:
- AWS: Terraform defaults; see [AWS provider documentation][creds-aws] (for instance, you can use the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables, or AWS config and profile files)
- Azure: Terraform defaults; see [AzureRM provider documentation][creds-azure] (typically, you can authenticate with the `az` CLI and Terraform will pick it up automatically)
- Civo: CLI configuration file (`~/.civo.json`)
- Digital Ocean: CLI configuration file (`~/.config/doctl/config.yaml`)
- Exoscale: CLI configuration file (`~/.config/exoscale/exoscale.toml`)
- Google Cloud: we're using "Application Default Credentials (ADC)"; run `gcloud auth application-default login`; note that we'll use the default "project" set in `gcloud` unless you set the `GOOGLE_PROJECT` environment variable
- Hetzner: CLI configuration file (`~/.config/hcloud/cli.toml`)
- Linode: CLI configuration file (`~/.config/linode-cli`)
- OpenStack: you will need to write a tfvars file (check [this example](terraform/virtual-machines/openstack/tfvars.example))
- Oracle: Terraform defaults; see [OCI provider documentation][creds-oci] (for instance, you can set up API keys; or you can use a short-lived token generated by the OCI CLI with `oci session authenticate`)
- OVH: Terraform defaults; see [OVH provider documentation][creds-ovh] (this typically involves setting up 5 `OVH_...` environment variables)
- Scaleway: Terraform defaults; see [Scaleway provider documentation][creds-scw] (for instance, you can set environment variables, but it will also automatically pick up CLI authentication from `~/.config/scw/config.yaml`)
[creds-aws]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration
[creds-azure]: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure
[creds-oci]: https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/terraformproviderconfiguration.htm#authentication
[creds-ovh]: https://registry.terraform.io/providers/ovh/ovh/latest/docs#provider-configuration
[creds-scw]: https://registry.terraform.io/providers/scaleway/scaleway/latest/docs#authentication
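For example, here is a minimal sketch of two common ways to provide credentials; the values below are placeholders, not real keys:

```bash
# AWS: static credentials through environment variables
export AWS_ACCESS_KEY_ID=AKIAXXXXXXXXXXXXXXXX
export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

# Google Cloud: Application Default Credentials
gcloud auth application-default login
export GOOGLE_PROJECT=my-project-id  # optional; overrides the default gcloud project
```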
## General Workflow
- fork/clone repo
- make sure your cloud credentials have been configured properly
- run `./labctl create ...` to create lab environments
- run `./labctl destroy ...` when you don't need the environments anymore
## Customizing things
You can edit the `settings/*.env` files, for instance to change the size of the clusters, the login or password used for the students...
Note that these files are sourced before executing any operation on a specific set of lab environments, which means that you can set Terraform variables by adding lines like the following one in the `*.env` files:
```bash
export TF_VAR_node_size=GP1.L
export TF_VAR_location=eu-north
```
## `./labctl` Usage
If you run `./labctl` without arguments, it will show a list of available commands.
### Summary of What `./labctl` Does For You
The script will create a Terraform configuration using a provider-specific template.
There are two modes: `pssh` and `mk8s`.
In `pssh` mode, students connect directly to the virtual machines using SSH.
The Terraform configuration creates a set of virtual machines; provisioning and configuration are then done with `pssh`. A number of "steps" are executed on the VMs to install Docker, install various convenient tools, and install and set up Kubernetes (if needed). The list of steps to execute is configured in the `settings/*.env` file (see the sketch below).
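As an illustration, here is a minimal sketch of what a `pssh` mode settings file can look like (the file name and values are examples, not a recommendation; see the real `settings/*.env` files further down in this diff):

```bash
# settings/example.env (hypothetical name)
CLUSTERSIZE=3            # number of VMs per cluster
CLUSTERPREFIX=node       # prefix used to name the VMs
USER_LOGIN=k8s           # login created for the students
USER_PASSWORD=training   # password for that login
STEPS="                  # list of steps executed by labctl, in order
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
ips
"
```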
In `mk8s` mode, students don't connect directly to the virtual machines. Instead, they connect to an SSH server running in a Pod (using the `jpetazzo/shpod` image), itself running on a Kubernetes cluster. The Kubernetes cluster is a managed cluster created by the Terraform configuration.
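Concretely, once an `mk8s` environment is up, students connect with a regular SSH client to a node IP address and a NodePort; the address, port, login, and password come from the generated login information. A hypothetical example (the IP address and port below are placeholders):

```bash
ssh -p 32222 k8s@203.0.113.10
```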
## `terraform` directory structure and principles
Legend:
- `📁` directory
- `📄` file
- `📄📄📄` multiple files
- `🌍` Terraform configuration that can be used "as-is"
```
📁terraform
├── 📁list-locations
│ └── 📄📄📄 helper scripts
│ (to list available locations for each provider)
├── 📁many-kubernetes
│ └── 📄📄📄 Terraform configuration template
│ (used in mk8s mode)
├── 📁one-kubernetes
│ │ (contains Terraform configurations that can spawn
│ │ a single Kubernetes cluster on a given provider)
│ ├── 📁🌍aws
│ ├── 📁🌍civo
│ ├── 📄common.tf
│ ├── 📁🌍digitalocean
│ └── ...
├── 📁providers
│ ├── 📁aws
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁azure
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁civo
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ ├── 📁digitalocean
│ │ ├── 📄config.tf
│ │ └── 📄variables.tf
│ └── ...
├── 📁tags
│ │ (contains Terraform configurations + other files
│ │ for a specific set of VMs or K8S clusters; these
│ │ are created by labctl)
│ ├── 📁2023-03-27-10-04-79-jp
│ ├── 📁2023-03-27-10-07-41-jp
│ ├── 📁2023-03-27-10-16-418-jp
│ └── ...
└── 📁virtual-machines
│ (contains Terraform configurations that can spawn
│ a bunch of virtual machines on a given provider)
├── 📁🌍aws
├── 📁🌍azure
├── 📄common.tf
├── 📁🌍digitalocean
└── ...
```
The directory structure can feel a bit overwhelming at first, but it's built with specific goals in mind.
**Consistent input/output between providers.** The per-provider configurations in `one-kubernetes` all take the same input variables, and provide the same output variables. Same thing for the per-provider configurations in `virtual-machines`.
**Don't repeat yourself.** As much as possible, common variables, definitions, and logic have been factored into the `common.tf` file that you can see in `one-kubernetes` and `virtual-machines`. That file is then symlinked in each provider-specific directory, to make sure that all providers use the same version of the `common.tf` file.
**Don't repeat yourself (again).** The things that are specific to each provider have been placed in the `providers` directory, and are shared between the `one-kubernetes` and the `virtual-machines` configurations. Specifically, for each provider, there is `config.tf` (which contains provider configuration, e.g. how to obtain the credentials for that provider) and `variables.tf` (which contains default values like which location and which VM size to use).
**Terraform configurations should work in `labctl` or standalone, without extra work.** The Terraform configurations (identified by 🌍 in the directory tree above) can be used directly. Just go to one of these directories, `terraform init`, `terraform apply`, and you're good to go. But they can also be used from `labctl`. `labctl` shouldn't barf out if you did a `terraform apply` in one of these directories (because it will only copy the `*.tf` files, and leave alone the other files, like the Terraform state).
The latter means that it should be easy to tweak these configurations, or create a new one, without having to use `labctl` to test it. It also means that if you want to use these configurations but don't care about `labctl`, you absolutely can!
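For example, here is a minimal sketch of standalone usage (the provider directory is just one possibility; Terraform will prompt for any required variables, or you can set them with `TF_VAR_...` environment variables as shown earlier):

```bash
cd terraform/virtual-machines/digitalocean
terraform init
terraform apply
# ...and when you're done with the environment:
terraform destroy
```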
## Miscellaneous info
### Making sure Python3 is the default (Mac only)
Check the `/usr/local/bin/python` symlink. It should be pointing to
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
instructions.
1) Verify that Python 3 is installed.
```
ls -la /usr/local/Cellar/Python
```
You should see one or more versions of Python 3. If you don't,
install it with `brew install python`.
2) Verify that `python` points to Python3.
```
ls -la /usr/local/bin/python
```
If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.
```
rm /usr/local/bin/python
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
# where xxxx is the most recent Python 3 version you saw above
```
### AWS specific notes
The initial assumption is that you're using a root account. If you'd like to use an IAM user, it will need the right permissions. For `pssh` mode, that includes at least `AmazonEC2FullAccess` and `IAMReadOnlyAccess`.
In `pssh` mode, the Terraform configuration currently uses the default VPC and Security Group. If you want to use another one, you'll have to make changes to `terraform/virtual-machines/aws`.
The default VPC Security Group does not open any ports from the Internet, so you'll need to add inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`.
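For instance, assuming that the ID of your default Security Group is `sg-0123456789abcdef0` (a placeholder), these rules could be added with the AWS CLI:

```bash
# Allow SSH from anywhere
aws ec2 authorize-security-group-ingress \
    --group-id sg-0123456789abcdef0 --protocol tcp --port 22 --cidr 0.0.0.0/0

# Allow TCP ports 8000-8002 (used by some of the lab exercises)
aws ec2 authorize-security-group-ingress \
    --group-id sg-0123456789abcdef0 --protocol tcp --port 8000-8002 --cidr 0.0.0.0/0
```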
View File
@@ -1,33 +0,0 @@
#!/bin/sh
case "$1-$2" in
linode-lb)
linode-cli nodebalancers list --json |
jq '.[] | select(.label | startswith("ccm-")) | .id' |
xargs -n1 -P10 linode-cli nodebalancers delete
;;
linode-pvc)
linode-cli volumes list --json |
jq '.[] | select(.label | startswith("pvc")) | .id' |
xargs -n1 -P10 linode-cli volumes delete
;;
digitalocean-lb)
doctl compute load-balancer list --output json |
jq .[].id |
xargs -n1 -P10 doctl compute load-balancer delete --force
;;
digitalocean-pvc)
doctl compute volume list --output json |
jq '.[] | select(.name | startswith("pvc-")) | .id' |
xargs -n1 -P10 doctl compute volume delete --force
;;
scaleway-pvc)
scw instance volume list --output json |
jq '.[] | select(.name | contains("_pvc-")) | .id' |
xargs -n1 -P10 scw instance volume delete
;;
*)
echo "Unknown combination of provider ('$1') and resource ('$2')."
;;
esac
View File
@@ -1,63 +0,0 @@
#!/bin/sh
#set -eu
if ! command -v http >/dev/null; then
echo "Could not find the 'http' command line tool."
echo "Please install it (the package name might be 'httpie')."
exit 1
fi
. ~/creds/creds.cloudflare.dns
cloudflare() {
case "$1" in
GET|POST|DELETE)
METHOD="$1"
shift
;;
*)
METHOD=""
;;
esac
URI=$1
shift
http --ignore-stdin $METHOD https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
}
_list_zones() {
cloudflare zones?per_page=100 | jq -r .result[].name
}
_get_zone_id() {
cloudflare zones?name=$1 | jq -r .result[0].id
}
_populate_zone() {
ZONE_ID=$(_get_zone_id $1)
shift
for IPADDR in $*; do
case "$IPADDR" in
*.*) TYPE=A;;
*:*) TYPE=AAAA;;
esac
cloudflare zones/$ZONE_ID/dns_records "name=*" "type=$TYPE" "content=$IPADDR"
cloudflare zones/$ZONE_ID/dns_records "name=\@" "type=$TYPE" "content=$IPADDR"
done
}
_clear_zone() {
ZONE_ID=$(_get_zone_id $1)
for RECORD_ID in $(
cloudflare zones/$ZONE_ID/dns_records | jq -r .result[].id
); do
cloudflare DELETE zones/$ZONE_ID/dns_records/$RECORD_ID
done
}
_add_zone() {
cloudflare zones "name=$1"
}
echo "This script is still work in progress."
echo "You can source it and then use its individual functions."
View File
@@ -1,65 +0,0 @@
#!/bin/sh
#
# Baseline resource usage per vcluster in our usecase:
# 500 MB RAM
# 10% CPU
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb
# Note that we also need 2 volumes per vcluster (one for vcluster itself, one for shpod),
# so we might hit the maximum number of volumes per node!
# (TODO: check what that limit is on Scaleway and Linode)
#
# With vspod:
# 800 MB RAM
# 33% CPU
#
set -e
KONKTAG=konk
PROVIDER=linode
STUDENTS=2
case "$PROVIDER" in
linode)
export TF_VAR_node_size=g6-standard-6
export TF_VAR_location=fr-par
;;
scaleway)
export TF_VAR_node_size=PRO2-XS
# For tiny testing purposes, these are okay too:
#export TF_VAR_node_size=PLAY2-NANO
export TF_VAR_location=fr-par-2
;;
esac
# set kubeconfig file
export KUBECONFIG=~/kubeconfig
if [ "$PROVIDER" = "kind" ]; then
kind create cluster --name $KONKTAG
ADDRTYPE=InternalIP
else
if ! [ -f tags/$KONKTAG/stage2/kubeconfig.101 ]; then
./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag $KONKTAG
fi
cp tags/$KONKTAG/stage2/kubeconfig.101 $KUBECONFIG
ADDRTYPE=ExternalIP
fi
# set external_ip labels
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="'$ADDRTYPE'")].address}{"\n"}{end}' |
while read node address ignoredaddresses; do
kubectl label node $node external_ip=$address
done
# vcluster all the things
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students $STUDENTS
# install prometheus stack because that's cool
helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \
--namespace prom-system --create-namespace \
kube-prometheus-stack kube-prometheus-stack
# and also fix sysctl
kubectl apply -f ../k8s/sysctl.yaml --namespace kube-system
File diff suppressed because it is too large
View File
@@ -1,7 +0,0 @@
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
View File
@@ -1,44 +0,0 @@
# This file can be sourced in order to run commands directly on a group
# of VMs whose IPs are listed in the ips.txt file of the current tag
# (i.e. tags/$TAG/ips.txt).
pssh() {
if [ -z "$TAG" ]; then
>/dev/stderr echo "Variable \$TAG is not set."
return
fi
HOSTFILE="tags/$TAG/ips.txt"
[ -f $HOSTFILE ] || {
>/dev/stderr echo "Hostfile $HOSTFILE not found."
return
}
echo "[parallel-ssh] $@"
# There are some routers that really struggle with the number of TCP
# connections that we open when deploying large fleets of clusters.
# We're adding a 1 second delay here, but this can be cranked up if
# necessary - or down to zero, too.
sleep ${PSSH_DELAY_PRE-1}
# When things go wrong, it's convenient to ask pssh to show the output
# of the failed command. Let's make that easy with a DEBUG env var.
if [ "$DEBUG" ]; then
PSSH_I=-i
else
PSSH_I=""
fi
$(which pssh || which parallel-ssh) -h $HOSTFILE -l ubuntu \
--par ${PSSH_PARALLEL_CONNECTIONS-100} \
--timeout 300 \
-O LogLevel=ERROR \
-O IdentityFile=tags/$TAG/id_rsa \
-O UserKnownHostsFile=/dev/null \
-O StrictHostKeyChecking=no \
-O ForwardAgent=yes \
$PSSH_I \
"$@"
}
View File
@@ -1,16 +0,0 @@
#!/bin/sh
DOMAINS=domains.txt
IPS=ips.txt
. ./dns-cloudflare.sh
paste "$DOMAINS" "$IPS" | while read domain ips; do
if ! [ "$domain" ]; then
echo "⚠️ No more domains!"
exit 1
fi
_clear_zone "$domain"
_populate_zone "$domain" $ips
done
echo "✅ All done."
View File
@@ -1,22 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=kubenet
CLUSTERNUMBER=100
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubebins
kubetools
ips
"
View File
@@ -1,22 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=kuberouter
CLUSTERNUMBER=200
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubebins
kubetools
ips
"
View File
@@ -1,27 +0,0 @@
CLUSTERSIZE=1
CLUSTERPREFIX=monokube
# We're sticking to this in the first DMUC lab,
# because it still works with Docker, and doesn't
# require a ServiceAccount signing key.
KUBEVERSION=1.19.11
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
docker
disabledocker
createuser
webssh
tailhist
kubebins
kubetools
ips
"
View File
@@ -1,26 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=oldversion
USER_LOGIN=k8s
USER_PASSWORD=training
# For a list of old versions, check:
# https://kubernetes.io/releases/patch-releases/#non-active-branch-history
KUBEVERSION=1.28.9
STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
View File
@@ -1,21 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=polykube
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
kubepkgs
kubebins
createuser
webssh
tailhist
kubetools
ips
"
View File
@@ -1,22 +0,0 @@
CLUSTERSIZE=3
CLUSTERPREFIX=test
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
View File
@@ -1,19 +0,0 @@
CLUSTERSIZE=1
CLUSTERPREFIX=moby
USER_LOGIN=docker
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
ips
"
View File
@@ -1,6 +0,0 @@
CLUSTERSIZE=5
USER_LOGIN=k8s
USER_PASSWORD=
STEPS="terraform stage2"
View File
@@ -1,22 +0,0 @@
CLUSTERSIZE=4
CLUSTERPREFIX=node
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
View File
@@ -1,23 +0,0 @@
CLUSTERSIZE=10
export TF_VAR_node_size=GP1.M
CLUSTERPREFIX=node
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
View File
@@ -1,4 +0,0 @@
USER_LOGIN=k8s
USER_PASSWORD=
STEPS="terraform stage2"
View File
@@ -1,22 +0,0 @@
#export TF_VAR_node_size=GP4.4
#export TF_VAR_node_size=g6-standard-6
#export TF_VAR_node_size=m7i.xlarge
CLUSTERSIZE=1
CLUSTERPREFIX=CHANGEME
USER_LOGIN=portal
USER_PASSWORD=CHANGEME
STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
ips
"
View File
@@ -1,40 +0,0 @@
#!/bin/sh
set -e
PREFIX=$(date +%Y-%m-%d-%H-%M)
PROVIDER=openstack/enix # aws also works
STUDENTS=2
#export TF_VAR_location=eu-north-1
export TF_VAR_node_size=S
SETTINGS=admin-monokube
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS
SETTINGS=admin-polykube
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS
SETTINGS=admin-oldversion
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS
SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS
View File
@@ -1 +0,0 @@
terraform/tags
View File
@@ -1,237 +0,0 @@
{#
The variables below can be customized here directly, or in your
settings.yaml file. Any variable in settings.yaml will be exposed
in here as well.
#}
{%- set url = url
| default("http://FIXME.container.training/") -%}
{%- set pagesize = pagesize
| default(10) -%}
{%- set lang = lang
| default("en") -%}
{%- set event = event
| default("training session") -%}
{%- set backside = backside
| default(False) -%}
{%- set image = image
| default(False) -%}
{%- set clusternumber = clusternumber
| default(None) -%}
{%- set thing = thing
| default("lab environment") -%}
{%- if lang == "en" -%}
{%- set intro -%}
Here is the connection information to your very own
{{ thing }} for this {{ event }}.
You can connect to it with any SSH client.
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
{{ thing }} pour cette formation.
Vous pouvez vous y connecter
avec n'importe quel client SSH.
{%- endset -%}
{%- endif -%}
{%- if lang == "en" -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
{% if paper_size == "A4" %}
@page {
size: A4; /* Change from the default size of A4 */
margin: 0.5cm; /* Set margin on each page */
}
body {
/* this is A4 minus 0.5cm margins */
width: 20cm;
height: 28.7cm;
}
{% elif paper_size == "Letter" %}
@page {
size: Letter; /* 8.5in x 11in */
}
body {
width: 6.75in; /* two cards wide */
margin-left: 0.875in; /* (8.5in - 6.75in)/2 */
margin-top: 0.1875in; /* (11in - 5 cards)/2 */
}
{% endif %}
body, table {
line-height: 1em;
font-size: 15px;
font-family: 'Slabo 27px';
}
table {
border-spacing: 0;
margin-top: 0.4em;
margin-bottom: 0.4em;
border-left: 0.8em double grey;
padding-left: 0.4em;
}
td:first-child {
width: 10.5em;
}
div.card {
float: left;
border: 0.01in dotted black;
/*
columns * (width+left+right) < 100%
height: 33%;
width: 24.8%;
width: 33%;
*/
width: 3.355in; /* 3.375in minus two 0.01in borders */
height: 2.105in; /* 2.125in minus two 0.01in borders */
}
p {
margin: 0.8em;
}
div.front {
{% if image %}
background-image: url("{{ image }}");
background-repeat: no-repeat;
background-size: 1in;
background-position-x: 2.8in;
background-position-y: center;
{% endif %}
}
span.scale {
white-space: nowrap;
}
.qrcode img {
height: 5.8em;
padding: 1em 1em 0.5em 1em;
float: left;
}
.logpass {
font-family: monospace;
font-weight: bold;
}
.pagebreak {
page-break-after: always;
clear: both;
display: block;
height: 0;
}
</style>
<script type="text/javascript" src="qrcode.min.js"></script>
<script type="text/javascript">
function qrcodes() {
[].forEach.call(
document.getElementsByClassName("qrcode"),
(e, index) => {
new QRCode(e, {
text: "{{ qrcode }}",
correctLevel: QRCode.CorrectLevel.L
});
}
);
}
function scale() {
[].forEach.call(
document.getElementsByClassName("scale"),
(e, index) => {
var text_width = e.getBoundingClientRect().width;
var box_width = e.parentElement.getBoundingClientRect().width;
var percent = 100 * box_width / text_width + "%";
e.style.fontSize = percent;
}
);
}
</script>
</head>
<body onload="qrcodes(); scale();">
{% for login in logins %}
<div class="card front">
<p>{{ intro }}</p>
<p>
<table>
<tr>
<td>login:</td>
<td>password:</td>
</tr>
<tr>
<td class="logpass">{{ login.login }}</td>
<td class="logpass">{{ login.password }}</td>
</tr>
<tr>
<td>IP address:</td>
{% if login.port %}
<td>port:</td>
{% endif %}
</tr>
<tr>
<td class="logpass">{{ login.ipaddrs.split("\t")[0] }}</td>
{% if login.port %}
<td class="logpass">{{ login.port }}</td>
{% endif %}
</tr>
</table>
</p>
<p>
{% if url %}
{{ slides_are_at }}
<p>
<span class="scale">{{ url }}</span>
</p>
{% endif %}
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% if backside %}
{% for x in range(pagesize) %}
<div class="card back">
{{ backside }}
{#
<p>Thanks for attending
"Getting Started With Kubernetes and Container Orchestration"
during CONFERENCE in Month YYYY!</p>
<p>If you liked that workshop,
I can train your team, in person or
online, with custom courses of
any length and any level.
</p>
{% if qrcode %}
<p>If you're interested, please scan that QR code to contact me:</p>
<span class="qrcode"></span>
{% else %}
<p>If you're interested, you can contact me at:</p>
{% endif %}
<p>jerome.petazzoni@gmail.com</p>
#}
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endif %}
{% endfor %}
</body>
</html>
View File
@@ -1,19 +0,0 @@
cards_template: cards.html
paper_size: Letter
url: https://2024-11-qconsf.container.training
event: workshop
backside: |
<div class="qrcode"></div>
<p>
Thanks for attending the Asynchronous Architecture Patterns workshop at QCON!
</p>
<p>
<b>This QR code will give you my contact info</b> as well as a link to a feedback form.
</p>
<p>
If you liked this workshop, I can train your team, in person or online, with custom
courses of any length and any level, on Docker, Kubernetes, and MLops.
</p>
qrcode: https://2024-11-qconsf.container.training/#contact
thing: Kubernetes cluster
image: logo-kubernetes.png
View File
@@ -1,4 +0,0 @@
#!/bin/sh
az account list-locations -o table \
--query "sort_by([?metadata.regionType == 'Physical'], &regionalDisplayName)[]
.{ displayName: displayName, regionalDisplayName: regionalDisplayName }"
View File
@@ -1,2 +0,0 @@
#!/bin/sh
civo region ls
View File
@@ -1,2 +0,0 @@
#!/bin/sh
doctl compute region list
View File
@@ -1,2 +0,0 @@
#!/bin/sh
exo zone
View File
@@ -1,2 +0,0 @@
#!/bin/sh
gcloud compute zones list
View File
@@ -1,2 +0,0 @@
#!/bin/sh
linode-cli regions list
View File
@@ -1,2 +0,0 @@
#!/bin/sh
oci iam region list
View File
@@ -1,6 +0,0 @@
#!/bin/sh
echo "# Note that this is hard-coded in $0.
# I don't know if there is a way to list regions through the Scaleway API.
fr-par
nl-ams
pl-waw"
View File
@@ -1 +0,0 @@
one-kubernetes-config/config.tf
View File
@@ -1,3 +0,0 @@
This directory should contain a config.tf file, even if it's empty.
(Because if the file doesn't exist, then the Terraform configuration
in the parent directory will fail.)
View File
@@ -1,8 +0,0 @@
This directory should contain a copy of one of the "one-kubernetes" modules.
For instance, when located in this directory, you can do:
cp ../../one-kubernetes/linode/* .
Then, move the config.tf file to ../one-kubernetes-config:
mv config.tf ../one-kubernetes-config
View File
@@ -1 +0,0 @@
one-kubernetes-module/provider.tf
View File
@@ -1,3 +0,0 @@
terraform {
required_version = ">= 1.4"
}
View File
@@ -1,274 +0,0 @@
terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
version = "~> 2.38.0"
}
helm = {
source = "hashicorp/helm"
version = "~> 3.0"
}
}
}
%{ for index, cluster in clusters ~}
provider "kubernetes" {
alias = "cluster_${index}"
config_path = "./kubeconfig.${index}"
}
provider "helm" {
alias = "cluster_${index}"
kubernetes = {
config_path = "./kubeconfig.${index}"
}
}
# Password used for SSH and code-server access
resource "random_string" "shpod_${index}" {
length = 6
special = false
upper = false
}
resource "kubernetes_namespace" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod"
}
}
data "kubernetes_service" "shpod_${index}" {
depends_on = [ helm_release.shpod_${index} ]
provider = kubernetes.cluster_${index}
metadata {
name = "shpod"
namespace = "shpod"
}
}
resource "helm_release" "shpod_${index}" {
provider = helm.cluster_${index}
repository = "https://shpod.in"
chart = "shpod"
name = "shpod"
namespace = "shpod"
create_namespace = false
values = [
yamlencode({
service = {
type = "NodePort"
}
resources = {
requests = {
cpu = "100m"
memory = "500M"
}
limits = {
cpu = "1"
memory = "1000M"
}
}
persistentVolume = {
enabled = true
}
ssh = {
password = random_string.shpod_${index}.result
}
rbac = {
cluster = {
clusterRoles = [ "cluster-admin" ]
}
}
codeServer = {
enabled = true
}
})
]
}
resource "helm_release" "metrics_server_${index}" {
# Some providers pre-install metrics-server.
# Some don't. Let's install metrics-server,
# but only if it's not already installed.
count = yamldecode(file("./flags.${index}"))["has_metrics_server"] ? 0 : 1
provider = helm.cluster_${index}
repository = "https://kubernetes-sigs.github.io/metrics-server/"
chart = "metrics-server"
version = "3.8.2"
name = "metrics-server"
namespace = "metrics-server"
create_namespace = true
values = [
yamlencode({
args = [ "--kubelet-insecure-tls" ]
})
]
}
# As of October 2025, the ebs-csi-driver addon (which is used on EKS
# to provision persistent volumes) doesn't automatically create a
# StorageClass. Here, we're trying to detect the DaemonSet created
# by the ebs-csi-driver; and if we find it, we create the corresponding
# StorageClass.
data "kubernetes_resources" "ebs_csi_node_${index}" {
provider = kubernetes.cluster_${index}
api_version = "apps/v1"
kind = "DaemonSet"
label_selector = "app.kubernetes.io/name=aws-ebs-csi-driver"
namespace = "kube-system"
}
resource "kubernetes_storage_class" "ebs_csi_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 1 : 0
provider = kubernetes.cluster_${index}
metadata {
name = "ebs-csi"
annotations = {
"storageclass.kubernetes.io/is-default-class" = "true"
}
}
storage_provisioner = "ebs.csi.aws.com"
}
# This section here deserves a little explanation.
#
# When we access a cluster with shpod (either through SSH or code-server)
# there is no kubeconfig file - we simply use "in-cluster" authentication
# with a ServiceAccount token. This is a bit unusual, and ideally, I would
# prefer to have a "normal" kubeconfig file in the students' shell.
#
# So what we're doing here, is that we're populating a ConfigMap with
# a kubeconfig file; and in the initialization scripts (e.g. bashrc) we
# automatically download the kubeconfig file from the ConfigMap and place
# it in ~/.kube/kubeconfig.
#
# But, which kubeconfig file should we use? We could use the "normal"
# kubeconfig file that was generated by the provider; but in some cases,
# that kubeconfig file might use a token instead of a certificate for
# user authentication - and ideally, I would like to have a certificate
# so that in the section about auth and RBAC, we can dissect that TLS
# certificate and explain where our permissions come from.
#
# So we're creating a TLS key pair; using the CSR API to issue a user
# certificate belonging to a special group; and granting the cluster-admin
# role to that group; then we use the kubeconfig file generated by the
# provider but override the user with that TLS key pair.
#
# This is not strictly necessary but it streamlines the lesson on auth.
#
# Lastly - in the ConfigMap we actually put both the original kubeconfig,
# and the one where we injected our new user (just in case we want to
# use or look at the original for any reason).
#
# One more thing: the kubernetes.io/kube-apiserver-client signer is
# disabled on EKS, so... we don't generate that ConfigMap on EKS.
# To detect if we're on EKS, we're looking for the ebs-csi-node DaemonSet.
# (Which means that the detection will break if the ebs-csi addon is missing.)
resource "kubernetes_config_map" "kubeconfig_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 0 : 1
provider = kubernetes.cluster_${index}
metadata {
name = "kubeconfig"
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
}
data = {
kubeconfig_from_provider = file("./kubeconfig.${index}")
kubeconfig_cluster_admin = <<-EOT
kind: Config
apiVersion: v1
current-context: cluster-admin@k8s-${index}
clusters:
- name: k8s-${index}
cluster:
certificate-authority-data: $${yamldecode(file("./kubeconfig.${index}")).clusters.0.cluster.certificate-authority-data}
server: $${yamldecode(file("./kubeconfig.${index}")).clusters.0.cluster.server}
contexts:
- name: cluster-admin@k8s-${index}
context:
cluster: k8s-${index}
user: cluster-admin
users:
- name: cluster-admin
user:
client-key-data: $${base64encode(tls_private_key.cluster_admin_${index}.private_key_pem)}
client-certificate-data: $${base64encode(kubernetes_certificate_signing_request_v1.cluster_admin_${index}[0].certificate)}
EOT
}
}
resource "tls_private_key" "cluster_admin_${index}" {
algorithm = "RSA"
}
resource "tls_cert_request" "cluster_admin_${index}" {
private_key_pem = tls_private_key.cluster_admin_${index}.private_key_pem
subject {
common_name = "cluster-admin"
# Note: CSR API v1 doesn't allow issuing certs with "system:masters" anymore.
#organization = "system:masters"
  # We'll use this custom group name instead.
organization = "shpod-cluster-admins"
}
}
resource "kubernetes_cluster_role_binding" "shpod_cluster_admin_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod-cluster-admin"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
subject {
api_group = "rbac.authorization.k8s.io"
kind = "Group"
name = "shpod-cluster-admins"
}
}
resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 0 : 1
provider = kubernetes.cluster_${index}
metadata {
name = "cluster-admin"
}
spec {
usages = ["client auth"]
request = tls_cert_request.cluster_admin_${index}.cert_request_pem
signer_name = "kubernetes.io/kube-apiserver-client"
}
auto_approve = true
}
%{ endfor ~}
output "ips_txt" {
value = join("\n", [
%{ for index, cluster in clusters ~}
join("\n", concat(
split(" ", file("./externalips.${index}"))
)),
%{ endfor ~}
""
])
}
output "logins_jsonl" {
value = join("\n", [
%{ for index, cluster in clusters ~}
jsonencode({
login = "k8s",
password = random_string.shpod_${index}.result,
port = data.kubernetes_service.shpod_${index}.spec[0].port[0].node_port,
codeServerPort = data.kubernetes_service.shpod_${index}.spec[0].port[1].node_port,
ipaddrs = replace(file("./externalips.${index}"), " ", "\t"),
}),
%{ endfor ~}
""
])
}
View File
@@ -1,33 +0,0 @@
variable "tag" {
type = string
}
variable "how_many_clusters" {
type = number
default = 2
}
variable "min_nodes_per_cluster" {
type = number
default = 2
}
variable "max_nodes_per_cluster" {
type = number
default = 4
}
variable "node_size" {
type = string
default = "M"
}
variable "location" {
type = string
default = null
}
# TODO: perhaps handle if it's space-separated instead of newline?
locals {
locations = var.location == null ? [null] : split("\n", var.location)
}
Some files were not shown because too many files have changed in this diff