Compare commits


1 Commit

Author: Jérôme Petazzoni
SHA1: 09e060d1c5
Message: 🏦 Prep training Boursorama
Date: 2024-12-09 23:36:41 +01:00
196 changed files with 1798 additions and 10502 deletions

View File

@@ -1,26 +0,0 @@
{
"name": "container.training environment to get started with Docker and/or Kubernetes",
"image": "ghcr.io/jpetazzo/shpod",
"features": {
//"ghcr.io/devcontainers/features/common-utils:2": {}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [],
//"postCreateCommand": "... install extra packages...",
"postStartCommand": "dind.sh ; kind.sh",
// This lets us use "docker-outside-docker".
// Unfortunately, minikube, kind, etc. don't work very well that way;
// so for now, we'll likely use "docker-in-docker" instead (with a
// privileged container). But we're still exposing that socket in case
// someone wants to do something interesting with it.
"mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],
// This is for docker-in-docker.
"privileged": true,
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
"remoteUser": "k8s"
}

2
.gitignore vendored
View File

@@ -9,7 +9,6 @@ prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www
slides/*.yml.html
@@ -17,7 +16,6 @@ slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
slides/_academy_*
node_modules
### macOS ###

View File

@@ -1,24 +1,26 @@
services:
version: "2"
services:
rng:
build: rng
ports:
- "8001:80"
- "8001:80"
hasher:
build: hasher
ports:
- "8002:80"
- "8002:80"
webui:
build: webui
ports:
- "8000:80"
- "8000:80"
volumes:
- "./webui/files/:/files/"
- "./webui/files/:/files/"
redis:
image: redis
worker:
build: worker

View File

@@ -1,8 +1,7 @@
FROM ruby:alpine
WORKDIR /app
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN gem install thin
COPY hasher.rb .
CMD ["ruby", "hasher.rb", "-o", "::"]
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80

View File

@@ -2,6 +2,7 @@ require 'digest'
require 'sinatra'
require 'socket'
set :bind, '0.0.0.0'
set :port, 80
post '/' do

View File

@@ -1,7 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run"]
COPY rng.py /
CMD ["python", "rng.py"]
EXPOSE 80

View File

@@ -28,5 +28,5 @@ def rng(how_many_bytes):
if __name__ == "__main__":
app.run(port=80)
app.run(host="0.0.0.0", port=80, threaded=False)

View File

@@ -1,8 +1,7 @@
FROM node:23-alpine
WORKDIR /app
FROM node:4-slim
RUN npm install express
RUN npm install morgan
RUN npm install redis@5
COPY . .
RUN npm install redis@3
COPY files/ /files/
COPY webui.js /
CMD ["node", "webui.js"]
EXPOSE 80

View File

@@ -1,34 +1,26 @@
import express from 'express';
import morgan from 'morgan';
import { createClient } from 'redis';
var client = await createClient({
url: "redis://redis",
socket: {
family: 0
}
})
.on("error", function (err) {
console.error("Redis error", err);
})
.connect();
var express = require('express');
var app = express();
var redis = require('redis');
app.use(morgan('common'));
var client = redis.createClient(6379, 'redis');
client.on("error", function (err) {
console.error("Redis error", err);
});
app.get('/', function (req, res) {
res.redirect('/index.html');
});
app.get('/json', async(req, res) => {
var coins = await client.hLen('wallet');
var hashes = await client.get('hashes');
var now = Date.now() / 1000;
res.json({
coins: coins,
hashes: hashes,
now: now
app.get('/json', function (req, res) {
client.hlen('wallet', function (err, coins) {
client.get('hashes', function (err, hashes) {
var now = Date.now() / 1000;
res.json( {
coins: coins,
hashes: hashes,
now: now
});
});
});
});

View File

@@ -1,6 +1,5 @@
FROM python:alpine
WORKDIR /app
RUN pip install redis
RUN pip install requests
COPY worker.py .
COPY worker.py /
CMD ["python", "worker.py"]

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
data:
use-forwarded-headers: true
compute-full-forwarded-for: true
use-proxy-protocol: true

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: ingress-nginx

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
target:
kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
target:
kind: Service

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
annotations:
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: true
service.beta.kubernetes.io/scw-loadbalancer-use-hostname: true

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: kyverno

View File

@@ -1,72 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: flux-multi-tenancy
spec:
validationFailureAction: enforce
rules:
- name: serviceAccountName
exclude:
resources:
namespaces:
- flux-system
match:
resources:
kinds:
- Kustomization
- HelmRelease
validate:
message: ".spec.serviceAccountName is required"
pattern:
spec:
serviceAccountName: "?*"
- name: kustomizationSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- Kustomization
preconditions:
any:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"
- name: helmReleaseSourceRefNamespace
exclude:
resources:
namespaces:
- flux-system
- ingress-nginx
- kyverno
- monitoring
- openebs
match:
resources:
kinds:
- HelmRelease
preconditions:
any:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: ""
validate:
message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
deny:
conditions:
- key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
operator: NotEquals
value: "{{request.object.metadata.namespace}}"
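
To make the policy's intent concrete, here is a minimal sketch of a tenant Flux Kustomization that would pass both rules above (the serviceAccountName, sourceRef, and interval values are illustrative assumptions, not taken from this commit):

apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: rocky
  namespace: rocky-test
spec:
  interval: 5m
  # satisfies the "serviceAccountName is required" rule
  serviceAccountName: rocky
  sourceRef:
    kind: GitRepository
    name: rocky
    # if set, must equal metadata.namespace (kustomizationSourceRefNamespace rule)
    namespace: rocky-test
  path: ./k8s/plain
  prune: true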

View File

@@ -1,29 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana
namespace: monitoring
spec:
ingressClassName: nginx
rules:
- host: grafana.test.metal.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kube-prometheus-stack-grafana
port:
number: 80

View File

@@ -1,35 +0,0 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-from-other-namespaces
spec:
podSelector: {}
ingress:
- from:
- podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-webui
spec:
podSelector:
matchLabels:
app: web
ingress:
- from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-db
spec:
podSelector:
matchLabels:
app: db
ingress:
- from:
- podSelector:
matchLabels:
app: web

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: flux-system
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.5.1
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest
name: openebs

View File

@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
files:
- values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml

View File

@@ -1,6 +0,0 @@
nameReference:
- kind: ConfigMap
version: v1
fieldSpecs:
- path: spec/valuesFrom/name
kind: HelmRelease

View File

@@ -1,15 +0,0 @@
# helm install openebs --namespace openebs openebs/openebs
# --set engines.replicated.mayastor.enabled=false
# --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
# --create-namespace
engines:
replicated:
mayastor:
enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
lvmNode:
kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
hostpathClass:
isDefaultClass: true

View File

@@ -1,38 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
namespace: rocky-test
name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rocky-pv-access
rules:
- apiGroups: [""]
resources: [persistentvolumes]
verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
toolkit.fluxcd.io/tenant: rocky
name: rocky-reconciler2
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: gotk:rocky-test:reconciler
- kind: ServiceAccount
name: rocky
namespace: rocky-test

View File

@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rocky
namespace: rocky-test
spec:
ingressClassName: nginx
rules:
- host: rocky.test.mybestdomain.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: web
port:
number: 80

View File

@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
target:
kind: Kustomization

View File

@@ -1,7 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
name: rocky
namespace: rocky-test
spec:
path: ./k8s/plain

View File

@@ -1,33 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 1
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- name: "80"
port: 80
selector:
app: blue

View File

@@ -1,12 +0,0 @@
# This removes the haproxy Deployment.
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
$patch: delete
kind: Deployment
apiVersion: apps/v1
metadata:
name: haproxy

View File

@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
# Within a Kustomization, it is not possible to specify in which
# order transformations (patches, replacements, etc) should be
# executed. If we want to execute transformations in a specific
# order, one possibility is to put them in individual components,
# and then invoke these components in the order we want.
# It works, but it creates an extra level of indirection, which
# reduces readability and complicates maintenance.
components:
- setup
- cleanup

View File

@@ -1,20 +0,0 @@
global
#log stdout format raw local0
#daemon
maxconn 32
defaults
#log global
timeout client 1h
timeout connect 1h
timeout server 1h
mode http
option abortonclose
frontend metrics
bind :9000
http-request use-service prometheus-exporter
frontend ollama_frontend
bind :8000
default_backend ollama_backend
maxconn 16
backend ollama_backend
server ollama_server localhost:11434 check

View File

@@ -1,39 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: haproxy
name: haproxy
spec:
selector:
matchLabels:
app: haproxy
template:
metadata:
labels:
app: haproxy
spec:
volumes:
- name: haproxy
configMap:
name: haproxy
containers:
- image: haproxy:3.0
name: haproxy
volumeMounts:
- name: haproxy
mountPath: /usr/local/etc/haproxy
readinessProbe:
httpGet:
port: 9000
ports:
- name: haproxy
containerPort: 8000
- name: metrics
containerPort: 9000
resources:
requests:
cpu: 0.05
limits:
cpu: 1

View File

@@ -1,75 +0,0 @@
# This adds a sidecar to the ollama Deployment, by taking
# the pod template and volumes from the haproxy Deployment.
# The idea is to allow running ollama+haproxy in two modes:
# - separately (each with their own Deployment),
# - together in the same Pod, sidecar-style.
# The YAML files define how to run them separately, and this
# "replacements" directive fetches a specific volume and
# a specific container from the haproxy Deployment, to add
# them to the ollama Deployment.
#
# This would be simpler if kustomize allowed appending to or
# merging lists in "replacements"; but that doesn't seem to be
# possible at the moment.
#
# It would be even better if kustomize allowed performing
# a strategic merge using a fieldPath as the source, because
# we could merge both the containers and the volumes in a
# single operation.
#
# Note that technically, it might be possible to layer
# multiple kustomizations so that one generates the patch
# to be used in another; but it wouldn't be very readable
# or maintainable, so we decided not to do that right now.
#
# However, the current approach (fetching fields one by one)
# has an advantage: it could let us transform the haproxy
# container into a real sidecar (i.e. an initContainer with
# a restartPolicy=Always).
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
resources:
- haproxy.yaml
configMapGenerator:
- name: haproxy
files:
- haproxy.cfg
replacements:
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.volumes.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.volumes.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.containers.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy].ports.[name=haproxy].containerPort
targets:
- select:
kind: Service
name: ollama
fieldPaths:
- spec.ports.[name=11434].targetPort
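
For reference, the comment above mentions turning haproxy into a "real" sidecar; here is a rough sketch of what that would look like in the ollama pod template, assuming the haproxy container definition is simply moved under initContainers (an illustration of the native sidecar pattern, not something done in this commit):

spec:
  template:
    spec:
      initContainers:
        - name: haproxy
          image: haproxy:3.0
          # restartPolicy: Always is what makes an init container behave as a sidecar
          restartPolicy: Always
          volumeMounts:
            - name: haproxy
              mountPath: /usr/local/etc/haproxy
          ports:
            - name: haproxy
              containerPort: 8000
      containers:
        - name: ollama
          image: ollama/ollama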

View File

@@ -1,34 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 2
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- port: 80
selector:
app: blue

View File

@@ -1,94 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Each of these YAML files contains a Deployment and a Service.
# The blue.yaml file is here just to demonstrate that the rest
# of this Kustomization can be precisely scoped to the ollama
# Deployment (and Service): the blue Deployment and Service
# shouldn't be affected by our kustomize transformers.
resources:
- ollama.yaml
- blue.yaml
buildMetadata:
# Add a label app.kubernetes.io/managed-by=kustomize-vX.Y.Z
- managedByLabel
# Add an annotation config.kubernetes.io/origin, indicating:
# - which file defined that resource;
# - if it comes from a git repository, which one, and which
# ref (tag, branch...) it was.
- originAnnotations
# Add an annotation alpha.config.kubernetes.io/transformations
# indicating which patches and other transformers have changed
# each resource.
- transformerAnnotations
# Let's generate a ConfigMap with literal values.
# Note that this will actually add a suffix to the name of the
# ConfigMaps (e.g.: ollama-8bk8bd8m76) and it will update all
# references to the ConfigMap (e.g. in Deployment manifests)
# accordingly. The suffix is a hash of the ConfigMap contents,
# so that basically, if the ConfigMap is edited, any workload
# using that ConfigMap will automatically do a rolling update.
configMapGenerator:
- name: ollama
literals:
- "model=gemma3:270m"
- "prompt=If you visit Paris, I suggest that you"
- "queue=4"
name: ollama
patches:
# The Deployment manifest in ollama.yaml doesn't specify
# resource requests and limits, so that it can run on any
# cluster (including resource-constrained local clusters
# like KiND or minikube). The example below adds CPU
# requests and limits using a strategic merge patch.
# The patch is inlined here, but it could also be put
# in a file and referenced with "path: xxxxxx.yaml".
- patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
spec:
template:
spec:
containers:
- name: ollama
resources:
requests:
cpu: 1
limits:
cpu: 2
# This will have the same effect, with one little detail:
# JSON patches cannot specify containers by name, so this
# assumes that the ollama container is the first one in
# the pod template (whereas the strategic merge patch can
# use "merge keys" and identify containers by their name).
#- target:
# kind: Deployment
# name: ollama
# patch: |
# - op: add
# path: /spec/template/spec/containers/0/resources
# value:
# requests:
# cpu: 1
# limits:
# cpu: 2
# A "component" is a bit like a "base", in the sense that
# it lets us define some reusable resources and behaviors.
# There is a key difference, though:
# - a "base" will be evaluated in isolation: it will
# generate+transform some resources, then these resources
# will be included in the main Kustomization;
# - a "component" has access to all the resources that
# have been generated by the main Kustomization, which
# means that it can transform them (with patches etc).
components:
- add-haproxy-sidecar
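
To illustrate the name-suffixing behavior described in the configMapGenerator comment above, here is roughly what a configMapKeyRef in the ollama Deployment looks like before and after kustomize build (the suffix reuses the example hash from the comment; the real one depends on the ConfigMap contents):

# as written in ollama.yaml:
env:
  - name: MODEL
    valueFrom:
      configMapKeyRef:
        name: ollama
        key: model

# after kustomize build (illustrative hash):
env:
  - name: MODEL
    valueFrom:
      configMapKeyRef:
        name: ollama-8bk8bd8m76
        key: model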

View File

@@ -1,73 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ollama
name: ollama
spec:
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
volumes:
- name: ollama
hostPath:
path: /opt/ollama
type: DirectoryOrCreate
containers:
- image: ollama/ollama
name: ollama
env:
- name: OLLAMA_MAX_QUEUE
valueFrom:
configMapKeyRef:
name: ollama
key: queue
- name: MODEL
valueFrom:
configMapKeyRef:
name: ollama
key: model
volumeMounts:
- name: ollama
mountPath: /root/.ollama
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- ollama pull $MODEL
livenessProbe:
httpGet:
port: 11434
readinessProbe:
exec:
command:
- /bin/sh
- -c
- ollama show $MODEL
ports:
- name: ollama
containerPort: 11434
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ollama
name: ollama
spec:
ports:
- name: "11434"
port: 11434
protocol: TCP
targetPort: 11434
selector:
app: ollama
type: ClusterIP

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices
- redis

View File

@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices.yaml
transformers:
- |
apiVersion: builtin
kind: PrefixSuffixTransformer
metadata:
name: use-ghcr-io
prefix: ghcr.io/
fieldSpecs:
- path: spec/template/spec/containers/image

View File

@@ -1,125 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- redis.yaml

View File

@@ -1,35 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP

View File

@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker

View File

@@ -1,30 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
targets:
- select:
kind: Deployment
labelSelector: "app in (hasher,rng,webui,worker)"
# It will soon be possible to use regexes in replacement selectors,
# meaning that the "labelSelector:" above can be replaced with the
# following "name:" selector which is a tiny bit simpler:
#name: hasher|rng|webui|worker
# Regex support in replacement selectors was added by this PR:
# https://github.com/kubernetes-sigs/kustomize/pull/5863
# This PR was merged in August 2025, but as of October 2025, the
# latest release of Kustomize is 5.7.1, which was released in July.
# Hopefully the feature will be available in the next release :)
# Another possibility would be to select all Deployments, and then
# reject the one(s) for which we don't want to update the registry;
# for instance:
#reject:
# kind: Deployment
# name: redis
fieldPaths:
- spec.template.spec.containers.*.image
options:
delimiter: "/"
index: 0
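
As a worked example of the delimiter/index options above: the target image value is split on "/", element 0 is replaced by the source value, and the parts are rejoined, so (assuming the sourceValue is applied as written) an image field changes roughly like this:

# before the replacement:
image: dockercoins/hasher:v0.1
# after the replacement (split on "/", index 0 replaced by "ghcr.io/dockercoins"):
image: ghcr.io/dockercoins/hasher:v0.1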

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
validationFailureAction: enforce
rules:
- name: ensure-pod-color-is-valid
match:
@@ -17,6 +18,5 @@ spec:
operator: NotIn
values: [ red, green, blue ]
validate:
failureAction: Enforce
message: "If it exists, the label color must be red, green, or blue."
deny: {}

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
@@ -21,7 +22,6 @@ spec:
operator: NotEquals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be changed."
deny:
conditions:

View File

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
@@ -21,6 +22,7 @@ spec:
operator: Equals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be removed."
deny: {}
deny:
conditions:

View File

@@ -66,7 +66,7 @@ Here is where we look for credentials for each provider:
- Civo: CLI configuration file (`~/.civo.json`)
- Digital Ocean: CLI configuration file (`~/.config/doctl/config.yaml`)
- Exoscale: CLI configuration file (`~/.config/exoscale/exoscale.toml`)
- Google Cloud: we're using "Application Default Credentials (ADC)"; run `gcloud auth application-default login`; note that we'll use the default "project" set in `gcloud` unless you set the `GOOGLE_PROJECT` environment variable
- Google Cloud: FIXME, note that the project name is currently hard-coded to `prepare-tf`
- Hetzner: CLI configuration file (`~/.config/hcloud/cli.toml`)
- Linode: CLI configuration file (`~/.config/linode-cli`)
- OpenStack: you will need to write a tfvars file (check [that example](terraform/virtual-machines/openstack/tfvars.example))

View File

@@ -36,12 +36,8 @@ _populate_zone() {
ZONE_ID=$(_get_zone_id $1)
shift
for IPADDR in $*; do
case "$IPADDR" in
*.*) TYPE=A;;
*:*) TYPE=AAAA;;
esac
cloudflare zones/$ZONE_ID/dns_records "name=*" "type=$TYPE" "content=$IPADDR"
cloudflare zones/$ZONE_ID/dns_records "name=\@" "type=$TYPE" "content=$IPADDR"
cloudflare zones/$ZONE_ID/dns_records "name=*" "type=A" "content=$IPADDR"
cloudflare zones/$ZONE_ID/dns_records "name=\@" "type=A" "content=$IPADDR"
done
}

View File

@@ -5,53 +5,34 @@
# 10% CPU
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb
#
# With vspod:
# 800 MB RAM
# 33% CPU
#
set -e
KONKTAG=konk
PROVIDER=linode
STUDENTS=5
PROVIDER=scaleway
case "$PROVIDER" in
linode)
export TF_VAR_node_size=g6-standard-6
export TF_VAR_location=fr-par
export TF_VAR_location=eu-west
;;
scaleway)
export TF_VAR_node_size=PRO2-XS
# For tiny testing purposes, these are okay too:
#export TF_VAR_node_size=PLAY2-NANO
export TF_VAR_location=fr-par-2
;;
esac
./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag konk
# set kubeconfig file
export KUBECONFIG=~/kubeconfig
if [ "$PROVIDER" = "kind" ]; then
kind create cluster --name $KONKTAG
ADDRTYPE=InternalIP
else
if ! [ -f tags/$KONKTAG/stage2/kubeconfig.101 ]; then
./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag $KONKTAG
fi
cp tags/$KONKTAG/stage2/kubeconfig.101 $KUBECONFIG
ADDRTYPE=ExternalIP
fi
cp tags/konk/stage2/kubeconfig.101 $KUBECONFIG
# set external_ip labels
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="'$ADDRTYPE'")].address}{"\n"}{end}' |
while read node address ignoredaddresses; do
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="ExternalIP")].address}{"\n"}{end}' |
while read node address; do
kubectl label node $node external_ip=$address
done
# vcluster all the things
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students $STUDENTS
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students 50
# install prometheus stack because that's cool
helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \

View File

@@ -49,41 +49,6 @@ _cmd_clean() {
done
}
_cmd codeserver "Install code-server on the clusters"
_cmd_codeserver() {
TAG=$1
need_tag
ARCH=${ARCHITECTURE-amd64}
CODESERVER_VERSION=4.96.4
CODESERVER_URL=\$GITHUB/coder/code-server/releases/download/v${CODESERVER_VERSION}/code-server-${CODESERVER_VERSION}-linux-${ARCH}.tar.gz
pssh "
set -e
i_am_first_node || exit 0
if ! [ -x /usr/local/bin/code-server ]; then
curl -fsSL $CODESERVER_URL | sudo tar zx -C /opt
sudo ln -s /opt/code-server-${CODESERVER_VERSION}-linux-${ARCH}/bin/code-server /usr/local/bin/code-server
sudo -u $USER_LOGIN -H code-server --install-extension ms-azuretools.vscode-docker
sudo -u $USER_LOGIN -H code-server --install-extension ms-kubernetes-tools.vscode-kubernetes-tools
sudo -u $USER_LOGIN -H mkdir -p /home/$USER_LOGIN/.local/share/code-server/User
echo '{\"workbench.startupEditor\": \"terminal\"}' | sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.local/share/code-server/User/settings.json
sudo -u $USER_LOGIN mkdir -p /home/$USER_LOGIN/.config/systemd/user
sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.config/systemd/user/code-server.service <<EOF
[Unit]
Description=code-server
[Install]
WantedBy=default.target
[Service]
ExecStart=/usr/local/bin/code-server --bind-addr [::]:1789
Restart=always
EOF
sudo systemctl --user -M $USER_LOGIN@ enable code-server.service --now
sudo loginctl enable-linger $USER_LOGIN
fi"
}
_cmd createuser "Create the user that students will use"
_cmd_createuser() {
TAG=$1
@@ -230,7 +195,7 @@ _cmd_create() {
;;
*) die "Invalid mode: $MODE (supported modes: mk8s, pssh)." ;;
esac
if ! [ -f "$SETTINGS" ]; then
die "Settings file ($SETTINGS) not found."
fi
@@ -270,27 +235,7 @@ _cmd_create() {
ln -s ../../$SETTINGS tags/$TAG/settings.env.orig
cp $SETTINGS tags/$TAG/settings.env
# For Google Cloud, it is necessary to specify which "project" to use.
# Unfortunately, the Terraform provider doesn't seem to have a way
# to detect which Google Cloud project you want to use; it has to be
# specified one way or another. Let's decide that it should be set with
# the GOOGLE_PROJECT env var; and if that var is not set, we'll try to
# figure it out from gcloud.
# (See https://github.com/hashicorp/terraform-provider-google/issues/10907#issuecomment-1015721600)
# Since we need that variable to be set each time we'll call Terraform
# (e.g. when destroying the environment), let's save it to the settings.env
# file.
if [ "$PROVIDER" = "googlecloud" ]; then
if ! [ "$GOOGLE_PROJECT" ]; then
info "PROVIDER=googlecloud but GOOGLE_PROJECT is not set. Detecting it."
GOOGLE_PROJECT=$(gcloud config get project)
info "GOOGLE_PROJECT will be set to '$GOOGLE_PROJECT'."
fi
echo "export GOOGLE_PROJECT=$GOOGLE_PROJECT" >> tags/$TAG/settings.env
fi
. tags/$TAG/settings.env
. $SETTINGS
echo $MODE > tags/$TAG/mode
echo $PROVIDER > tags/$TAG/provider
@@ -317,9 +262,20 @@ _cmd_create() {
if [ "$CLUSTERSIZE" ]; then
echo nodes_per_cluster = $CLUSTERSIZE >> terraform.tfvars
fi
for RETRY in 1 2 3; do
if terraform apply -auto-approve; then
touch terraform.ok
break
fi
done
if ! [ -f terraform.ok ]; then
die "Terraform failed."
fi
)
sep
info "Successfully created $COUNT instances with tag $TAG"
echo create_ok > tags/$TAG/status
# If the settings.env file has a "STEPS" field,
# automatically execute all the actions listed in that field.
@@ -375,8 +331,8 @@ _cmd_clusterize() {
pssh -I < tags/$TAG/clusters.tsv "
grep -w \$PSSH_HOST | tr '\t' '\n' > /tmp/cluster"
pssh "
echo \$PSSH_HOST > /tmp/ip_address
head -n 1 /tmp/cluster | sudo tee /etc/ip_address_of_first_node
echo \$PSSH_HOST > /tmp/ipv4
head -n 1 /tmp/cluster | sudo tee /etc/ipv4_of_first_node
echo ${CLUSTERPREFIX}1 | sudo tee /etc/name_of_first_node
echo HOSTIP=\$PSSH_HOST | sudo tee -a /etc/environment
NODEINDEX=\$((\$PSSH_NODENUM%$CLUSTERSIZE+1))
@@ -394,13 +350,9 @@ _cmd_clusterize() {
done < /tmp/cluster
"
jq --raw-input --compact-output \
--arg USER_LOGIN "$USER_LOGIN" --arg USER_PASSWORD "$USER_PASSWORD" '
{
"login": $USER_LOGIN,
"password": $USER_PASSWORD,
"ipaddrs": .
}' < tags/$TAG/clusters.tsv > tags/$TAG/logins.jsonl
while read line; do
printf '{"login": "%s", "password": "%s", "ipaddrs": "%s"}\n' "$USER_LOGIN" "$USER_PASSWORD" "$line"
done < tags/$TAG/clusters.tsv > tags/$TAG/logins.jsonl
echo cluster_ok > tags/$TAG/status
}
@@ -459,7 +411,7 @@ _cmd_docker() {
set -e
### Install docker-compose.
sudo curl -fsSL -o /usr/local/bin/docker-compose \
\$GITHUB/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-$COMPOSE_PLATFORM
https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-$COMPOSE_PLATFORM
sudo chmod +x /usr/local/bin/docker-compose
docker-compose version
@@ -467,7 +419,7 @@ _cmd_docker() {
##VERSION## https://github.com/docker/machine/releases
MACHINE_VERSION=v0.16.2
sudo curl -fsSL -o /usr/local/bin/docker-machine \
\$GITHUB/docker/machine/releases/download/\$MACHINE_VERSION/docker-machine-\$(uname -s)-\$(uname -m)
https://github.com/docker/machine/releases/download/\$MACHINE_VERSION/docker-machine-\$(uname -s)-\$(uname -m)
sudo chmod +x /usr/local/bin/docker-machine
docker-machine version
"
@@ -500,10 +452,10 @@ _cmd_kubebins() {
set -e
cd /usr/local/bin
if ! [ -x etcd ]; then
curl -L \$GITHUB/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$ARCH.tar.gz \
curl -L https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-$ARCH.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
if ! [ -x kube-apiserver ]; then
if ! [ -x hyperkube ]; then
##VERSION##
curl -L https://dl.k8s.io/$K8SBIN_VERSION/kubernetes-server-linux-$ARCH.tar.gz \
| sudo tar --strip-components=3 -zx \
@@ -512,7 +464,7 @@ _cmd_kubebins() {
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
if ! [ -x bridge ]; then
curl -L \$GITHUB/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$ARCH-$CNI_VERSION.tgz \
curl -L https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-$ARCH-$CNI_VERSION.tgz \
| sudo tar -zx
fi
"
@@ -562,18 +514,6 @@ EOF"
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
echo 'alias k=kubecolor' | sudo tee /etc/bash_completion.d/k &&
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
# Install helm early
# (so that we can use it to install e.g. Cilium etc.)
ARCH=${ARCHITECTURE-amd64}
HELM_VERSION=3.19.1
pssh "
if [ ! -x /usr/local/bin/helm ]; then
curl -fsSL https://get.helm.sh/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz |
sudo tar --strip-components=1 --wildcards -zx -C /usr/local/bin '*/helm'
helm completion bash | sudo tee /etc/bash_completion.d/helm
helm version
fi"
}
_cmd kubeadm "Setup kubernetes clusters with kubeadm"
@@ -597,18 +537,6 @@ _cmd_kubeadm() {
# Initialize kube control plane
pssh --timeout 200 "
IPV6=\$(ip -json a | jq -r '.[].addr_info[] | select(.scope==\"global\" and .family==\"inet6\") | .local' | head -n1)
if [ \"\$IPV6\" ]; then
ADVERTISE=\"advertiseAddress: \$IPV6\"
SERVICE_SUBNET=\"serviceSubnet: fdff::/112\"
touch /tmp/install-cilium-ipv6-only
touch /tmp/ipv6-only
else
ADVERTISE=
SERVICE_SUBNET=
touch /tmp/install-weave
fi
echo IPV6=\$IPV6 ADVERTISE=\$ADVERTISE
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
kubeadm token generate > /tmp/token &&
cat >/tmp/kubeadm-config.yaml <<EOF
@@ -616,12 +544,9 @@ kind: InitConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- token: \$(cat /tmp/token)
localAPIEndpoint:
\$ADVERTISE
nodeRegistration:
ignorePreflightErrors:
- NumCPU
- FileContent--proc-sys-net-ipv6-conf-default-forwarding
$IGNORE_SYSTEMVERIFICATION
$IGNORE_SWAP
$IGNORE_IPTABLES
@@ -648,9 +573,7 @@ kind: ClusterConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
apiServer:
certSANs:
- \$(cat /tmp/ip_address)
networking:
\$SERVICE_SUBNET
- \$(cat /tmp/ipv4)
$CLUSTER_CONFIGURATION_KUBERNETESVERSION
EOF
sudo kubeadm init --config=/tmp/kubeadm-config.yaml
@@ -669,20 +592,7 @@ EOF
# Install weave as the pod network
pssh "
if i_am_first_node; then
if [ -f /tmp/install-weave ]; then
curl -fsSL \$GITHUB/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml |
sed s,weaveworks/weave,quay.io/rackspace/weave, |
kubectl apply -f-
fi
if [ -f /tmp/install-cilium-ipv6-only ]; then
helm upgrade -i cilium cilium --repo https://helm.cilium.io/ \
--namespace kube-system \
--set cni.chainingMode=portmap \
--set ipv6.enabled=true \
--set ipv4.enabled=false \
--set underlayProtocol=ipv6 \
--version 1.18.3
fi
kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml
fi"
# FIXME this is a gross hack to add the deployment key to our SSH agent,
@@ -705,16 +615,13 @@ EOF
fi
# Install metrics server
pssh -I <../k8s/metrics-server.yaml "
pssh "
if i_am_first_node; then
kubectl apply -f-
fi"
# It would be nice to be able to use that helm chart for metrics-server.
# Unfortunately, the charts themselves are on github.com and we want to
# avoid that due to their lack of IPv6 support.
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
#helm upgrade --install metrics-server \
# --repo https://kubernetes-sigs.github.io/metrics-server/ metrics-server \
# --namespace kube-system --set args={--kubelet-insecure-tls}
fi"
}
_cmd kubetools "Install a bunch of CLI tools for Kubernetes"
@@ -741,7 +648,7 @@ _cmd_kubetools() {
# Install ArgoCD CLI
##VERSION## https://github.com/argoproj/argo-cd/releases/latest
URL=\$GITHUB/argoproj/argo-cd/releases/latest/download/argocd-linux-${ARCH}
URL=https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-${ARCH}
pssh "
if [ ! -x /usr/local/bin/argocd ]; then
sudo curl -o /usr/local/bin/argocd -fsSL $URL
@@ -754,7 +661,7 @@ _cmd_kubetools() {
##VERSION## https://github.com/fluxcd/flux2/releases
FLUX_VERSION=2.3.0
FILENAME=flux_${FLUX_VERSION}_linux_${ARCH}
URL=\$GITHUB/fluxcd/flux2/releases/download/v$FLUX_VERSION/$FILENAME.tar.gz
URL=https://github.com/fluxcd/flux2/releases/download/v$FLUX_VERSION/$FILENAME.tar.gz
pssh "
if [ ! -x /usr/local/bin/flux ]; then
curl -fsSL $URL |
@@ -769,7 +676,7 @@ _cmd_kubetools() {
set -e
if ! [ -x /usr/local/bin/kctx ]; then
cd /tmp
git clone \$GITHUB/ahmetb/kubectx
git clone https://github.com/ahmetb/kubectx
sudo cp kubectx/kubectx /usr/local/bin/kctx
sudo cp kubectx/kubens /usr/local/bin/kns
sudo cp kubectx/completion/*.bash /etc/bash_completion.d
@@ -780,7 +687,7 @@ _cmd_kubetools() {
set -e
if ! [ -d /opt/kube-ps1 ]; then
cd /tmp
git clone \$GITHUB/jonmosco/kube-ps1
git clone https://github.com/jonmosco/kube-ps1
sudo mv kube-ps1 /opt/kube-ps1
sudo -u $USER_LOGIN sed -i s/docker-prompt/kube_ps1/ /home/$USER_LOGIN/.bashrc &&
sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc <<EOF
@@ -797,7 +704,7 @@ EOF
##VERSION## https://github.com/stern/stern/releases
STERN_VERSION=1.29.0
FILENAME=stern_${STERN_VERSION}_linux_${ARCH}
URL=\$GITHUB/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
URL=https://github.com/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
pssh "
if [ ! -x /usr/local/bin/stern ]; then
curl -fsSL $URL |
@@ -808,11 +715,9 @@ EOF
fi"
# Install helm
HELM_VERSION=3.19.1
pssh "
if [ ! -x /usr/local/bin/helm ]; then
curl -fsSL https://get.helm.sh/helm-v${HELM_VERSION}-linux-${ARCH}.tar.gz |
sudo tar --strip-components=1 --wildcards -zx -C /usr/local/bin '*/helm'
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 | sudo bash &&
helm completion bash | sudo tee /etc/bash_completion.d/helm
helm version
fi"
@@ -820,7 +725,7 @@ EOF
# Install kustomize
##VERSION## https://github.com/kubernetes-sigs/kustomize/releases
KUSTOMIZE_VERSION=v5.4.1
URL=\$GITHUB/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
pssh "
if [ ! -x /usr/local/bin/kustomize ]; then
curl -fsSL $URL |
@@ -837,17 +742,15 @@ EOF
pssh "
if [ ! -x /usr/local/bin/ship ]; then
##VERSION##
curl -fsSL \$GITHUB/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_$ARCH.tar.gz |
curl -fsSL https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_$ARCH.tar.gz |
sudo tar -C /usr/local/bin -zx ship
fi"
# Install the AWS IAM authenticator
AWSIAMAUTH_VERSION=0.7.8
URL=\$GITHUB/kubernetes-sigs/aws-iam-authenticator/releases/download/v${AWSIAMAUTH_VERSION}/aws-iam-authenticator_${AWSIAMAUTH_VERSION}_linux_${ARCH}
pssh "
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
##VERSION##
sudo curl -fsSLo /usr/local/bin/aws-iam-authenticator $URL
sudo curl -fsSLo /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/$ARCH/aws-iam-authenticator
sudo chmod +x /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
fi"
@@ -857,17 +760,17 @@ EOF
if [ ! -x /usr/local/bin/jless ]; then
##VERSION##
sudo apt-get install -y libxcb-render0 libxcb-shape0 libxcb-xfixes0
wget \$GITHUB/PaulJuliusMartinez/jless/releases/download/v0.9.0/jless-v0.9.0-x86_64-unknown-linux-gnu.zip
wget https://github.com/PaulJuliusMartinez/jless/releases/download/v0.9.0/jless-v0.9.0-x86_64-unknown-linux-gnu.zip
unzip jless-v0.9.0-x86_64-unknown-linux-gnu
sudo mv jless /usr/local/bin
fi"
# Install the krew package manager
pssh "
if [ ! -d /home/$USER_LOGIN/.krew ] && [ ! -f /tmp/ipv6-only ]; then
if [ ! -d /home/$USER_LOGIN/.krew ]; then
cd /tmp &&
KREW=krew-linux_$ARCH
curl -fsSL \$GITHUB/kubernetes-sigs/krew/releases/latest/download/\$KREW.tar.gz |
curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/\$KREW.tar.gz |
tar -zxf- &&
sudo -u $USER_LOGIN -H ./\$KREW install krew &&
echo export PATH=/home/$USER_LOGIN/.krew/bin:\\\$PATH | sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc
@@ -875,7 +778,7 @@ EOF
# Install kubecolor
KUBECOLOR_VERSION=0.4.0
URL=\$GITHUB/kubecolor/kubecolor/releases/download/v${KUBECOLOR_VERSION}/kubecolor_${KUBECOLOR_VERSION}_linux_${ARCH}.tar.gz
URL=https://github.com/kubecolor/kubecolor/releases/download/v${KUBECOLOR_VERSION}/kubecolor_${KUBECOLOR_VERSION}_linux_${ARCH}.tar.gz
pssh "
if [ ! -x /usr/local/bin/kubecolor ]; then
##VERSION##
@@ -887,7 +790,7 @@ EOF
pssh "
if [ ! -x /usr/local/bin/k9s ]; then
FILENAME=k9s_Linux_$ARCH.tar.gz &&
curl -fsSL \$GITHUB/derailed/k9s/releases/latest/download/\$FILENAME |
curl -fsSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
sudo tar -C /usr/local/bin -zx k9s
k9s version
fi"
@@ -896,7 +799,7 @@ EOF
pssh "
if [ ! -x /usr/local/bin/popeye ]; then
FILENAME=popeye_Linux_$ARCH.tar.gz &&
curl -fsSL \$GITHUB/derailed/popeye/releases/latest/download/\$FILENAME |
curl -fsSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
sudo tar -C /usr/local/bin -zx popeye
popeye version
fi"
@@ -909,7 +812,7 @@ EOF
if [ ! -x /usr/local/bin/tilt ]; then
TILT_VERSION=0.33.13
FILENAME=tilt.\$TILT_VERSION.linux.$TILT_ARCH.tar.gz
curl -fsSL \$GITHUB/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
curl -fsSL https://github.com/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
sudo tar -C /usr/local/bin -zx tilt
tilt completion bash | sudo tee /etc/bash_completion.d/tilt
tilt version
@@ -927,7 +830,7 @@ EOF
# Install Kompose
pssh "
if [ ! -x /usr/local/bin/kompose ]; then
curl -fsSLo kompose \$GITHUB/kubernetes/kompose/releases/latest/download/kompose-linux-$ARCH &&
curl -fsSLo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-$ARCH &&
sudo install kompose /usr/local/bin
kompose completion bash | sudo tee /etc/bash_completion.d/kompose
kompose version
@@ -936,7 +839,7 @@ EOF
# Install KinD
pssh "
if [ ! -x /usr/local/bin/kind ]; then
curl -fsSLo kind \$GITHUB/kubernetes-sigs/kind/releases/latest/download/kind-linux-$ARCH &&
curl -fsSLo kind https://github.com/kubernetes-sigs/kind/releases/latest/download/kind-linux-$ARCH &&
sudo install kind /usr/local/bin
kind completion bash | sudo tee /etc/bash_completion.d/kind
kind version
@@ -945,7 +848,7 @@ EOF
# Install YTT
pssh "
if [ ! -x /usr/local/bin/ytt ]; then
curl -fsSLo ytt \$GITHUB/vmware-tanzu/carvel-ytt/releases/latest/download/ytt-linux-$ARCH &&
curl -fsSLo ytt https://github.com/vmware-tanzu/carvel-ytt/releases/latest/download/ytt-linux-$ARCH &&
sudo install ytt /usr/local/bin
ytt completion bash | sudo tee /etc/bash_completion.d/ytt
ytt version
@@ -953,7 +856,7 @@ EOF
##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
KUBESEAL_VERSION=0.26.2
URL=\$GITHUB/bitnami-labs/sealed-secrets/releases/download/v${KUBESEAL_VERSION}/kubeseal-${KUBESEAL_VERSION}-linux-${ARCH}.tar.gz
URL=https://github.com/bitnami-labs/sealed-secrets/releases/download/v${KUBESEAL_VERSION}/kubeseal-${KUBESEAL_VERSION}-linux-${ARCH}.tar.gz
#case $ARCH in
#amd64) FILENAME=kubeseal-linux-amd64;;
#arm64) FILENAME=kubeseal-arm64;;
@@ -970,7 +873,7 @@ EOF
VELERO_VERSION=1.13.2
pssh "
if [ ! -x /usr/local/bin/velero ]; then
curl -fsSL \$GITHUB/vmware-tanzu/velero/releases/download/v$VELERO_VERSION/velero-v$VELERO_VERSION-linux-$ARCH.tar.gz |
curl -fsSL https://github.com/vmware-tanzu/velero/releases/download/v$VELERO_VERSION/velero-v$VELERO_VERSION-linux-$ARCH.tar.gz |
sudo tar --strip-components=1 --wildcards -zx -C /usr/local/bin '*/velero'
velero completion bash | sudo tee /etc/bash_completion.d/velero
velero version --client-only
@@ -980,7 +883,7 @@ EOF
KUBENT_VERSION=0.7.2
pssh "
if [ ! -x /usr/local/bin/kubent ]; then
curl -fsSL \$GITHUB/doitintl/kube-no-trouble/releases/download/${KUBENT_VERSION}/kubent-${KUBENT_VERSION}-linux-$ARCH.tar.gz |
curl -fsSL https://github.com/doitintl/kube-no-trouble/releases/download/${KUBENT_VERSION}/kubent-${KUBENT_VERSION}-linux-$ARCH.tar.gz |
sudo tar -zxvf- -C /usr/local/bin kubent
kubent --version
fi"
@@ -988,7 +891,7 @@ EOF
# Ngrok. Note that unfortunately, this is the x86_64 binary.
# We might have to rethink how to handle this for multi-arch environments.
pssh "
if [ ! -x /usr/local/bin/ngrok ] && [ ! -f /tmp/ipv6-only ]; then
if [ ! -x /usr/local/bin/ngrok ]; then
curl -fsSL https://bin.equinox.io/c/bNyj1mQVY4c/ngrok-v3-stable-linux-amd64.tgz |
sudo tar -zxvf- -C /usr/local/bin ngrok
fi"
@@ -1045,7 +948,7 @@ _cmd_logins() {
need_tag $TAG
cat tags/$TAG/logins.jsonl \
| jq -r '"\(if .codeServerPort then "\(.codeServerPort)\t" else "" end )\(.password)\tssh -l \(.login)\(if .port then " -p \(.port)" else "" end)\t\(.ipaddrs)"'
| jq -r '"\(.password)\tssh -l \(.login)\(if .port then " -p \(.port)" else "" end)\t\(.ipaddrs)"'
}
_cmd maketag "Generate a quasi-unique tag for a group of instances"
@@ -1087,9 +990,7 @@ _cmd_ping() {
TAG=$1
need_tag
# If we connect to our VMs over IPv6, the IP address is between brackets.
# Unfortunately, fping doesn't support that; so let's strip brackets here.
tr -d [] < tags/$TAG/ips.txt | fping
fping < tags/$TAG/ips.txt
}
_cmd stage2 "Finalize the setup of managed Kubernetes clusters"
@@ -1161,7 +1062,7 @@ _cmd_standardize() {
sudo netfilter-persistent start
fi"
# oracle-cloud-agent upgrades packages in the background.
# oracle-cloud-agent upgrades pacakges in the background.
# This breaks our deployment scripts, because when we invoke apt-get, it complains
# that the lock already exists (symptom: random "Exited with error code 100").
# Workaround: if we detect oracle-cloud-agent, remove it.
@@ -1173,15 +1074,6 @@ _cmd_standardize() {
sudo snap remove oracle-cloud-agent
sudo dpkg --remove --force-remove-reinstreq unified-monitoring-agent
fi"
# Check if a cachttps instance is available.
# (This is used to access GitHub on IPv6-only hosts.)
pssh "
if curl -fsSLI http://cachttps.internal:3131/https://github.com/ >/dev/null; then
echo GITHUB=http://cachttps.internal:3131/https://github.com
else
echo GITHUB=https://github.com
fi | sudo tee -a /etc/environment"
}
_cmd tailhist "Install history viewer on port 1088"
@@ -1197,8 +1089,8 @@ _cmd_tailhist () {
pssh "
set -e
sudo apt-get install unzip -y
wget -c \$GITHUB/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
unzip -o websocketd-0.3.0-linux_$ARCH.zip websocketd
wget -c https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
unzip websocketd-0.3.0-linux_$ARCH.zip websocketd
sudo mv websocketd /usr/local/bin/websocketd
sudo mkdir -p /opt/tailhist
sudo tee /opt/tailhist.service <<EOF
@@ -1221,35 +1113,14 @@ EOF
pssh -I sudo tee /opt/tailhist/index.html <lib/tailhist.html
}
_cmd terraform "Apply Terraform configuration to provision resources."
_cmd_terraform() {
TAG=$1
need_tag
echo terraforming > tags/$TAG/status
(
cd tags/$TAG
terraform apply -auto-approve
# The Terraform provider for Proxmox has a bug; sometimes it fails
# to obtain VM address from the QEMU agent. In that case, we put
# ERROR in the ips.txt file (instead of the VM IP address). Detect
# that so that we run Terraform again (this typically solves the issue).
if grep -q ERROR ips.txt; then
die "Couldn't obtain IP address of some machines. Try to re-run terraform."
fi
)
echo terraformed > tags/$TAG/status
}
_cmd tools "Install a bunch of useful tools (editors, git, jq...)"
_cmd_tools() {
TAG=$1
need_tag
pssh "
set -e
sudo apt-get -q update
sudo apt-get -qy install apache2-utils argon2 emacs-nox git httping htop jid joe jq mosh tree unzip
sudo apt-get -qy install apache2-utils emacs-nox git httping htop jid joe jq mosh python-setuptools tree unzip
# This is for VMs with broken PRNG (symptom: running docker-compose randomly hangs)
sudo apt-get -qy install haveged
"
@@ -1296,17 +1167,14 @@ fi
"
}
_cmd ssh "Open an SSH session to a node (first one by default)"
_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
TAG=$1
need_tag
if [ "$2" ]; then
ssh -l ubuntu -i tags/$TAG/id_rsa $2
else
IP=$(head -1 tags/$TAG/ips.txt)
info "Logging into $IP (default password: $USER_PASSWORD)"
ssh $SSHOPTS $USER_LOGIN@$IP
fi
IP=$(head -1 tags/$TAG/ips.txt)
info "Logging into $IP (default password: $USER_PASSWORD)"
ssh $SSHOPTS $USER_LOGIN@$IP
}
_cmd tags "List groups of VMs known locally"
@@ -1392,13 +1260,7 @@ _cmd_passwords() {
$0 ips "$TAG" | paste "$PASSWORDS_FILE" - | while read password nodes; do
info "Setting password for $nodes..."
for node in $nodes; do
echo $USER_LOGIN $password | ssh $SSHOPTS -i tags/$TAG/id_rsa ubuntu@$node '
read login password
echo $login:$password | sudo chpasswd
hashedpassword=$(echo -n $password | argon2 saltysalt$RANDOM -e)
sudo -u $login mkdir -p /home/$login/.config/code-server
echo "hashed-password: \"$hashedpassword\"" | sudo -u $login tee /home/$login/.config/code-server/config.yaml >/dev/null
'
echo $USER_LOGIN:$password | ssh $SSHOPTS -i tags/$TAG/id_rsa ubuntu@$node sudo chpasswd
done
done
info "Done."
@@ -1430,11 +1292,6 @@ _cmd_wait() {
pssh -l $SSH_USER "
if [ -d /var/lib/cloud ]; then
cloud-init status --wait
case $? in
0) exit 0;; # all is good
2) exit 0;; # recoverable error (happens with proxmox deprecated cloud-init payloads)
*) exit 1;; # all other problems
esac
fi"
}
@@ -1463,7 +1320,7 @@ _cmd_webssh() {
sudo apt-get install python3-tornado python3-paramiko -y"
pssh "
cd /opt
[ -d webssh ] || sudo git clone \$GITHUB/jpetazzo/webssh"
[ -d webssh ] || sudo git clone https://github.com/jpetazzo/webssh"
pssh "
for KEYFILE in /etc/ssh/*.pub; do
read a b c < \$KEYFILE; echo localhost \$a \$b
@@ -1477,7 +1334,7 @@ WantedBy=multi-user.target
[Service]
WorkingDirectory=/opt/webssh
ExecStart=/usr/bin/env python3 run.py --fbidhttp=false --port=1080 --policy=reject
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
Restart=always
@@ -1548,7 +1405,7 @@ test_vm() {
"whoami" \
"hostname -i" \
"ls -l /usr/local/bin/i_am_first_node" \
"grep . /etc/name_of_first_node /etc/ip_addres_of_first_node" \
"grep . /etc/name_of_first_node /etc/ipv4_of_first_node" \
"cat /etc/hosts" \
"hostnamectl status" \
"docker version | grep Version -B1" \

View File

@@ -23,14 +23,6 @@ pssh() {
# necessary - or down to zero, too.
sleep ${PSSH_DELAY_PRE-1}
# When things go wrong, it's convenient to ask pssh to show the output
# of the failed command. Let's make that easy with a DEBUG env var.
if [ "$DEBUG" ]; then
PSSH_I=-i
else
PSSH_I=""
fi
$(which pssh || which parallel-ssh) -h $HOSTFILE -l ubuntu \
--par ${PSSH_PARALLEL_CONNECTIONS-100} \
--timeout 300 \
@@ -39,6 +31,5 @@ pssh() {
-O UserKnownHostsFile=/dev/null \
-O StrictHostKeyChecking=no \
-O ForwardAgent=yes \
$PSSH_I \
"$@"
}

View File

@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -11,7 +11,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -10,7 +10,6 @@ USER_PASSWORD=training
KUBEVERSION=1.28.9
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -6,7 +6,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -6,7 +6,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -6,7 +6,6 @@ USER_LOGIN=docker
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
@@ -15,5 +14,6 @@ STEPS="
createuser
webssh
tailhist
cards
ips
"
"

View File

@@ -3,4 +3,4 @@ CLUSTERSIZE=5
USER_LOGIN=k8s
USER_PASSWORD=
STEPS="terraform stage2"
STEPS="stage2"

View File

@@ -6,7 +6,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -1,4 +1,4 @@
USER_LOGIN=k8s
USER_PASSWORD=
STEPS="terraform stage2"
STEPS="stage2"

View File

@@ -1,4 +1,4 @@
#export TF_VAR_node_size=GP4.4
#export TF_VAR_node_size=GP2.4
#export TF_VAR_node_size=g6-standard-6
#export TF_VAR_node_size=m7i.xlarge
@@ -11,7 +11,6 @@ USER_LOGIN=portal
USER_PASSWORD=CHANGEME
STEPS="
terraform
wait
standardize
clusterize

View File

@@ -2,11 +2,7 @@ terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
version = "~> 2.38.0"
}
helm = {
source = "hashicorp/helm"
version = "~> 3.0"
version = "2.16.1"
}
}
}
@@ -18,20 +14,6 @@ provider "kubernetes" {
config_path = "./kubeconfig.${index}"
}
provider "helm" {
alias = "cluster_${index}"
kubernetes = {
config_path = "./kubeconfig.${index}"
}
}
# Password used for SSH and code-server access
resource "random_string" "shpod_${index}" {
length = 6
special = false
upper = false
}
resource "kubernetes_namespace" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
@@ -39,53 +21,121 @@ resource "kubernetes_namespace" "shpod_${index}" {
}
}
data "kubernetes_service" "shpod_${index}" {
depends_on = [ helm_release.shpod_${index} ]
resource "kubernetes_deployment" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod"
namespace = "shpod"
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
}
spec {
selector {
match_labels = {
app = "shpod"
}
}
template {
metadata {
labels = {
app = "shpod"
}
}
spec {
service_account_name = "shpod"
container {
image = "jpetazzo/shpod"
name = "shpod"
env {
name = "PASSWORD"
value = random_string.shpod_${index}.result
}
lifecycle {
post_start {
exec {
command = [ "sh", "-c", "curl http://myip.enix.org/REMOTE_ADDR > /etc/HOSTIP || true" ]
}
}
}
resources {
limits = {
cpu = "2"
memory = "500M"
}
requests = {
cpu = "100m"
memory = "250M"
}
}
}
}
}
}
}
resource "helm_release" "shpod_${index}" {
provider = helm.cluster_${index}
repository = "https://shpod.in"
chart = "shpod"
name = "shpod"
namespace = "shpod"
create_namespace = false
values = [
yamlencode({
service = {
type = "NodePort"
}
resources = {
requests = {
cpu = "100m"
memory = "500M"
}
limits = {
cpu = "1"
memory = "1000M"
}
}
persistentVolume = {
enabled = true
}
ssh = {
password = random_string.shpod_${index}.result
}
rbac = {
cluster = {
clusterRoles = [ "cluster-admin" ]
}
}
codeServer = {
enabled = true
}
})
]
resource "kubernetes_service" "shpod_${index}" {
provider = kubernetes.cluster_${index}
lifecycle {
# Folks might alter their shpod Service to expose extra ports.
# Don't reset their changes.
ignore_changes = [ spec ]
}
metadata {
name = "shpod"
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
}
spec {
selector = {
app = "shpod"
}
port {
name = "ssh"
port = 22
target_port = 22
}
type = "NodePort"
}
}
resource "kubernetes_service_account" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod"
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
}
}
resource "kubernetes_cluster_role_binding" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
subject {
kind = "ServiceAccount"
name = "shpod"
namespace = "shpod"
}
subject {
api_group = "rbac.authorization.k8s.io"
kind = "Group"
name = "shpod-cluster-admins"
}
}
resource "random_string" "shpod_${index}" {
length = 6
special = false
upper = false
}
provider "helm" {
alias = "cluster_${index}"
kubernetes {
config_path = "./kubeconfig.${index}"
}
}
resource "helm_release" "metrics_server_${index}" {
@@ -100,75 +150,13 @@ resource "helm_release" "metrics_server_${index}" {
name = "metrics-server"
namespace = "metrics-server"
create_namespace = true
values = [
yamlencode({
args = [ "--kubelet-insecure-tls" ]
})
]
}
# As of October 2025, the ebs-csi-driver addon (which is used on EKS
# to provision persistent volumes) doesn't automatically create a
# StorageClass. Here, we're trying to detect the DaemonSet created
# by the ebs-csi-driver; and if we find it, we create the corresponding
# StorageClass.
data "kubernetes_resources" "ebs_csi_node_${index}" {
provider = kubernetes.cluster_${index}
api_version = "apps/v1"
kind = "DaemonSet"
label_selector = "app.kubernetes.io/name=aws-ebs-csi-driver"
namespace = "kube-system"
}
resource "kubernetes_storage_class" "ebs_csi_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 1 : 0
provider = kubernetes.cluster_${index}
metadata {
name = "ebs-csi"
annotations = {
"storageclass.kubernetes.io/is-default-class" = "true"
}
set {
name = "args"
value = "{--kubelet-insecure-tls}"
}
storage_provisioner = "ebs.csi.aws.com"
}
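
As a side note, the same condition can be checked by hand; a minimal sketch with kubectl, assuming a kubeconfig pointing at the freshly provisioned cluster (these commands are not part of the Terraform configuration):

```bash
# Is the EBS CSI driver DaemonSet there? (same label selector as the data source above)
kubectl -n kube-system get daemonset -l app.kubernetes.io/name=aws-ebs-csi-driver

# If so, the "ebs-csi" StorageClass should exist and be annotated as the default class
kubectl get storageclass ebs-csi -o yaml | grep is-default-class
```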
# This section here deserves a little explanation.
#
# When we access a cluster with shpod (either through SSH or code-server)
# there is no kubeconfig file - we simply use "in-cluster" authentication
# with a ServiceAccount token. This is a bit unusual, and ideally, I would
# prefer to have a "normal" kubeconfig file in the students' shell.
#
# So what we're doing here, is that we're populating a ConfigMap with
# a kubeconfig file; and in the initialization scripts (e.g. bashrc) we
# automatically download the kubeconfig file from the ConfigMap and place
# it in ~/.kube/kubeconfig.
#
# But, which kubeconfig file should we use? We could use the "normal"
# kubeconfig file that was generated by the provider; but in some cases,
# that kubeconfig file might use a token instead of a certificate for
# user authentication - and ideally, I would like to have a certificate
# so that in the section about auth and RBAC, we can dissect that TLS
# certificate and explain where our permissions come from.
#
# So we're creating a TLS key pair; using the CSR API to issue a user
# certificate belonging to a special group; and granting the cluster-admin
# role to that group; then we use the kubeconfig file generated by the
# provider but override the user with that TLS key pair.
#
# This is not strictly necessary but it streamlines the lesson on auth.
#
# Lastly - in the ConfigMap we actually put both the original kubeconfig,
# and the one where we injected our new user (just in case we want to
# use or look at the original for any reason).
#
# One more thing: the kubernetes.io/kube-apiserver-client signer is
# disabled on EKS, so... we don't generate that ConfigMap on EKS.
# To detect if we're on EKS, we're looking for the ebs-csi-node DaemonSet.
# (Which means that the detection will break if the ebs-csi addon is missing.)
resource "kubernetes_config_map" "kubeconfig_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 0 : 1
provider = kubernetes.cluster_${index}
metadata {
name = "kubeconfig"
@@ -194,7 +182,7 @@ resource "kubernetes_config_map" "kubeconfig_${index}" {
- name: cluster-admin
user:
client-key-data: $${base64encode(tls_private_key.cluster_admin_${index}.private_key_pem)}
client-certificate-data: $${base64encode(kubernetes_certificate_signing_request_v1.cluster_admin_${index}[0].certificate)}
client-certificate-data: $${base64encode(kubernetes_certificate_signing_request_v1.cluster_admin_${index}.certificate)}
EOT
}
}
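
To illustrate the consumer side described in the comment above, here is a minimal sketch of what the shell initialization could do with that ConfigMap; the namespace and the data key name are assumptions, while the ConfigMap name and the target path (~/.kube/kubeconfig) come from the comment and resource above.

```bash
# Sketch: download the kubeconfig stored in the "kubeconfig" ConfigMap
mkdir -p ~/.kube
kubectl -n shpod get configmap kubeconfig \
        -o jsonpath='{.data.kubeconfig}' > ~/.kube/kubeconfig   # namespace and key assumed
export KUBECONFIG=~/.kube/kubeconfig
```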
@@ -214,25 +202,7 @@ resource "tls_cert_request" "cluster_admin_${index}" {
}
}
resource "kubernetes_cluster_role_binding" "shpod_cluster_admin_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod-cluster-admin"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
subject {
api_group = "rbac.authorization.k8s.io"
kind = "Group"
name = "shpod-cluster-admins"
}
}
resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 0 : 1
provider = kubernetes.cluster_${index}
metadata {
name = "cluster-admin"
@@ -264,8 +234,7 @@ output "logins_jsonl" {
jsonencode({
login = "k8s",
password = random_string.shpod_${index}.result,
port = data.kubernetes_service.shpod_${index}.spec[0].port[0].node_port,
codeServerPort = data.kubernetes_service.shpod_${index}.spec[0].port[1].node_port,
port = kubernetes_service.shpod_${index}.spec[0].port[0].node_port,
ipaddrs = replace(file("./externalips.${index}"), " ", "\t"),
}),
%{ endfor ~}

View File

@@ -23,7 +23,7 @@ variable "node_size" {
}
variable "location" {
type = string
type = string
default = null
}

View File

@@ -1,45 +1,60 @@
data "aws_eks_cluster_versions" "_" {
default_only = true
# Taken from:
# https://github.com/hashicorp/learn-terraform-provision-eks-cluster/blob/main/main.tf
data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.19.0"
name = var.cluster_name
cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 21.0"
name = var.cluster_name
kubernetes_version = data.aws_eks_cluster_versions._.cluster_versions[0].cluster_version
vpc_id = local.vpc_id
subnet_ids = local.subnet_ids
endpoint_public_access = true
enable_cluster_creator_admin_permissions = true
upgrade_policy = {
# The default policy is EXTENDED, which incurs additional costs
# when running an old control plane. We don't advise running old
# control planes, but we also don't want to incur costs if an
# old version is chosen accidentally.
support_type = "STANDARD"
}
source = "terraform-aws-modules/eks/aws"
version = "19.5.1"
cluster_name = var.cluster_name
cluster_version = "1.24"
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
cluster_endpoint_public_access = true
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
addons = {
coredns = {}
eks-pod-identity-agent = {
before_compute = true
}
kube-proxy = {}
vpc-cni = {
before_compute = true
}
aws-ebs-csi-driver = {
service_account_role_arn = module.irsa-ebs-csi.iam_role_arn
}
}
eks_managed_node_groups = {
x86 = {
name = "x86"
one = {
name = "node-group-one"
instance_types = [local.node_size]
min_size = var.min_nodes_per_pool
max_size = var.max_nodes_per_pool
desired_size = var.min_nodes_per_pool
min_size = var.min_nodes_per_pool
max_size = var.max_nodes_per_pool
desired_size = var.min_nodes_per_pool
}
}
}
@@ -51,7 +66,7 @@ data "aws_iam_policy" "ebs_csi_policy" {
module "irsa-ebs-csi" {
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
version = "~> 5.39.0"
version = "4.7.0"
create_role = true
role_name = "AmazonEKSTFEBSCSIRole-${module.eks.cluster_name}"
@@ -60,9 +75,13 @@ module "irsa-ebs-csi" {
oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"]
}
resource "aws_vpc_security_group_ingress_rule" "_" {
security_group_id = module.eks.node_security_group_id
cidr_ipv4 = "0.0.0.0/0"
ip_protocol = -1
description = "Allow all traffic to Kubernetes nodes (so that we can use NodePorts, hostPorts, etc.)"
resource "aws_eks_addon" "ebs-csi" {
cluster_name = module.eks.cluster_name
addon_name = "aws-ebs-csi-driver"
addon_version = "v1.5.2-eksbuild.1"
service_account_role_arn = module.irsa-ebs-csi.iam_role_arn
tags = {
"eks_addon" = "ebs-csi"
"terraform" = "true"
}
}

View File

@@ -2,7 +2,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 6.17.0"
version = "~> 4.47.0"
}
}
}

View File

@@ -1,61 +0,0 @@
# OK, we have two options here.
# 1. Create our own VPC
# - Pros: provides good isolation from other stuff deployed in the
# AWS account; makes sure that we don't interact with
# existing security groups, subnets, etc.
# - Cons: by default, there is a quota of 5 VPC per region, so
# we can only deploy 5 clusters
# 2. Use the default VPC
# - Pros/cons: the opposite :)
variable "use_default_vpc" {
type = bool
default = true
}
data "aws_vpc" "default" {
default = true
}
data "aws_subnets" "default" {
filter {
name = "vpc-id"
values = [data.aws_vpc.default.id]
}
}
data "aws_availability_zones" "available" {}
module "vpc" {
count = var.use_default_vpc ? 0 : 1
source = "terraform-aws-modules/vpc/aws"
version = "~> 6.0"
name = var.cluster_name
cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
private_subnets = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
public_subnets = ["10.0.21.0/24", "10.0.22.0/24", "10.0.23.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
map_public_ip_on_launch = true
public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
locals {
vpc_id = var.use_default_vpc ? data.aws_vpc.default.id : module.vpc[0].vpc_id
subnet_ids = var.use_default_vpc ? data.aws_subnets.default.ids : module.vpc[0].public_subnets
}
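
Switching between the two strategies is a single variable flip; for instance, assuming this configuration is applied directly with Terraform:

```bash
# Default: reuse the account's default VPC
terraform apply

# Dedicated VPC per cluster (better isolation, but limited by the 5-VPCs-per-region quota)
terraform apply -var use_default_vpc=false
```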

View File

@@ -0,0 +1,12 @@
locals {
location = var.location != null ? var.location : "europe-north1-a"
region = replace(local.location, "/-[a-z]$/", "")
# Unfortunately, the following line doesn't work
# (that attribute just returns an empty string)
# so we have to hard-code the project name.
#project = data.google_client_config._.project
project = "prepare-tf"
}
data "google_client_config" "_" {}

View File

@@ -1,7 +1,7 @@
resource "google_container_cluster" "_" {
name = var.cluster_name
location = local.location
deletion_protection = false
name = var.cluster_name
project = local.project
location = local.location
#min_master_version = var.k8s_version
# To deploy private clusters, uncomment the section below,
@@ -42,7 +42,7 @@ resource "google_container_cluster" "_" {
node_pool {
name = "x86"
node_config {
tags = ["lab-${var.cluster_name}"]
tags = var.common_tags
machine_type = local.node_size
}
initial_node_count = var.min_nodes_per_pool
@@ -62,25 +62,3 @@ resource "google_container_cluster" "_" {
}
}
}
resource "google_compute_firewall" "_" {
name = "lab-${var.cluster_name}"
network = "default"
allow {
protocol = "tcp"
ports = ["0-65535"]
}
allow {
protocol = "udp"
ports = ["0-65535"]
}
allow {
protocol = "icmp"
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["lab-${var.cluster_name}"]
}

View File

@@ -6,8 +6,6 @@ output "has_metrics_server" {
value = true
}
data "google_client_config" "_" {}
output "kubeconfig" {
sensitive = true
value = <<-EOT

View File

@@ -1 +0,0 @@
../../providers/googlecloud/provider.tf

View File

@@ -0,0 +1,8 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "4.5.0"
}
}
}

View File

@@ -30,7 +30,7 @@ resource "scaleway_k8s_pool" "_" {
max_size = var.max_nodes_per_pool
autoscaling = var.max_nodes_per_pool > var.min_nodes_per_pool
autohealing = true
depends_on = [scaleway_instance_security_group._]
depends_on = [ scaleway_instance_security_group._ ]
}
data "scaleway_k8s_version" "_" {

View File

@@ -4,36 +4,25 @@ resource "helm_release" "_" {
create_namespace = true
repository = "https://charts.loft.sh"
chart = "vcluster"
version = "0.27.1"
values = [
yamlencode({
controlPlane = {
proxy = {
extraSANs = [ local.guest_api_server_host ]
}
service = {
spec = {
type = "NodePort"
}
}
statefulSet = {
persistence = {
volumeClaim = {
enabled = true
}
}
}
}
sync = {
fromHost = {
nodes = {
enabled = true
selector = {
all = true
}
}
}
}
})
]
version = "0.19.7"
set {
name = "service.type"
value = "NodePort"
}
set {
name = "storage.persistence"
value = "false"
}
set {
name = "sync.nodes.enabled"
value = "true"
}
set {
name = "sync.nodes.syncAllNodes"
value = "true"
}
set {
name = "syncer.extraArgs"
value = "{--tls-san=${local.guest_api_server_host}}"
}
}

View File

@@ -1,8 +0,0 @@
terraform {
required_providers {
helm = {
source = "hashicorp/helm"
version = "~> 3.0"
}
}
}

View File

@@ -1,8 +0,0 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "~> 7.0"
}
}
}

View File

@@ -9,9 +9,5 @@ variable "node_sizes" {
variable "location" {
type = string
default = "europe-north1-a"
default = null
}
locals {
location = (var.location != "" && var.location != null) ? var.location : "europe-north1-a"
}

View File

@@ -1,30 +0,0 @@
variable "proxmox_endpoint" {
type = string
default = "https://localhost:8006/"
}
variable "proxmox_username" {
type = string
default = null
}
variable "proxmox_password" {
type = string
default = null
}
variable "proxmox_storage" {
type = string
default = "local"
}
variable "proxmox_template_node_name" {
type = string
default = null
}
variable "proxmox_template_vm_id" {
type = number
default = null
}

View File

@@ -1,11 +0,0 @@
# Since node size needs to be a string...
# To indicate number of CPUs + RAM, just pass it as a string with a space between them.
# RAM is in megabytes.
variable "node_sizes" {
type = map(any)
default = {
S = "1 2048"
M = "2 4096"
L = "3 8192"
}
}

View File

@@ -1,5 +1,5 @@
provider "helm" {
kubernetes = {
kubernetes {
config_path = "~/kubeconfig"
}
}

View File

@@ -56,15 +56,13 @@ locals {
cluster_name = format("%s-%03d", var.tag, cn[0])
node_name = format("%s-%03d-%03d", var.tag, cn[0], cn[1])
node_size = lookup(var.node_sizes, var.node_size, var.node_size)
node_index = cn[0] * var.nodes_per_cluster + cn[1]
}
}
}
resource "local_file" "ip_addresses" {
content = join("", formatlist("%s\n", [
for key, value in local.ip_addresses :
strcontains(value, ".") ? value : "[${value}]"
for key, value in local.ip_addresses : value
]))
filename = "ips.txt"
file_permission = "0600"

View File

@@ -1 +0,0 @@
../common.tf

View File

@@ -1 +0,0 @@
../../providers/googlecloud/config.tf

View File

@@ -1,54 +0,0 @@
# Note: names and tags on GCP have to match a specific regex:
# (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)
# In other words, they must start with a letter; but our tags
# generally start with a number (year-month-day..., e.g. 2025-...),
# so we prefix names and tags with "lab-" in this configuration.
resource "google_compute_instance" "_" {
for_each = local.nodes
zone = var.location
name = "lab-${each.value.node_name}"
tags = ["lab-${var.tag}"]
machine_type = each.value.node_size
boot_disk {
initialize_params {
image = "ubuntu-os-cloud/ubuntu-2404-lts-amd64"
}
}
network_interface {
network = "default"
access_config {}
}
metadata = {
"ssh-keys" = "ubuntu:${tls_private_key.ssh.public_key_openssh}"
}
}
locals {
ip_addresses = {
for key, value in local.nodes :
key => google_compute_instance._[key].network_interface[0].access_config[0].nat_ip
}
}
resource "google_compute_firewall" "_" {
name = "lab-${var.tag}"
network = "default"
allow {
protocol = "tcp"
ports = ["0-65535"]
}
allow {
protocol = "udp"
ports = ["0-65535"]
}
allow {
protocol = "icmp"
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["lab-${var.tag}"]
}

View File

@@ -1 +0,0 @@
../../providers/googlecloud/provider.tf

View File

@@ -1 +0,0 @@
../../providers/googlecloud/variables.tf

View File

@@ -1 +0,0 @@
../common.tf

View File

@@ -1 +0,0 @@
../../providers/proxmox/config.tf

View File

@@ -1,37 +0,0 @@
# If we deploy in IPv6-only environments, and the students don't have IPv6
# connectivity, we want to offer a way to connect anyway. Our solution is
# to generate an HAProxy configuration snippet that can be copied to a
# dual-stack machine, which will act as a proxy to our IPv6 machines.
# Note that the snippet still has to be copied, so this is not a 100%
# streamlined solution!
locals {
portmaps = {
for key, value in local.nodes :
(10000 + proxmox_virtual_environment_vm._[key].vm_id) => local.ip_addresses[key]
}
}
resource "local_file" "haproxy" {
filename = "./${var.tag}.cfg"
file_permission = "0644"
content = join("\n", [for port, address in local.portmaps : <<-EOT
frontend f${port}
bind *:${port}
default_backend b${port}
backend b${port}
mode tcp
server s${port} [${address}]:22 maxconn 16
EOT
])
}
resource "local_file" "sshproxy" {
filename = "sshproxy.txt"
file_permission = "0644"
content = join("", [
for cid in range(1, 1 + var.how_many_clusters) :
format("ssh -l k8s -p %d\n", proxmox_virtual_environment_vm._[format("c%03dn%03d", cid, 1)].vm_id + 10000)
])
}
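
To make the intent concrete, a rough sketch of how the generated files could be used; the proxy host name, the HAProxy setup, and the exact port are assumptions, not outputs of this configuration.

```bash
TAG=2024-12-09                         # example lab tag; the snippet is named "<tag>.cfg"
# Copy the snippet to a dual-stack machine running HAProxy
# (assuming HAProxy there loads extra configuration from that directory)
scp "./$TAG.cfg" dualstack.example.com:/etc/haproxy/conf.d/
ssh dualstack.example.com sudo systemctl reload haproxy
# Nodes are then reachable through the proxy on port 10000 + VM ID, per sshproxy.txt:
ssh -l k8s -p 10042 dualstack.example.com    # example port
```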

View File

@@ -1,113 +0,0 @@
data "proxmox_virtual_environment_nodes" "_" {}
data "proxmox_virtual_environment_vms" "_" {
filter {
name = "template"
values = [true]
}
}
data "proxmox_virtual_environment_vms" "templates" {
for_each = toset(data.proxmox_virtual_environment_nodes._.names)
tags = ["ubuntu"]
filter {
name = "node_name"
values = [each.value]
}
filter {
name = "template"
values = [true]
}
}
locals {
pve_nodes = data.proxmox_virtual_environment_nodes._.names
pve_node = { for k, v in local.nodes : k => local.pve_nodes[v.node_index % length(local.pve_nodes)] }
pve_template_id = { for k, v in local.nodes : k => data.proxmox_virtual_environment_vms.templates[local.pve_node[k]].vms[0].vm_id }
}
resource "proxmox_virtual_environment_vm" "_" {
for_each = local.nodes
node_name = local.pve_node[each.key]
name = each.value.node_name
tags = ["container.training", var.tag]
stop_on_destroy = true
cpu {
cores = split(" ", each.value.node_size)[0]
type = "x86-64-v2-AES" # recommended for modern CPUs
}
memory {
dedicated = split(" ", each.value.node_size)[1]
}
#disk {
# datastore_id = var.proxmox_storage
# file_id = proxmox_virtual_environment_file._.id
# interface = "scsi0"
# size = 30
# discard = "on"
#}
### Strategy 1: clone from shared storage
#clone {
# vm_id = var.proxmox_template_vm_id
# node_name = var.proxmox_template_node_name
# full = false
#}
### Strategy 2: clone from local storage
### (requires that the template exists on each node)
clone {
vm_id = local.pve_template_id[each.key]
node_name = local.pve_node[each.key]
full = false
}
agent {
enabled = true
}
initialization {
datastore_id = var.proxmox_storage
user_account {
username = "ubuntu"
keys = [trimspace(tls_private_key.ssh.public_key_openssh)]
}
ip_config {
ipv4 {
address = "dhcp"
}
ipv6 {
address = "dhcp"
}
}
}
network_device {
bridge = "vmbr0"
}
operating_system {
type = "l26"
}
}
#resource "proxmox_virtual_environment_download_file" "ubuntu_2404_20250115" {
# content_type = "iso"
# datastore_id = "cephfs"
# node_name = "pve-lsd-1"
# url = "https://cloud-images.ubuntu.com/releases/24.04/release-20250115/ubuntu-24.04-server-cloudimg-amd64.img"
# file_name = "ubuntu_2404_20250115.img"
#}
#
#resource "proxmox_virtual_environment_file" "_" {
# datastore_id = "cephfs"
# node_name = "pve-lsd-1"
# source_file {
# path = "/root/noble-server-cloudimg-amd64.img"
# }
#}
locals {
ip_addresses = {
for key, value in local.nodes :
key => [for addr in flatten(concat(
proxmox_virtual_environment_vm._[key].ipv6_addresses,
proxmox_virtual_environment_vm._[key].ipv4_addresses,
["ERROR"])) :
addr if addr != "127.0.0.1" && addr != "::1"][0]
}
}

View File

@@ -1,15 +0,0 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "~> 0.86.0"
}
}
}
provider "proxmox" {
endpoint = var.proxmox_endpoint
username = var.proxmox_username
password = var.proxmox_password
insecure = true
}

View File

@@ -1,20 +0,0 @@
# If you want to deploy to Proxmox, you need to:
# 1) copy that file to e.g. myproxmoxcluster.tfvars
# 2) make sure you have a VM template with QEMU agent pre-installed
# 3) customize the copy (you need to replace all the CHANGEME values)
# 4) deploy with "labctl create --provider proxmox/myproxmoxcluster ..."
proxmox_endpoint = "https://localhost:8006/"
proxmox_username = "terraform@pve"
proxmox_password = "CHANGEME"
# Which storage to use for VM disks. Defaults to "local".
#proxmox_storage = "ceph"
#proxmox_storage = "local-zfs"
# We recently rewrote the Proxmox configurations to automatically
# detect which template to use; so these variables aren't used anymore.
#proxmox_template_node_name = "CHANGEME"
#proxmox_template_vm_id = CHANGEME
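
Putting the four steps above together, a hypothetical session could look like this (the example file name and editor are assumptions; the trailing labctl arguments are left elided, as in the comment above):

```bash
# 1) Copy the example file
cp proxmox.tfvars.example myproxmoxcluster.tfvars
# 2) Make sure a VM template with the QEMU guest agent exists on the Proxmox cluster
# 3) Replace every CHANGEME value in the copy
${EDITOR:-vi} myproxmoxcluster.tfvars
# 4) Deploy (remaining arguments not shown here):
#    labctl create --provider proxmox/myproxmoxcluster ...
```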

View File

@@ -1 +0,0 @@
../../providers/proxmox/variables.tf

View File

@@ -2,7 +2,7 @@
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /menu.html 200!
/ /kube.yml.html 200!
# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack

View File

@@ -1,31 +0,0 @@
#!/usr/bin/env python
import os
import re
import sys
html_file = sys.argv[1]
output_file_template = "_academy_{}.html"
title_regex = "name: toc-(.*)"
redirects = open("_redirects", "w")
sections = re.split(title_regex, open(html_file).read())[1:]
while sections:
link, markdown = sections[0], sections[1]
sections = sections[2:]
output_file_name = output_file_template.format(link)
with open(output_file_name, "w") as f:
html = open("workshop.html").read()
html = html.replace("@@MARKDOWN@@", markdown)
titles = re.findall("# (.*)", markdown) + [""]
html = html.replace("@@TITLE@@", "{} — Kubernetes Academy".format(titles[0]))
html = html.replace("@@SLIDENUMBERPREFIX@@", "")
html = html.replace("@@EXCLUDE@@", "")
html = html.replace(".nav[", ".hide[")
f.write(html)
redirects.write("/{} /{} 200!\n".format(link, output_file_name))
html = open(html_file).read()
html = re.sub("#toc-([^)]*)", "_academy_\\1.html", html)
sys.stdout.write(html)

View File

@@ -1,81 +0,0 @@
title: |
Containers
chat: "[Mattermost](https://training.enix.io/)"
gitrepo: github.com/jpetazzo/container.training
slides: https://2025-06-dila.container.training/
#slidenumberprefix: "#SomeHashTag &mdash; "
exclude:
- self-paced
content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
#- containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Namespaces_Cgroups.md
- containers/Init_Systems.md
- exercises/container-from-scratch-details.md
- # DAY 2
- containers/Initial_Images.md
- containers/Copy_On_Write.md
- containers/Images_Deep_Dive.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Multi_Stage_Builds.md
- containers/Buildkit.md
- containers/Exercise_Dockerfile_Basic.md
- containers/Exercise_Dockerfile_Multistage.md
- containers/Exercise_Dockerfile_Buildkit.md
- exercises/image-from-scratch-details.md
- # DAY 3
- |
# Security mechanisms
[🔗 Overall security features](#toc-security-features)
[🔗 User namespaces](#96)
- containers/Security.md
- containers/Rootless_Networking.md
- containers/Container_Engines.md
- shared/cloud-native-security.md
- shared/thankyou.md
#- containers/links.md
#- containers/Orchestration_Overview.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Training_Environment.md
#- containers/Installing_Docker.md
#- containers/First_Containers.md
#- containers/Background_Containers.md
#- containers/Building_Images_Interactively.md
#- containers/Building_Images_With_Dockerfiles.md
#- containers/Cmd_And_Entrypoint.md
#- containers/Copying_Files_During_Build.md
#- containers/Container_Networking_Basics.md
#- containers/Local_Development_Workflow.md
#- containers/Container_Network_Model.md
#- containers/Compose_For_Dev_Stacks.md
#- containers/Exercise_Composefile.md
#- containers/Start_And_Attach.md
#- containers/Naming_And_Inspecting.md
#- containers/Labels.md
#- containers/Getting_Inside.md
#- containers/Dockerfile_Tips.md
#- containers/Advanced_Dockerfiles.md
#- containers/Publishing_To_Docker_Hub.md
#- containers/Network_Drivers.md
#- containers/Docker_Machine.md

View File

@@ -29,20 +29,6 @@ At the end of this lesson, you will be able to:
---
## `Dockerfile` example
```
FROM python:alpine
WORKDIR /app
RUN pip install Flask
COPY rng.py .
ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
CMD ["flask", "run"]
EXPOSE 80
```
---
## Writing our first `Dockerfile`
Our Dockerfile must be in a **new, empty directory**.

View File

@@ -32,7 +32,7 @@ Compose enables a simple, powerful onboarding workflow:
1. Checkout our code.
2. Run `docker compose up`.
2. Run `docker-compose up`.
3. Our app is up and running!
@@ -66,19 +66,19 @@ class: pic
1. Write Dockerfiles
2. Describe our stack of containers in a YAML file (the "Compose file")
2. Describe our stack of containers in a YAML file called `docker-compose.yml`
3. `docker compose up` (or `docker compose up -d` to run in the background)
3. `docker-compose up` (or `docker-compose up -d` to run in the background)
4. Compose pulls and builds the required images, and starts the containers
5. Compose shows the combined logs of all the containers
(if running in the background, use `docker compose logs`)
(if running in the background, use `docker-compose logs`)
6. Hit Ctrl-C to stop the whole stack
(if running in the background, use `docker compose stop`)
(if running in the background, use `docker-compose stop`)
---
@@ -86,11 +86,11 @@ class: pic
After making changes to our source code, we can:
1. `docker compose build` to rebuild container images
1. `docker-compose build` to rebuild container images
2. `docker compose up` to restart the stack with the new images
2. `docker-compose up` to restart the stack with the new images
We can also combine both with `docker compose up --build`
We can also combine both with `docker-compose up --build`
Compose will be smart, and only recreate the containers that have changed.
@@ -114,7 +114,7 @@ cd trainingwheels
Second step: start the app.
```bash
docker compose up
docker-compose up
```
Watch Compose build and run the app.
@@ -141,17 +141,7 @@ After ten seconds (or if we press `^C` again) it will forcibly kill them.
---
## The Compose file
* Historically: docker-compose.yml or .yaml
* Recently (kind of): can also be named compose.yml or .yaml
(Since [version 1.28.6, March 2021](https://docs.docker.com/compose/releases/release-notes/#1286))
---
## Example
## The `docker-compose.yml` file
Here is the file used in the demo:
@@ -182,9 +172,9 @@ services:
A Compose file has multiple sections:
* `services` is mandatory. Each service corresponds to a container.
* `version` is mandatory. (Typically use "3".)
* `version` is optional (it used to be mandatory). It can be ignored.
* `services` is mandatory. Each service corresponds to a container.
* `networks` is optional and indicates to which networks containers should be connected.
<br/>(By default, containers will be connected on a private, per-compose-file network.)
@@ -193,24 +183,24 @@ A Compose file has multiple sections:
---
class: extra-details
## Compose file versions
* Version 1 is legacy and shouldn't be used.
(If you see a Compose file without a `services` block, it's a legacy v1 file.)
(If you see a Compose file without `version` and `services`, it's a legacy v1 file.)
* Version 2 added support for networks and volumes.
* Version 3 added support for deployment options (scaling, rolling updates, etc).
* Typically use `version: "3"`.
The [Docker documentation](https://docs.docker.com/compose/compose-file/)
has excellent information about the Compose file format if you need to know more about versions.
---
## Containers in Compose file
## Containers in `docker-compose.yml`
Each service in the YAML file must contain either `build`, or `image`.
@@ -288,7 +278,7 @@ For the full list, check: https://docs.docker.com/compose/compose-file/
`frontcopy_www`, `frontcopy_www_1`, `frontcopy_db_1`
- Alternatively, use `docker compose -p frontcopy`
- Alternatively, use `docker-compose -p frontcopy`
(to set the `--project-name` of a stack, which defaults to the directory name)
@@ -298,10 +288,10 @@ For the full list, check: https://docs.docker.com/compose/compose-file/
## Checking stack status
We have `ps`, `docker ps`, and similarly, `docker compose ps`:
We have `ps`, `docker ps`, and similarly, `docker-compose ps`:
```bash
$ docker compose ps
$ docker-compose ps
Name Command State Ports
----------------------------------------------------------------------------
trainingwheels_redis_1 /entrypoint.sh red Up 6379/tcp
@@ -320,13 +310,13 @@ If you have started your application in the background with Compose and
want to stop it easily, you can use the `kill` command:
```bash
$ docker compose kill
$ docker-compose kill
```
Likewise, `docker compose rm` will let you remove containers (after confirmation):
Likewise, `docker-compose rm` will let you remove containers (after confirmation):
```bash
$ docker compose rm
$ docker-compose rm
Going to remove trainingwheels_redis_1, trainingwheels_www_1
Are you sure? [yN] y
Removing trainingwheels_redis_1...
@@ -337,19 +327,19 @@ Removing trainingwheels_www_1...
## Cleaning up (2)
Alternatively, `docker compose down` will stop and remove containers.
Alternatively, `docker-compose down` will stop and remove containers.
It will also remove other resources, like networks that were created for the application.
```bash
$ docker compose down
$ docker-compose down
Stopping trainingwheels_www_1 ... done
Stopping trainingwheels_redis_1 ... done
Removing trainingwheels_www_1 ... done
Removing trainingwheels_redis_1 ... done
```
Use `docker compose down -v` to remove everything including volumes.
Use `docker-compose down -v` to remove everything including volumes.
---
@@ -379,15 +369,15 @@ Use `docker compose down -v` to remove everything including volumes.
- If the container is deleted, the volume gets orphaned
- Example: `docker compose down && docker compose up`
- Example: `docker-compose down && docker-compose up`
- the old volume still exists, detached from its container
- a new volume gets created
- `docker compose down -v`/`--volumes` deletes volumes
- `docker-compose down -v`/`--volumes` deletes volumes
(but **not** `docker compose down && docker compose down -v`!)
(but **not** `docker-compose down && docker-compose down -v`!)
---
@@ -406,9 +396,9 @@ volumes:
- Volume will be named `<project>_data`
- It won't be orphaned with `docker compose down`
- It won't be orphaned with `docker-compose down`
- It will correctly be removed with `docker compose down -v`
- It will correctly be removed with `docker-compose down -v`
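
A minimal illustration of that lifecycle, assuming a project named `myapp` with a top-level volume called `data`:

```bash
docker compose up -d      # creates the "myapp_data" volume if it doesn't exist yet
docker compose down       # removes containers and networks; "myapp_data" survives
docker compose up -d      # the same volume is reattached; the data is still there
docker compose down -v    # this time the named volume is removed as well
```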
---
@@ -427,7 +417,7 @@ services:
(for migration, backups, disk usage accounting...)
- Won't be removed by `docker compose down -v`
- Won't be removed by `docker-compose down -v`
---
@@ -461,7 +451,7 @@ services:
- This is used when bringing up individual services
(e.g. `docker compose up blah` or `docker compose run foo`)
(e.g. `docker-compose up blah` or `docker-compose run foo`)
⚠️ It doesn't make a service "wait" for another one to be up!
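
A tiny demonstration of both points, with hypothetical services `web` (declaring `depends_on: [db]`) and `db`:

```bash
docker compose up -d web   # "db" is created and started too, because of depends_on
docker compose ps          # ...but "web" may start before "db" is actually ready:
                           # depends_on orders startup, it doesn't wait for readiness
```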
@@ -481,9 +471,7 @@ class: extra-details
- `docker compose` command to deploy Compose stacks to some clouds
- in Go instead of Python
- progressively getting feature parity with `docker compose`
- progressively getting feature parity with `docker-compose`
- also provides numerous improvements (e.g. leverages BuildKit by default)

View File

@@ -84,9 +84,9 @@ like Windows, macOS, Solaris, FreeBSD ...
* Each `lxc-start` process exposes a custom API over a local UNIX socket, allowing to interact with the container.
* No notion of image (container filesystems had to be managed manually).
* No notion of image (container filesystems have to be managed manually).
* Networking had to be set up manually.
* Networking has to be set up manually.
---
@@ -98,22 +98,10 @@ like Windows, macOS, Solaris, FreeBSD ...
* Daemon exposing a REST API.
* Can run containers and virtual machines.
* Can manage images, snapshots, migrations, networking, storage.
* "offers a user experience similar to virtual machines but using Linux containers instead."
* Driven by Canonical.
---
## Incus
* Community-driven fork of LXD.
* Relatively recent ([announced in August 2023](https://linuxcontainers.org/incus/announcement/)), so time will tell what the notable differences will be.
---
## CRI-O
@@ -152,7 +140,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
---
## [Kata containers](https://katacontainers.io/)
## Kata containers
* OCI-compliant runtime.
@@ -164,7 +152,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
---
## [gVisor](https://gvisor.dev/)
## gVisor
* OCI-compliant runtime.
@@ -182,17 +170,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
---
## Others
- Micro VMs: Firecracker, Edera...
- [crun](https://github.com/containers/crun) (runc rewritten in C)
- [youki](https://youki-dev.github.io/youki/) (runc rewritten in Rust)
---
## To Docker Or Not To Docker
## Overall ...
* The Docker Engine is very developer-centric:
@@ -206,26 +184,8 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
* As a result, it is a fantastic tool in development environments.
* On Kubernetes clusters, containerd or CRI-O are better choices.
* On servers:
* On Kubernetes clusters, the container engine is an implementation detail.
- Docker is a good default choice
---
## Different levels
- Directly use namespaces, cgroups, capabilities with custom code or scripts
*useful for troubleshooting/debugging and for educative purposes; e.g. pipework*
- Use low-level engines like runc, crun, youki
*useful when building custom architectures; e.g. a brand new orchestrator*
- Use low-level APIs like CRI or containerd grpc API
*useful to achieve high-level features like Docker, but without Docker; e.g. ctr, nerdctl*
- Use high-level APIs like Docker and Kubernetes
*that's what most people will do*
- If you use Kubernetes, the engine doesn't matter

Some files were not shown because too many files have changed in this diff.